hadoop git commit: HDFS-10183. Prevent race condition during class initialization. Contributed by Pavel Avgustinov.

2018-04-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 e3a49fe1d -> ac2bea815


HDFS-10183. Prevent race condition during class initialization. Contributed by 
Pavel Avgustinov.

(cherry picked from commit f40969a141ec6aff254c41e4185cc61ea9e4e554)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac2bea81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac2bea81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac2bea81

Branch: refs/heads/branch-2.8
Commit: ac2bea815fae9aff0ed08118228af65164d9171a
Parents: e3a49fe
Author: Sangjin Lee 
Authored: Fri Apr 20 20:33:10 2018 -0700
Committer: Sangjin Lee 
Committed: Fri Apr 20 21:14:46 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java   | 8 
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java  | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac2bea81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index c14a310..5990c22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -40,7 +40,7 @@ class FSEditLogAsync extends FSEditLog implements Runnable {
   // use separate mutex to avoid possible deadlock when stopping the thread.
   private final Object syncThreadLock = new Object();
   private Thread syncThread;
-  private static ThreadLocal<Edit> threadEdit = new ThreadLocal<Edit>();
+  private static final ThreadLocal<Edit> THREAD_EDIT = new ThreadLocal<Edit>();
 
   // requires concurrent access from caller threads and syncing thread.
  private final BlockingQueue<Edit> editPendingQ =
@@ -114,16 +114,16 @@ class FSEditLogAsync extends FSEditLog implements 
Runnable {
   @Override
   void logEdit(final FSEditLogOp op) {
 Edit edit = getEditInstance(op);
-threadEdit.set(edit);
+THREAD_EDIT.set(edit);
 enqueueEdit(edit);
   }
 
   @Override
   public void logSync() {
-Edit edit = threadEdit.get();
+Edit edit = THREAD_EDIT.get();
 if (edit != null) {
   // do NOT remove to avoid expunge & rehash penalties.
-  threadEdit.set(null);
+  THREAD_EDIT.set(null);
   if (LOG.isDebugEnabled()) {
 LOG.debug("logSync " + edit);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac2bea81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index ae9bfe1..7531ae6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -148,7 +148,7 @@ public abstract class FSEditLogOp {
   int rpcCallId;
 
   public static class OpInstanceCache {
-private static ThreadLocal<OpInstanceCacheMap> cache =
+private static final ThreadLocal<OpInstanceCacheMap> CACHE =
 new ThreadLocal<OpInstanceCacheMap>() {
   @Override
   protected OpInstanceCacheMap initialValue() {
@@ -179,7 +179,7 @@ public abstract class FSEditLogOp {
 
 @SuppressWarnings("unchecked")
  public <T extends FSEditLogOp> T get(FSEditLogOpCodes opCode) {
-  return useCache ? (T)cache.get().get(opCode) : (T)newInstance(opCode);
+  return useCache ? (T)CACHE.get().get(opCode) : (T)newInstance(opCode);
 }
 
 private static FSEditLogOp newInstance(FSEditLogOpCodes opCode) {
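
For illustration only (not part of the commit), a minimal, self-contained Java sketch of the pattern the patch adopts in FSEditLogAsync: a per-thread holder declared static final (assigned exactly once during class initialization and never reassigned) and renamed to constant case. The class and names below are hypothetical stand-ins, not Hadoop code; the sketch also mirrors the set(null)-instead-of-remove() trick commented in the diff above.

public class ThreadEditHolderSketch {

  // Hypothetical stand-in for the Edit objects queued by FSEditLogAsync.
  static final class Edit {
    final long txId;
    Edit(long txId) { this.txId = txId; }
    @Override public String toString() { return "Edit#" + txId; }
  }

  // final: assigned once during class initialization and never reassigned,
  // so every thread observes a fully constructed ThreadLocal.
  private static final ThreadLocal<Edit> THREAD_EDIT = new ThreadLocal<Edit>();

  static void logEdit(long txId) {
    THREAD_EDIT.set(new Edit(txId));   // remember this thread's pending edit
  }

  static void logSync() {
    Edit edit = THREAD_EDIT.get();
    if (edit != null) {
      // set(null) rather than remove(): keeps the map entry so the next set()
      // reuses it, mirroring the "avoid expunge & rehash penalties" comment.
      THREAD_EDIT.set(null);
      System.out.println("syncing " + edit);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    Thread t = new Thread(() -> { logEdit(42); logSync(); });
    t.start();
    t.join();
    logSync();   // no pending edit on the main thread, so this is a no-op
  }
}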





hadoop git commit: HDFS-10183. Prevent race condition during class initialization. Contributed by Pavel Avgustinov.

2018-04-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 a32c05e0a -> 6c885edac


HDFS-10183. Prevent race condition during class initialization. Contributed by 
Pavel Avgustinov.

(cherry picked from commit f40969a141ec6aff254c41e4185cc61ea9e4e554)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c885eda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c885eda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c885eda

Branch: refs/heads/branch-2.9
Commit: 6c885edacd6fa247ca9cdea5033f053aa27fb09a
Parents: a32c05e
Author: Sangjin Lee 
Authored: Fri Apr 20 20:33:10 2018 -0700
Committer: Sangjin Lee 
Committed: Fri Apr 20 21:13:29 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java   | 8 
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java  | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c885eda/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index c14a310..5990c22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -40,7 +40,7 @@ class FSEditLogAsync extends FSEditLog implements Runnable {
   // use separate mutex to avoid possible deadlock when stopping the thread.
   private final Object syncThreadLock = new Object();
   private Thread syncThread;
-  private static ThreadLocal<Edit> threadEdit = new ThreadLocal<Edit>();
+  private static final ThreadLocal<Edit> THREAD_EDIT = new ThreadLocal<Edit>();
 
   // requires concurrent access from caller threads and syncing thread.
  private final BlockingQueue<Edit> editPendingQ =
@@ -114,16 +114,16 @@ class FSEditLogAsync extends FSEditLog implements 
Runnable {
   @Override
   void logEdit(final FSEditLogOp op) {
 Edit edit = getEditInstance(op);
-threadEdit.set(edit);
+THREAD_EDIT.set(edit);
 enqueueEdit(edit);
   }
 
   @Override
   public void logSync() {
-Edit edit = threadEdit.get();
+Edit edit = THREAD_EDIT.get();
 if (edit != null) {
   // do NOT remove to avoid expunge & rehash penalties.
-  threadEdit.set(null);
+  THREAD_EDIT.set(null);
   if (LOG.isDebugEnabled()) {
 LOG.debug("logSync " + edit);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c885eda/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 1ea2b81..33bc1c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -148,7 +148,7 @@ public abstract class FSEditLogOp {
   int rpcCallId;
 
   public static class OpInstanceCache {
-private static ThreadLocal<OpInstanceCacheMap> cache =
+private static final ThreadLocal<OpInstanceCacheMap> CACHE =
 new ThreadLocal<OpInstanceCacheMap>() {
   @Override
   protected OpInstanceCacheMap initialValue() {
@@ -179,7 +179,7 @@ public abstract class FSEditLogOp {
 
 @SuppressWarnings("unchecked")
  public <T extends FSEditLogOp> T get(FSEditLogOpCodes opCode) {
-  return useCache ? (T)cache.get().get(opCode) : (T)newInstance(opCode);
+  return useCache ? (T)CACHE.get().get(opCode) : (T)newInstance(opCode);
 }
 
 private static FSEditLogOp newInstance(FSEditLogOpCodes opCode) {





hadoop git commit: HDFS-10183. Prevent race condition during class initialization. Contributed by Pavel Avgustinov.

2018-04-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 4bff2df13 -> 6becb3de0


HDFS-10183. Prevent race condition during class initialization. Contributed by 
Pavel Avgustinov.

(cherry picked from commit f40969a141ec6aff254c41e4185cc61ea9e4e554)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6becb3de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6becb3de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6becb3de

Branch: refs/heads/branch-3.0
Commit: 6becb3de0b95245b36f967be18ec3b1ae91c4a08
Parents: 4bff2df
Author: Sangjin Lee 
Authored: Fri Apr 20 20:33:10 2018 -0700
Committer: Sangjin Lee 
Committed: Fri Apr 20 21:10:39 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java   | 8 
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java  | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6becb3de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index c14a310..5990c22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -40,7 +40,7 @@ class FSEditLogAsync extends FSEditLog implements Runnable {
   // use separate mutex to avoid possible deadlock when stopping the thread.
   private final Object syncThreadLock = new Object();
   private Thread syncThread;
-  private static ThreadLocal<Edit> threadEdit = new ThreadLocal<Edit>();
+  private static final ThreadLocal<Edit> THREAD_EDIT = new ThreadLocal<Edit>();
 
   // requires concurrent access from caller threads and syncing thread.
  private final BlockingQueue<Edit> editPendingQ =
@@ -114,16 +114,16 @@ class FSEditLogAsync extends FSEditLog implements 
Runnable {
   @Override
   void logEdit(final FSEditLogOp op) {
 Edit edit = getEditInstance(op);
-threadEdit.set(edit);
+THREAD_EDIT.set(edit);
 enqueueEdit(edit);
   }
 
   @Override
   public void logSync() {
-Edit edit = threadEdit.get();
+Edit edit = THREAD_EDIT.get();
 if (edit != null) {
   // do NOT remove to avoid expunge & rehash penalties.
-  threadEdit.set(null);
+  THREAD_EDIT.set(null);
   if (LOG.isDebugEnabled()) {
 LOG.debug("logSync " + edit);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6becb3de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index c0daaf1..8293a82 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -157,7 +157,7 @@ public abstract class FSEditLogOp {
   int rpcCallId;
 
   public static class OpInstanceCache {
-private static ThreadLocal<OpInstanceCacheMap> cache =
+private static final ThreadLocal<OpInstanceCacheMap> CACHE =
 new ThreadLocal<OpInstanceCacheMap>() {
   @Override
   protected OpInstanceCacheMap initialValue() {
@@ -188,7 +188,7 @@ public abstract class FSEditLogOp {
 
 @SuppressWarnings("unchecked")
  public <T extends FSEditLogOp> T get(FSEditLogOpCodes opCode) {
-  return useCache ? (T)cache.get().get(opCode) : (T)newInstance(opCode);
+  return useCache ? (T)CACHE.get().get(opCode) : (T)newInstance(opCode);
 }
 
 private static FSEditLogOp newInstance(FSEditLogOpCodes opCode) {





hadoop git commit: HDFS-10183. Prevent race condition during class initialization. Contributed by Pavel Avgustinov.

2018-04-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 5556cf397 -> 9f648f6e5


HDFS-10183. Prevent race condition during class initialization. Contributed by 
Pavel Avgustinov.

(cherry picked from commit f40969a141ec6aff254c41e4185cc61ea9e4e554)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f648f6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f648f6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f648f6e

Branch: refs/heads/branch-3.1
Commit: 9f648f6e59b2ab21d0200d042ce6ea5e4eb2518c
Parents: 5556cf3
Author: Sangjin Lee 
Authored: Fri Apr 20 20:33:10 2018 -0700
Committer: Sangjin Lee 
Committed: Fri Apr 20 20:36:31 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java   | 8 
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java  | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f648f6e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index c14a310..5990c22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -40,7 +40,7 @@ class FSEditLogAsync extends FSEditLog implements Runnable {
   // use separate mutex to avoid possible deadlock when stopping the thread.
   private final Object syncThreadLock = new Object();
   private Thread syncThread;
-  private static ThreadLocal<Edit> threadEdit = new ThreadLocal<Edit>();
+  private static final ThreadLocal<Edit> THREAD_EDIT = new ThreadLocal<Edit>();
 
   // requires concurrent access from caller threads and syncing thread.
  private final BlockingQueue<Edit> editPendingQ =
@@ -114,16 +114,16 @@ class FSEditLogAsync extends FSEditLog implements 
Runnable {
   @Override
   void logEdit(final FSEditLogOp op) {
 Edit edit = getEditInstance(op);
-threadEdit.set(edit);
+THREAD_EDIT.set(edit);
 enqueueEdit(edit);
   }
 
   @Override
   public void logSync() {
-Edit edit = threadEdit.get();
+Edit edit = THREAD_EDIT.get();
 if (edit != null) {
   // do NOT remove to avoid expunge & rehash penalties.
-  threadEdit.set(null);
+  THREAD_EDIT.set(null);
   if (LOG.isDebugEnabled()) {
 LOG.debug("logSync " + edit);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f648f6e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index c0daaf1..8293a82 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -157,7 +157,7 @@ public abstract class FSEditLogOp {
   int rpcCallId;
 
   public static class OpInstanceCache {
-private static ThreadLocal<OpInstanceCacheMap> cache =
+private static final ThreadLocal<OpInstanceCacheMap> CACHE =
 new ThreadLocal<OpInstanceCacheMap>() {
   @Override
   protected OpInstanceCacheMap initialValue() {
@@ -188,7 +188,7 @@ public abstract class FSEditLogOp {
 
 @SuppressWarnings("unchecked")
  public <T extends FSEditLogOp> T get(FSEditLogOpCodes opCode) {
-  return useCache ? (T)cache.get().get(opCode) : (T)newInstance(opCode);
+  return useCache ? (T)CACHE.get().get(opCode) : (T)newInstance(opCode);
 }
 
 private static FSEditLogOp newInstance(FSEditLogOpCodes opCode) {





hadoop git commit: HDFS-10183. Prevent race condition during class initialization. Contributed by Pavel Avgustinov.

2018-04-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 28e224439 -> f40969a14


HDFS-10183. Prevent race condition during class initialization. Contributed by 
Pavel Avgustinov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f40969a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f40969a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f40969a1

Branch: refs/heads/trunk
Commit: f40969a141ec6aff254c41e4185cc61ea9e4e554
Parents: 28e2244
Author: Sangjin Lee 
Authored: Fri Apr 20 20:33:10 2018 -0700
Committer: Sangjin Lee 
Committed: Fri Apr 20 20:33:10 2018 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java   | 8 
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java  | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f40969a1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index c14a310..5990c22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -40,7 +40,7 @@ class FSEditLogAsync extends FSEditLog implements Runnable {
   // use separate mutex to avoid possible deadlock when stopping the thread.
   private final Object syncThreadLock = new Object();
   private Thread syncThread;
-  private static ThreadLocal<Edit> threadEdit = new ThreadLocal<Edit>();
+  private static final ThreadLocal<Edit> THREAD_EDIT = new ThreadLocal<Edit>();
 
   // requires concurrent access from caller threads and syncing thread.
  private final BlockingQueue<Edit> editPendingQ =
@@ -114,16 +114,16 @@ class FSEditLogAsync extends FSEditLog implements 
Runnable {
   @Override
   void logEdit(final FSEditLogOp op) {
 Edit edit = getEditInstance(op);
-threadEdit.set(edit);
+THREAD_EDIT.set(edit);
 enqueueEdit(edit);
   }
 
   @Override
   public void logSync() {
-Edit edit = threadEdit.get();
+Edit edit = THREAD_EDIT.get();
 if (edit != null) {
   // do NOT remove to avoid expunge & rehash penalties.
-  threadEdit.set(null);
+  THREAD_EDIT.set(null);
   if (LOG.isDebugEnabled()) {
 LOG.debug("logSync " + edit);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f40969a1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index c0daaf1..8293a82 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -157,7 +157,7 @@ public abstract class FSEditLogOp {
   int rpcCallId;
 
   public static class OpInstanceCache {
-private static ThreadLocal<OpInstanceCacheMap> cache =
+private static final ThreadLocal<OpInstanceCacheMap> CACHE =
 new ThreadLocal<OpInstanceCacheMap>() {
   @Override
   protected OpInstanceCacheMap initialValue() {
@@ -188,7 +188,7 @@ public abstract class FSEditLogOp {
 
 @SuppressWarnings("unchecked")
  public <T extends FSEditLogOp> T get(FSEditLogOpCodes opCode) {
-  return useCache ? (T)cache.get().get(opCode) : (T)newInstance(opCode);
+  return useCache ? (T)CACHE.get().get(opCode) : (T)newInstance(opCode);
 }
 
 private static FSEditLogOp newInstance(FSEditLogOpCodes opCode) {
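
For illustration only (not part of the commit), a short self-contained Java sketch of the per-thread instance cache that OpInstanceCache implements: each thread lazily builds its own map from op code to a reusable op instance, so the hot logging path needs no locking, and the ThreadLocal itself is a static final created once at class initialization. All names below are hypothetical stand-ins for FSEditLogOpCodes/FSEditLogOp, not the Hadoop classes.

import java.util.EnumMap;
import java.util.Map;

public class PerThreadOpCacheSketch {

  // Hypothetical stand-ins for FSEditLogOpCodes and FSEditLogOp.
  enum OpCode { OP_ADD, OP_DELETE, OP_RENAME }

  static class Op {
    final OpCode code;
    Op(OpCode code) { this.code = code; }
  }

  // static final, as in the patch: created once; each thread then gets its
  // own EnumMap on first access.
  private static final ThreadLocal<Map<OpCode, Op>> CACHE =
      ThreadLocal.withInitial(() -> new EnumMap<OpCode, Op>(OpCode.class));

  // Analogous to OpInstanceCache.get(): reuse this thread's cached instance
  // if present, otherwise create and cache it. No locking is needed because
  // the map is confined to the calling thread.
  static Op get(OpCode code) {
    return CACHE.get().computeIfAbsent(code, Op::new);
  }

  public static void main(String[] args) {
    Op first = get(OpCode.OP_ADD);
    Op second = get(OpCode.OP_ADD);
    System.out.println("same cached instance: " + (first == second)); // true
  }
}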





svn commit: r1822338 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/

2018-01-26 Thread sjlee
Author: sjlee
Date: Fri Jan 26 18:48:25 2018
New Revision: 1822338

URL: http://svn.apache.org/viewvc?rev=1822338&view=rev
Log:
Updated Sangjin's company.

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
hadoop/common/site/main/publish/bylaws.pdf
hadoop/common/site/main/publish/committer_criteria.pdf
hadoop/common/site/main/publish/index.pdf
hadoop/common/site/main/publish/issue_tracking.pdf
hadoop/common/site/main/publish/linkmap.pdf
hadoop/common/site/main/publish/mailing_lists.pdf
hadoop/common/site/main/publish/privacy_policy.pdf
hadoop/common/site/main/publish/releases.pdf
hadoop/common/site/main/publish/version_control.pdf
hadoop/common/site/main/publish/versioning.pdf
hadoop/common/site/main/publish/who.html
hadoop/common/site/main/publish/who.pdf

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1822338&r1=1822337&r2=1822338&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Fri 
Jan 26 18:48:25 2018
@@ -442,7 +442,7 @@
 
   sjlee
   http://people.apache.org/~sjlee;>Sangjin Lee
-  Twitter
+  PayPal
   
   -8
 
@@ -1398,7 +1398,7 @@

      sjlee
  http://people.apache.org/~sjlee;>Sangjin Lee
- Twitter
+ PayPal
  
  -8


Modified: hadoop/common/site/main/publish/bylaws.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/bylaws.pdf?rev=1822338&r1=1822337&r2=1822338&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/committer_criteria.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/committer_criteria.pdf?rev=1822338&r1=1822337&r2=1822338&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/index.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/index.pdf?rev=1822338&r1=1822337&r2=1822338&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/issue_tracking.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/issue_tracking.pdf?rev=1822338&r1=1822337&r2=1822338&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/linkmap.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/linkmap.pdf?rev=1822338&r1=1822337&r2=1822338&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/mailing_lists.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/mailing_lists.pdf?rev=1822338&r1=1822337&r2=1822338&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/privacy_policy.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/privacy_policy.pdf?rev=1822338&r1=1822337&r2=1822338&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/releases.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/releases.pdf?rev=1822338&r1=1822337&r2=1822338&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/version_control.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/version_control.pdf?rev=1822338&r1=1822337&r2=1822338&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/versioning.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/versioning.pdf?rev=1822338&r1=1822337&r2=1822338&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/who.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/who.html?rev=1822338&r1=1822337&r2=1822338&view=diff
==
--- hadoop/common/site/main/publish/who.html (original)
+++ hadoop/common/site/main/publish/who.html Fri Jan 26 18:48:25 2018
@@ -851,7 +851,7 @@ document.write("Last Published: " + docu
   
 s

hadoop git commit: YARN-6771. Use classloader inside configuration class to make new classes. Contributed by Jongyoul Lee.

2017-09-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 4767d1bb1 -> f919bcadb


YARN-6771. Use classloader inside configuration class to make new
classes. Contributed by Jongyoul Lee.

(cherry picked from commit 8b336632acad10e45d029596c5e3196e1857d891)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f919bcad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f919bcad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f919bcad

Branch: refs/heads/branch-3.0
Commit: f919bcadb70f6ff1c87ae45b4fa04fa61ec8ddd7
Parents: 4767d1b
Author: Sangjin Lee 
Authored: Wed Sep 20 22:22:43 2017 -0700
Committer: Sangjin Lee 
Committed: Wed Sep 20 22:54:30 2017 -0700

--
 .../impl/pb/RpcClientFactoryPBImpl.java |  3 +-
 .../impl/pb/RpcServerFactoryPBImpl.java |  5 +-
 .../impl/pb/TestRpcClientFactoryPBImpl.java | 49 
 .../impl/pb/TestRpcServerFactoryPBImpl.java | 48 +++
 4 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f919bcad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
index 062fa66..07c5e23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -45,7 +45,6 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
   private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
   
   private static final RpcClientFactoryPBImpl self = new 
RpcClientFactoryPBImpl();
-  private Configuration localConf = new Configuration();
   private ConcurrentMap cache = new 
ConcurrentHashMap();
   
   public static RpcClientFactoryPBImpl get() {
@@ -62,7 +61,7 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
 if (constructor == null) {
   Class pbClazz = null;
   try {
-pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
+pbClazz = conf.getClassByName(getPBImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
 + getPBImplClassName(protocol) + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f919bcad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index 60e549a..ec9a5f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -51,7 +51,6 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
   
   private static final RpcServerFactoryPBImpl self = new 
RpcServerFactoryPBImpl();
 
-  private Configuration localConf = new Configuration();
   private ConcurrentMap serviceCache = new 
ConcurrentHashMap();
   private ConcurrentMap protoCache = new 
ConcurrentHashMap();
   
@@ -80,7 +79,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (constructor == null) {
   Class pbServiceImplClazz = null;
   try {
-pbServiceImplClazz = localConf
+pbServiceImplClazz = conf
 .getClassByName(getPbServiceImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
@@ -113,7 +112,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (method == null) {
   Class protoClazz = null;
   try {
-protoClazz = localConf.getClassByName(getProtoClassName(protocol));
+

[1/2] hadoop git commit: YARN-6771. Use classloader inside configuration class to make new classes. Contributed by Jongyoul Lee.

2017-09-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7d747df52 -> a79422f71
  refs/heads/branch-2.8 c54310a63 -> 6d6758f75


YARN-6771. Use classloader inside configuration class to make new
classes. Contributed by Jongyoul Lee.

(cherry picked from commit 8b336632acad10e45d029596c5e3196e1857d891)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a79422f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a79422f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a79422f7

Branch: refs/heads/branch-2
Commit: a79422f71727ed764fb9e89e3eaf0e079c27991e
Parents: 7d747df
Author: Sangjin Lee 
Authored: Wed Sep 20 22:22:43 2017 -0700
Committer: Sangjin Lee 
Committed: Wed Sep 20 22:46:10 2017 -0700

--
 .../impl/pb/RpcClientFactoryPBImpl.java |  3 +-
 .../impl/pb/RpcServerFactoryPBImpl.java |  5 +-
 .../impl/pb/TestRpcClientFactoryPBImpl.java | 49 
 .../impl/pb/TestRpcServerFactoryPBImpl.java | 48 +++
 4 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a79422f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
index 062fa66..07c5e23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -45,7 +45,6 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
   private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
   
   private static final RpcClientFactoryPBImpl self = new 
RpcClientFactoryPBImpl();
-  private Configuration localConf = new Configuration();
   private ConcurrentMap cache = new 
ConcurrentHashMap();
   
   public static RpcClientFactoryPBImpl get() {
@@ -62,7 +61,7 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
 if (constructor == null) {
   Class pbClazz = null;
   try {
-pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
+pbClazz = conf.getClassByName(getPBImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
 + getPBImplClassName(protocol) + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a79422f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index 60e549a..ec9a5f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -51,7 +51,6 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
   
   private static final RpcServerFactoryPBImpl self = new 
RpcServerFactoryPBImpl();
 
-  private Configuration localConf = new Configuration();
   private ConcurrentMap serviceCache = new 
ConcurrentHashMap();
   private ConcurrentMap protoCache = new 
ConcurrentHashMap();
   
@@ -80,7 +79,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (constructor == null) {
   Class pbServiceImplClazz = null;
   try {
-pbServiceImplClazz = localConf
+pbServiceImplClazz = conf
 .getClassByName(getPbServiceImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
@@ -113,7 +112,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (method == null) {
   Class protoClazz = null;
   try {
-protoClazz = 

[2/2] hadoop git commit: YARN-6771. Use classloader inside configuration class to make new classes. Contributed by Jongyoul Lee.

2017-09-20 Thread sjlee
YARN-6771. Use classloader inside configuration class to make new
classes. Contributed by Jongyoul Lee.

(cherry picked from commit 8b336632acad10e45d029596c5e3196e1857d891)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d6758f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d6758f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d6758f7

Branch: refs/heads/branch-2.8
Commit: 6d6758f751f4f230f90117e58dd0ae1006320a47
Parents: c54310a
Author: Sangjin Lee 
Authored: Wed Sep 20 22:22:43 2017 -0700
Committer: Sangjin Lee 
Committed: Wed Sep 20 22:50:55 2017 -0700

--
 .../impl/pb/RpcClientFactoryPBImpl.java |  3 +-
 .../impl/pb/RpcServerFactoryPBImpl.java |  5 +-
 .../impl/pb/TestRpcClientFactoryPBImpl.java | 49 
 .../impl/pb/TestRpcServerFactoryPBImpl.java | 48 +++
 4 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6758f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
index 062fa66..07c5e23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -45,7 +45,6 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
   private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
   
   private static final RpcClientFactoryPBImpl self = new 
RpcClientFactoryPBImpl();
-  private Configuration localConf = new Configuration();
   private ConcurrentMap cache = new 
ConcurrentHashMap();
   
   public static RpcClientFactoryPBImpl get() {
@@ -62,7 +61,7 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
 if (constructor == null) {
   Class pbClazz = null;
   try {
-pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
+pbClazz = conf.getClassByName(getPBImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
 + getPBImplClassName(protocol) + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d6758f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index 60e549a..ec9a5f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -51,7 +51,6 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
   
   private static final RpcServerFactoryPBImpl self = new 
RpcServerFactoryPBImpl();
 
-  private Configuration localConf = new Configuration();
   private ConcurrentMap serviceCache = new 
ConcurrentHashMap();
   private ConcurrentMap protoCache = new 
ConcurrentHashMap();
   
@@ -80,7 +79,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (constructor == null) {
   Class pbServiceImplClazz = null;
   try {
-pbServiceImplClazz = localConf
+pbServiceImplClazz = conf
 .getClassByName(getPbServiceImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
@@ -113,7 +112,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (method == null) {
   Class protoClazz = null;
   try {
-protoClazz = localConf.getClassByName(getProtoClassName(protocol));
+protoClazz = conf.getClassByName(getProtoClassName(protocol));
   } catch 

hadoop git commit: YARN-6771. Use classloader inside configuration class to make new classes. Contributed by Jongyoul Lee.

2017-09-20 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 53047f934 -> 8b336632a


YARN-6771. Use classloader inside configuration class to make new
classes. Contributed by Jongyoul Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b336632
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b336632
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b336632

Branch: refs/heads/trunk
Commit: 8b336632acad10e45d029596c5e3196e1857d891
Parents: 53047f9
Author: Sangjin Lee 
Authored: Wed Sep 20 22:22:43 2017 -0700
Committer: Sangjin Lee 
Committed: Wed Sep 20 22:22:43 2017 -0700

--
 .../impl/pb/RpcClientFactoryPBImpl.java |  3 +-
 .../impl/pb/RpcServerFactoryPBImpl.java |  5 +-
 .../impl/pb/TestRpcClientFactoryPBImpl.java | 49 
 .../impl/pb/TestRpcServerFactoryPBImpl.java | 48 +++
 4 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b336632/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
index 062fa66..07c5e23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -45,7 +45,6 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
   private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
   
   private static final RpcClientFactoryPBImpl self = new 
RpcClientFactoryPBImpl();
-  private Configuration localConf = new Configuration();
   private ConcurrentMap cache = new 
ConcurrentHashMap();
   
   public static RpcClientFactoryPBImpl get() {
@@ -62,7 +61,7 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
 if (constructor == null) {
   Class pbClazz = null;
   try {
-pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
+pbClazz = conf.getClassByName(getPBImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
 + getPBImplClassName(protocol) + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b336632/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index 60e549a..ec9a5f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -51,7 +51,6 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
   
   private static final RpcServerFactoryPBImpl self = new 
RpcServerFactoryPBImpl();
 
-  private Configuration localConf = new Configuration();
   private ConcurrentMap serviceCache = new 
ConcurrentHashMap();
   private ConcurrentMap protoCache = new 
ConcurrentHashMap();
   
@@ -80,7 +79,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (constructor == null) {
   Class pbServiceImplClazz = null;
   try {
-pbServiceImplClazz = localConf
+pbServiceImplClazz = conf
 .getClassByName(getPbServiceImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
@@ -113,7 +112,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (method == null) {
   Class protoClazz = null;
   try {
-protoClazz = localConf.getClassByName(getProtoClassName(protocol));
+protoClazz = conf.getClassByName(getProtoClassName(protocol));
   } catch 
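
For illustration only (not part of the commit), a minimal Java sketch of why the patch resolves classes through the Configuration passed in by the caller instead of a factory-local "new Configuration()": Configuration.getClassByName() delegates to the classloader attached to that Configuration, so a caller-supplied classloader is honored. This assumes hadoop-common on the classpath; the class name below is hypothetical.

import org.apache.hadoop.conf.Configuration;

public class ConfClassLoaderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // A caller may attach its own classloader (for example an isolated
    // application or plugin classloader); a factory-local Configuration
    // would silently ignore it.
    conf.setClassLoader(Thread.currentThread().getContextClassLoader());

    // Resolution goes through conf's classloader, mirroring the patched
    // conf.getClassByName(getPBImplClassName(protocol)) call.
    Class<?> clazz = conf.getClassByName("java.lang.String");
    System.out.println("loaded " + clazz.getName()
        + " via " + conf.getClassLoader());
  }
}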

hadoop git commit: HADOOP-14366. Maven upgrade broke start-build-env.sh. Contributed by Akira Ajisaka.

2017-05-01 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4cfc86643 -> 14b5c93f3


HADOOP-14366. Maven upgrade broke start-build-env.sh. Contributed by
Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14b5c93f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14b5c93f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14b5c93f

Branch: refs/heads/trunk
Commit: 14b5c93f3cba4a04369989f93f14ea99409aa1d8
Parents: 4cfc866
Author: Sangjin Lee 
Authored: Mon May 1 15:54:01 2017 -0700
Committer: Sangjin Lee 
Committed: Mon May 1 15:54:01 2017 -0700

--
 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b5c93f/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index f939b1d..1775323 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -93,6 +93,7 @@ RUN mkdir -p /opt/maven && \
  -o /opt/maven.tar.gz && \
 tar xzf /opt/maven.tar.gz --strip-components 1 -C /opt/maven
 ENV MAVEN_HOME /opt/maven
+ENV PATH "$PATH:/opt/maven/bin"
 
 ##
 # Install findbugs





hadoop git commit: YARN-6027 addendum. Fixed the broken build for YARN-5355-branch-2. Contributed by Sangjin Lee.

2017-03-01 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 ff55ff274 -> ee514df4b


YARN-6027 addendum. Fixed the broken build for YARN-5355-branch-2. Contributed 
by Sangjin Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee514df4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee514df4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee514df4

Branch: refs/heads/YARN-5355-branch-2
Commit: ee514df4b290ed5e392b4a5538b70a355368d29b
Parents: ff55ff2
Author: Sangjin Lee 
Authored: Wed Mar 1 15:29:17 2017 -0800
Committer: Sangjin Lee 
Committed: Wed Mar 1 15:29:17 2017 -0800

--
 .../AbstractTimelineReaderHBaseTestBase.java| 22 +---
 1 file changed, 5 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee514df4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
index 7853c94..ccfdfd0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
@@ -86,19 +86,7 @@ public abstract class AbstractTimelineReaderHBaseTestBase {
   "org.apache.hadoop.yarn.server.timelineservice.storage."
   + "HBaseTimelineReaderImpl");
   config.setInt("hfile.format.version", 3);
-  server = new TimelineReaderServer() {
-@Override
-protected void setupOptions(Configuration conf) {
-  // The parent code tries to use HttpServer2 from this version of
-  // Hadoop, but the tests are loading in HttpServer2 from
-  // ${hbase-compatible-hadoop.version}. This version uses Jetty 9
-  // while ${hbase-compatible-hadoop.version} uses Jetty 6, and there
-  // are many differences, including classnames and packages.
-  // We do nothing here, so that we don't cause a NoSuchMethodError.
-  // Once ${hbase-compatible-hadoop.version} is changed to Hadoop 3,
-  // we should be able to remove this @Override.
-}
-  };
+  server = new TimelineReaderServer();
   server.init(config);
   server.start();
   serverPort = server.getWebServerPort();
@@ -119,11 +107,11 @@ public abstract class AbstractTimelineReaderHBaseTestBase 
{
 ClientResponse resp =
 client.resource(uri).accept(MediaType.APPLICATION_JSON)
 .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-if (resp == null || resp.getStatusInfo()
-.getStatusCode() != ClientResponse.Status.OK.getStatusCode()) {
+if (resp == null ||
+resp.getClientResponseStatus() != ClientResponse.Status.OK) {
   String msg = "";
   if (resp != null) {
-msg = String.valueOf(resp.getStatusInfo().getStatusCode());
+msg = String.valueOf(resp.getClientResponseStatus());
   }
   throw new IOException(
   "Incorrect response from timeline reader. " + "Status=" + msg);
@@ -137,7 +125,7 @@ public abstract class AbstractTimelineReaderHBaseTestBase {
 .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
 assertNotNull(resp);
 assertTrue("Response from server should have been " + status,
-resp.getStatusInfo().getStatusCode() == status.getStatusCode());
+resp.getClientResponseStatus() == status);
 System.out.println("Response is: " + resp.getEntity(String.class));
   }
 





[1/2] hadoop git commit: YARN-6253. FlowAcitivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. Contributed by Haibo Chen.

2017-02-28 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 ab192fd58 -> 34e7c3029
  refs/heads/YARN-5355-branch-2 db7c3f279 -> 57b945581


YARN-6253. FlowAcitivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. 
Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34e7c302
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34e7c302
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34e7c302

Branch: refs/heads/YARN-5355
Commit: 34e7c30293b5a56f9f745769a29c5666bdb85d6c
Parents: ab192fd
Author: Sangjin Lee 
Authored: Tue Feb 28 16:10:25 2017 -0800
Committer: Sangjin Lee 
Committed: Tue Feb 28 16:10:25 2017 -0800

--
 .../timelineservice/storage/flow/FlowActivityColumnPrefix.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34e7c302/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
index 439e0c8..5e7a5d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
@@ -271,7 +271,7 @@ public enum FlowActivityColumnPrefix
 byte[] columnQualifier = getColumnPrefixBytes(qualifier);
 Attribute[] combinedAttributes =
 HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp);
-column.store(rowKey, tableMutator, columnQualifier, null, inputValue,
+column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
 combinedAttributes);
   }
 }
\ No newline at end of file





[2/2] hadoop git commit: YARN-6253. FlowAcitivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. Contributed by Haibo Chen.

2017-02-28 Thread sjlee
YARN-6253. FlowAcitivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. 
Contributed by Haibo Chen.

(cherry picked from commit 34e7c30293b5a56f9f745769a29c5666bdb85d6c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57b94558
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57b94558
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57b94558

Branch: refs/heads/YARN-5355-branch-2
Commit: 57b94558160b244ba99434b88c1a786792b725a9
Parents: db7c3f2
Author: Sangjin Lee 
Authored: Tue Feb 28 16:10:25 2017 -0800
Committer: Sangjin Lee 
Committed: Tue Feb 28 16:11:01 2017 -0800

--
 .../timelineservice/storage/flow/FlowActivityColumnPrefix.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57b94558/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
index 439e0c8..5e7a5d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
@@ -271,7 +271,7 @@ public enum FlowActivityColumnPrefix
 byte[] columnQualifier = getColumnPrefixBytes(qualifier);
 Attribute[] combinedAttributes =
 HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp);
-column.store(rowKey, tableMutator, columnQualifier, null, inputValue,
+column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
 combinedAttributes);
   }
 }
\ No newline at end of file





[3/4] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/42b69405/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
new file mode 100644
index 000..b5b5f77
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
@@ -0,0 +1,440 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.ConnectException;
+import java.net.HttpURLConnection;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URL;
+import java.net.URLConnection;
+import java.security.GeneralSecurityException;
+import java.security.PrivilegedExceptionAction;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientRequest;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.api.client.filter.ClientFilter;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+/**
+ * Utility Connector class which is used by timeline clients to securely get
+ * connected to the timeline server.
+ *
+ */
+public class TimelineConnector extends AbstractService {
+
+  private static final Joiner JOINER = Joiner.on("");
+  private static final Log LOG = LogFactory.getLog(TimelineConnector.class);
+  public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
+
+  private SSLFactory sslFactory;
+  private Client client;
+  private ConnectionConfigurator connConfigurator;
+  private DelegationTokenAuthenticator authenticator;
+  private DelegationTokenAuthenticatedURL.Token token;
+  private UserGroupInformation authUgi;
+  private String doAsUser;
+  @VisibleForTesting
+  TimelineClientConnectionRetry connectionRetry;
+  private boolean requireConnectionRetry;
+
+  public TimelineConnector(boolean requireConnectionRetry,
+  UserGroupInformation authUgi, String doAsUser,
+  DelegationTokenAuthenticatedURL.Token token) {
+super("TimelineConnector");
+this.requireConnectionRetry = 

[4/4] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate 
classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

(cherry picked from commit 73235ab30361b41293846189f3c5fef321ae7cac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42b69405
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42b69405
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42b69405

Branch: refs/heads/YARN-5355-branch-2
Commit: 42b69405fa24abf3949a680524fd3c52f284fa60
Parents: 47ec7f9
Author: Sangjin Lee 
Authored: Thu Feb 16 18:43:31 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Feb 16 18:58:19 2017 -0800

--
 .../jobhistory/JobHistoryEventHandler.java  |  57 +-
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  14 +-
 .../v2/app/rm/RMContainerAllocator.java |   4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |   8 +-
 .../distributedshell/ApplicationMaster.java | 113 +--
 .../hadoop/yarn/client/api/AMRMClient.java  |  39 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  19 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   5 +-
 .../yarn/client/api/impl/YarnClientImpl.java|  19 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |  94 +--
 .../yarn/client/api/TimelineV2Client.java   |  92 +++
 .../client/api/impl/TimelineClientImpl.java | 822 ++-
 .../yarn/client/api/impl/TimelineConnector.java | 440 ++
 .../client/api/impl/TimelineV2ClientImpl.java   | 459 +++
 .../client/api/impl/TestTimelineClient.java |  39 +-
 .../api/impl/TestTimelineClientV2Impl.java  |   4 +-
 .../timelineservice/NMTimelinePublisher.java|  22 +-
 .../TestNMTimelinePublisher.java|  10 +-
 .../TestTimelineServiceClientIntegration.java   |  10 +-
 19 files changed, 1278 insertions(+), 992 deletions(-)
--
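
The reorganization above splits the old combined client into TimelineClient (ATSv1.x) and TimelineV2Client (ATSv2). A minimal sketch of how calling code might pick between the two after this change follows; it is not taken from the commit, and the factory methods and the timelineServiceV2Enabled() helper are assumptions based on the class names in the diffstat.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.client.api.TimelineV2Client;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TimelineClientSelection {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);

    // Assumed helper; the diff above shows YarnConfiguration.timelineServiceEnabled(conf)
    // being used for the analogous v1 check.
    if (YarnConfiguration.timelineServiceV2Enabled(conf)) {
      // ATSv2: entities are written per application, so the client is created
      // with an ApplicationId (factory name is an assumption).
      TimelineV2Client v2Client = TimelineV2Client.createTimelineClient(appId);
      v2Client.init(conf);
      v2Client.start();
      // ... publish org.apache.hadoop.yarn.api.records.timelineservice entities ...
      v2Client.stop();
    } else {
      // ATSv1.x: one client per process, no application scoping.
      TimelineClient v1Client = TimelineClient.createTimelineClient();
      v1Client.init(conf);
      v1Client.start();
      // ... publish org.apache.hadoop.yarn.api.records.timeline entities ...
      v1Client.stop();
    }
  }
}
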


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42b69405/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index ee5f8bd..4cf42f1 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -72,13 +72,12 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.util.TimelineServiceHelper;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.node.JsonNodeFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
@@ -91,8 +90,6 @@ import com.sun.jersey.api.client.ClientHandlerException;
  */
 public class JobHistoryEventHandler extends AbstractService
 implements EventHandler {
-  private static final JsonNodeFactory FACTORY =
-  new ObjectMapper().getNodeFactory();
 
   private final AppContext context;
   private final int startCount;
@@ -134,9 +131,10 @@ public class JobHistoryEventHandler extends AbstractService
   // should job completion be force when the AM shuts down?
   protected volatile boolean forceJobCompletion = false;
 
+  @VisibleForTesting
   protected TimelineClient timelineClient;
-
-  private boolean timelineServiceV2Enabled = false;
+  @VisibleForTesting
+  protected TimelineV2Client timelineV2Client;
 
   private static String MAPREDUCE_JOB_ENTITY_TYPE = "MAPREDUCE_JOB";
   private static String MAPREDUCE_TASK_ENTITY_TYPE = "MAPREDUCE_TASK";
@@ -269,12 +267,17 @@ public class JobHistoryEventHandler extends AbstractService
 MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) {
   LOG.info("Emitting job history data 

[1/4] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 b92089c0e -> 73235ab30
  refs/heads/YARN-5355-branch-2 47ec7f927 -> 42b69405f


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73235ab3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
new file mode 100644
index 000..b5b5f77
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
@@ -0,0 +1,440 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.ConnectException;
+import java.net.HttpURLConnection;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URL;
+import java.net.URLConnection;
+import java.security.GeneralSecurityException;
+import java.security.PrivilegedExceptionAction;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientRequest;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.api.client.filter.ClientFilter;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+/**
+ * Utility Connector class which is used by timeline clients to securely get
+ * connected to the timeline server.
+ *
+ */
+public class TimelineConnector extends AbstractService {
+
+  private static final Joiner JOINER = Joiner.on("");
+  private static final Log LOG = LogFactory.getLog(TimelineConnector.class);
+  public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
+
+  private SSLFactory sslFactory;
+  private Client client;
+  private ConnectionConfigurator connConfigurator;
+  private DelegationTokenAuthenticator authenticator;
+  private DelegationTokenAuthenticatedURL.Token token;
+  private UserGroupInformation authUgi;
+  private String doAsUser;
+  @VisibleForTesting
+  TimelineClientConnectionRetry connectionRetry;
+  private boolean requireConnectionRetry;
+
+  public TimelineConnector(boolean requireConnectionRetry,
+  UserGroupInformation authUgi, String 

[2/4] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate 
classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73235ab3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73235ab3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73235ab3

Branch: refs/heads/YARN-5355
Commit: 73235ab30361b41293846189f3c5fef321ae7cac
Parents: b92089c
Author: Sangjin Lee 
Authored: Thu Feb 16 18:43:31 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Feb 16 18:43:31 2017 -0800

--
 .../jobhistory/JobHistoryEventHandler.java  |  57 +-
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  14 +-
 .../v2/app/rm/RMContainerAllocator.java |   4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |   8 +-
 .../distributedshell/ApplicationMaster.java | 113 +--
 .../hadoop/yarn/client/api/AMRMClient.java  |  39 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  19 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   5 +-
 .../yarn/client/api/impl/YarnClientImpl.java|  19 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |  94 +--
 .../yarn/client/api/TimelineV2Client.java   |  92 +++
 .../client/api/impl/TimelineClientImpl.java | 823 ++-
 .../yarn/client/api/impl/TimelineConnector.java | 440 ++
 .../client/api/impl/TimelineV2ClientImpl.java   | 459 +++
 .../client/api/impl/TestTimelineClient.java |  39 +-
 .../api/impl/TestTimelineClientV2Impl.java  |   4 +-
 .../timelineservice/NMTimelinePublisher.java|  22 +-
 .../TestNMTimelinePublisher.java|  10 +-
 .../TestTimelineServiceClientIntegration.java   |  10 +-
 19 files changed, 1278 insertions(+), 993 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73235ab3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index dc3a29a..bbc7f6e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -72,13 +72,12 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.util.TimelineServiceHelper;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.node.JsonNodeFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
@@ -91,8 +90,6 @@ import com.sun.jersey.api.client.ClientHandlerException;
  */
 public class JobHistoryEventHandler extends AbstractService
 implements EventHandler {
-  private static final JsonNodeFactory FACTORY =
-  new ObjectMapper().getNodeFactory();
 
   private final AppContext context;
   private final int startCount;
@@ -134,9 +131,10 @@ public class JobHistoryEventHandler extends AbstractService
   // should job completion be force when the AM shuts down?
   protected volatile boolean forceJobCompletion = false;
 
+  @VisibleForTesting
   protected TimelineClient timelineClient;
-
-  private boolean timelineServiceV2Enabled = false;
+  @VisibleForTesting
+  protected TimelineV2Client timelineV2Client;
 
   private static String MAPREDUCE_JOB_ENTITY_TYPE = "MAPREDUCE_JOB";
   private static String MAPREDUCE_TASK_ENTITY_TYPE = "MAPREDUCE_TASK";
@@ -269,12 +267,17 @@ public class JobHistoryEventHandler extends AbstractService
 MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) {
   LOG.info("Emitting job history data to the timeline service is enabled");
   if 

[2/2] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate 
classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4fa1afdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4fa1afdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4fa1afdb

Branch: refs/heads/trunk
Commit: 4fa1afdb883dab8786d2fb5c72a195dd2e87d711
Parents: 5690b51
Author: Sangjin Lee 
Authored: Thu Feb 16 11:41:04 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Feb 16 11:41:04 2017 -0800

--
 .../jobhistory/JobHistoryEventHandler.java  |  57 +-
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  14 +-
 .../v2/app/rm/RMContainerAllocator.java |   4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |   8 +-
 .../distributedshell/ApplicationMaster.java |  98 ++-
 .../hadoop/yarn/client/api/AMRMClient.java  |  40 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  21 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   5 +-
 .../yarn/client/api/impl/YarnClientImpl.java|  15 +-
 .../hadoop/yarn/client/api/TimelineClient.java  |  94 +--
 .../yarn/client/api/TimelineV2Client.java   |  92 +++
 .../client/api/impl/TimelineClientImpl.java | 825 ++-
 .../yarn/client/api/impl/TimelineConnector.java | 440 ++
 .../client/api/impl/TimelineV2ClientImpl.java   | 459 +++
 .../client/api/impl/TestTimelineClient.java |  39 +-
 .../api/impl/TestTimelineClientV2Impl.java  |   4 +-
 .../timelineservice/NMTimelinePublisher.java|  22 +-
 .../TestNMTimelinePublisher.java|  10 +-
 .../TestTimelineServiceClientIntegration.java   |  10 +-
 19 files changed, 1272 insertions(+), 985 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fa1afdb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 0cc605c..285d36e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -72,13 +72,12 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.node.JsonNodeFactory;
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
 
@@ -90,8 +89,6 @@ import com.sun.jersey.api.client.ClientHandlerException;
  */
 public class JobHistoryEventHandler extends AbstractService
 implements EventHandler {
-  private static final JsonNodeFactory FACTORY =
-  new ObjectMapper().getNodeFactory();
 
   private final AppContext context;
   private final int startCount;
@@ -133,9 +130,10 @@ public class JobHistoryEventHandler extends AbstractService
   // should job completion be force when the AM shuts down?
   protected volatile boolean forceJobCompletion = false;
 
+  @VisibleForTesting
   protected TimelineClient timelineClient;
-
-  private boolean timelineServiceV2Enabled = false;
+  @VisibleForTesting
+  protected TimelineV2Client timelineV2Client;
 
   private static String MAPREDUCE_JOB_ENTITY_TYPE = "MAPREDUCE_JOB";
   private static String MAPREDUCE_TASK_ENTITY_TYPE = "MAPREDUCE_TASK";
@@ -268,12 +266,17 @@ public class JobHistoryEventHandler extends AbstractService
 MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) {
   LOG.info("Emitting job history data to the timeline service is enabled");
   if (YarnConfiguration.timelineServiceEnabled(conf)) {
-
-timelineClient 

[1/2] hadoop git commit: YARN-4675. Reorganize TimelineClient and TimelineClientImpl into separate classes for ATSv1.x and ATSv2. Contributed by Naganarasimha G R.

2017-02-16 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5690b51ef -> 4fa1afdb8


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fa1afdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
new file mode 100644
index 000..b5b5f77
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
@@ -0,0 +1,440 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.ConnectException;
+import java.net.HttpURLConnection;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URL;
+import java.net.URLConnection;
+import java.security.GeneralSecurityException;
+import java.security.PrivilegedExceptionAction;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientRequest;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.api.client.filter.ClientFilter;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+/**
+ * Utility Connector class which is used by timeline clients to securely get
+ * connected to the timeline server.
+ *
+ */
+public class TimelineConnector extends AbstractService {
+
+  private static final Joiner JOINER = Joiner.on("");
+  private static final Log LOG = LogFactory.getLog(TimelineConnector.class);
+  public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute
+
+  private SSLFactory sslFactory;
+  private Client client;
+  private ConnectionConfigurator connConfigurator;
+  private DelegationTokenAuthenticator authenticator;
+  private DelegationTokenAuthenticatedURL.Token token;
+  private UserGroupInformation authUgi;
+  private String doAsUser;
+  @VisibleForTesting
+  TimelineClientConnectionRetry connectionRetry;
+  private boolean requireConnectionRetry;
+
+  public TimelineConnector(boolean requireConnectionRetry,
+  UserGroupInformation authUgi, String doAsUser,
+  DelegationTokenAuthenticatedURL.Token token) {
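
A minimal wiring sketch for the connector declared above; it is not part of the diff. Only the constructor signature comes from the new file, and the lifecycle calls shown are assumptions about how an owning client might drive it.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.yarn.client.api.impl.TimelineConnector;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TimelineConnectorSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new YarnConfiguration();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    DelegationTokenAuthenticatedURL.Token token =
        new DelegationTokenAuthenticatedURL.Token();

    // Retries enabled, no proxy (doAs) user; arguments follow the constructor
    // signature shown in the new file above.
    TimelineConnector connector = new TimelineConnector(true, ugi, null, token);
    connector.init(conf);
    connector.start();
    // ... the owning client would use the connector to build its Jersey client
    // and open authenticated connections to the timeline server ...
    connector.stop();
  }
}
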

[1/2] hadoop git commit: YARN-3637. Handle localization sym-linking correctly at the YARN level. Contributed by Chris Trezzo.

2017-01-25 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c4d3b28c2 -> b799ea764
  refs/heads/trunk cd59b9cca -> 425a7e502


YARN-3637. Handle localization sym-linking correctly at the YARN level. 
Contributed by Chris Trezzo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/425a7e50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/425a7e50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/425a7e50

Branch: refs/heads/trunk
Commit: 425a7e502869c4250aba927ecc3c6f3c561c6ff2
Parents: cd59b9c
Author: Sangjin Lee 
Authored: Wed Jan 25 15:51:36 2017 -0800
Committer: Sangjin Lee 
Committed: Wed Jan 25 15:51:36 2017 -0800

--
 .../yarn/client/api/SharedCacheClient.java  | 23 +---
 .../client/api/impl/SharedCacheClientImpl.java  | 31 ++--
 .../api/impl/TestSharedCacheClientImpl.java | 37 +---
 3 files changed, 81 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/425a7e50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
index 7cbe0e1..60c1bd98 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
@@ -55,22 +55,37 @@ public abstract class SharedCacheClient extends AbstractService {
* {@link ApplicationId} to identify which application will be using the
* resource.
* 
-   * 
+   *
* 
* The SharedCacheManager responds with whether or not the
* resource exists in the cache. If the resource exists, a Path
* to the resource in the shared cache is returned. If the resource does not
* exist, null is returned instead.
* 
-   * 
+   *
+   * 
+   * Once a path has been returned for a resource, that path is safe to use for
+   * the lifetime of the application that corresponds to the provided
+   * ApplicationId.
+   * 
+   *
+   * 
+   * Additionally, a name for the resource should be specified. A fragment will
+   * be added to the path with the desired name if the desired name is different
+   * than the name of the provided path from the shared cache. This ensures that
+   * if the returned path is used to create a LocalResource, then the symlink
+   * created during YARN localization will match the name specified.
+   * 
+   *
* @param applicationId ApplicationId of the application using the resource
* @param resourceKey the key (i.e. checksum) that identifies the resource
+   * @param resourceName the desired name of the resource
* @return Path to the resource, or null if it does not exist
*/
   @Public
   @Unstable
-  public abstract Path use(ApplicationId applicationId, String resourceKey)
-  throws YarnException;
+  public abstract Path use(ApplicationId applicationId, String resourceKey,
+  String resourceName) throws YarnException;
 
   /**
* 
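
A minimal usage sketch for the new three-argument use() described in the javadoc above; it is not part of this change. The createSharedCacheClient() factory is an assumption, and the checksum and jar name are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.SharedCacheClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class SharedCacheUseSketch {
  public static void main(String[] args) throws YarnException {
    Configuration conf = new YarnConfiguration();
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);

    // Factory method name is an assumption.
    SharedCacheClient scClient = SharedCacheClient.createSharedCacheClient();
    scClient.init(conf);
    scClient.start();
    try {
      String resourceKey = "0123456789abcdef";          // placeholder checksum
      // Request the cached resource under the name the job expects. If the
      // cached file name differs, a "#my-job.jar" fragment is appended to the
      // returned Path so the YARN-localized symlink is still named my-job.jar.
      Path cached = scClient.use(appId, resourceKey, "my-job.jar");
      if (cached == null) {
        // Cache miss: the application uploads the resource itself instead.
        System.out.println("Resource not in the shared cache");
      } else {
        System.out.println("Use " + cached + " to build the LocalResource");
      }
    } finally {
      scClient.stop();
    }
  }
}
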

http://git-wip-us.apache.org/repos/asf/hadoop/blob/425a7e50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
index 0a61ee0..b910c28 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.client.api.impl;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -111,8 +113,8 @@ public class SharedCacheClientImpl extends SharedCacheClient {
   }
 
   @Override
-  public Path use(ApplicationId applicationId, String resourceKey)
-  throws YarnException {
+  public Path use(ApplicationId applicationId, String 

[2/2] hadoop git commit: YARN-3637. Handle localization sym-linking correctly at the YARN level. Contributed by Chris Trezzo.

2017-01-25 Thread sjlee
YARN-3637. Handle localization sym-linking correctly at the YARN level. 
Contributed by Chris Trezzo.

(cherry picked from commit 425a7e502869c4250aba927ecc3c6f3c561c6ff2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b799ea76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b799ea76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b799ea76

Branch: refs/heads/branch-2
Commit: b799ea7641535408c80735b71b6711d87d212800
Parents: c4d3b28
Author: Sangjin Lee 
Authored: Wed Jan 25 15:51:36 2017 -0800
Committer: Sangjin Lee 
Committed: Wed Jan 25 15:52:05 2017 -0800

--
 .../yarn/client/api/SharedCacheClient.java  | 23 +---
 .../client/api/impl/SharedCacheClientImpl.java  | 31 ++--
 .../api/impl/TestSharedCacheClientImpl.java | 37 +---
 3 files changed, 81 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b799ea76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
index 7cbe0e1..60c1bd98 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
@@ -55,22 +55,37 @@ public abstract class SharedCacheClient extends AbstractService {
* {@link ApplicationId} to identify which application will be using the
* resource.
* 
-   * 
+   *
* 
* The SharedCacheManager responds with whether or not the
* resource exists in the cache. If the resource exists, a Path
* to the resource in the shared cache is returned. If the resource does not
* exist, null is returned instead.
* 
-   * 
+   *
+   * 
+   * Once a path has been returned for a resource, that path is safe to use for
+   * the lifetime of the application that corresponds to the provided
+   * ApplicationId.
+   * 
+   *
+   * 
+   * Additionally, a name for the resource should be specified. A fragment will
+   * be added to the path with the desired name if the desired name is different
+   * than the name of the provided path from the shared cache. This ensures that
+   * if the returned path is used to create a LocalResource, then the symlink
+   * created during YARN localization will match the name specified.
+   * 
+   *
* @param applicationId ApplicationId of the application using the resource
* @param resourceKey the key (i.e. checksum) that identifies the resource
+   * @param resourceName the desired name of the resource
* @return Path to the resource, or null if it does not exist
*/
   @Public
   @Unstable
-  public abstract Path use(ApplicationId applicationId, String resourceKey)
-  throws YarnException;
+  public abstract Path use(ApplicationId applicationId, String resourceKey,
+  String resourceName) throws YarnException;
 
   /**
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b799ea76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
index 0a61ee0..b910c28 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.client.api.impl;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -111,8 +113,8 @@ public class SharedCacheClientImpl extends SharedCacheClient {
   }
 
   @Override
-  public Path use(ApplicationId applicationId, String resourceKey)
-  throws YarnException {
+  public Path use(ApplicationId applicationId, String resourceKey,
+  String resourceName) throws 

hadoop git commit: YARN-6117. SharedCacheManager does not start up. Contributed by Chris Trezzo.

2017-01-25 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 45407acae -> c4d3b28c2


YARN-6117. SharedCacheManager does not start up. Contributed by Chris Trezzo.

(cherry picked from commit dc6ec9704829f180ce0e182c436fe1a435744c88)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4d3b28c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4d3b28c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4d3b28c

Branch: refs/heads/branch-2
Commit: c4d3b28c2dfb7d3e742cbcd584c786e1c435b404
Parents: 45407ac
Author: Sangjin Lee 
Authored: Mon Jan 23 21:07:25 2017 -0800
Committer: Sangjin Lee 
Committed: Wed Jan 25 15:36:07 2017 -0800

--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml  | 1 +
 .../hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep | 0
 2 files changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d3b28c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 64aa275..c7ca779 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -214,6 +214,7 @@
 src/main/resources/webapps/jobhistory/.keep
 src/main/resources/webapps/yarn/.keep
 
src/main/resources/webapps/applicationhistory/.keep
+src/main/resources/webapps/sharedcache/.keep
 src/main/resources/webapps/cluster/.keep
 src/main/resources/webapps/test/.keep
 src/main/resources/webapps/proxy/.keep

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4d3b28c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep
new file mode 100644
index 000..e69de29





hadoop git commit: YARN-6117. SharedCacheManager does not start up. Contributed by Chris Trezzo.

2017-01-23 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk a2c50127d -> dc6ec9704


YARN-6117. SharedCacheManager does not start up. Contributed by Chris Trezzo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc6ec970
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc6ec970
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc6ec970

Branch: refs/heads/trunk
Commit: dc6ec9704829f180ce0e182c436fe1a435744c88
Parents: a2c5012
Author: Sangjin Lee 
Authored: Mon Jan 23 21:07:25 2017 -0800
Committer: Sangjin Lee 
Committed: Mon Jan 23 21:07:25 2017 -0800

--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml  | 1 +
 .../hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep | 0
 2 files changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc6ec970/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index ca410f6..5707444 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -220,6 +220,7 @@
 src/main/resources/webapps/yarn/.keep
 
src/main/resources/webapps/applicationhistory/.keep
 src/main/resources/webapps/timeline/.keep
+src/main/resources/webapps/sharedcache/.keep
 src/main/resources/webapps/cluster/.keep
 src/main/resources/webapps/test/.keep
 src/main/resources/webapps/proxy/.keep

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc6ec970/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/sharedcache/.keep
new file mode 100644
index 000..e69de29





[07/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
new file mode 100644
index 000..7d37206
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+public class TestSeparator {
+
+  private static String villain = "Dr. Heinz Doofenshmirtz";
+  private static String special =
+  ".   *   |   ?   +   \t   (   )   [   ]   {   }   ^   $  \\ \"  %";
+
+  /**
+   *
+   */
+  @Test
+  public void testEncodeDecodeString() {
+
+for (Separator separator : Separator.values()) {
+  testEncodeDecode(separator, "");
+  testEncodeDecode(separator, " ");
+  testEncodeDecode(separator, "!");
+  testEncodeDecode(separator, "?");
+  testEncodeDecode(separator, "&");
+  testEncodeDecode(separator, "+");
+  testEncodeDecode(separator, "\t");
+  testEncodeDecode(separator, "Dr.");
+  testEncodeDecode(separator, "Heinz");
+  testEncodeDecode(separator, "Doofenshmirtz");
+  testEncodeDecode(separator, villain);
+  testEncodeDecode(separator, special);
+
+  assertNull(separator.encode(null));
+
+}
+  }
+
+  private void testEncodeDecode(Separator separator, String token) {
+String encoded = separator.encode(token);
+String decoded = separator.decode(encoded);
+String msg = "token:" + token + " separator:" + separator + ".";
+assertEquals(msg, token, decoded);
+  }
+
+  @Test
+  public void testEncodeDecode() {
+testEncodeDecode("Dr.", Separator.QUALIFIERS);
+testEncodeDecode("Heinz", Separator.QUALIFIERS, Separator.QUALIFIERS);
+testEncodeDecode("Doofenshmirtz", Separator.QUALIFIERS, null,
+Separator.QUALIFIERS);
+testEncodeDecode("", Separator.QUALIFIERS, Separator.VALUES, null);
+testEncodeDecode("the ", Separator.QUALIFIERS, Separator.SPACE);
+testEncodeDecode("Platypus...", (Separator) null);
+testEncodeDecode("The what now ?!?", Separator.QUALIFIERS,
+Separator.VALUES, Separator.SPACE);
+
+  }
+  @Test
+  public void testEncodedValues() {
+testEncodeDecode("Double-escape %2$ and %9$ or %%2$ or %%3$, nor  %%%2$" +
+"= no problem!",
+Separator.QUALIFIERS, Separator.VALUES, Separator.SPACE, Separator.TAB);
+  }
+
+  @Test
+  public void testSplits() {
+byte[] maxLongBytes = Bytes.toBytes(Long.MAX_VALUE);
+byte[] maxIntBytes = Bytes.toBytes(Integer.MAX_VALUE);
+for (Separator separator : Separator.values()) {
+  String str1 = "cl" + separator.getValue() + "us";
+  String str2 = separator.getValue() + "rst";
+  byte[] sepByteArr = Bytes.toBytes(separator.getValue());
+  byte[] longVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxLongBytes,
+  sepByteArr.length, Bytes.SIZEOF_LONG - sepByteArr.length));
+  byte[] intVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxIntBytes,
+  sepByteArr.length, Bytes.SIZEOF_INT - sepByteArr.length));
+  byte[] arr = separator.join(
+  

[16/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
deleted file mode 100644
index 5bacf66..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
-
-/**
- * The base class for reading timeline data from the HBase storage. This class
- * provides basic support to validate and augment reader context.
- */
-public abstract class AbstractTimelineStorageReader {
-
-  private final TimelineReaderContext context;
-  /**
-   * Used to look up the flow context.
-   */
-  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
-
-  public AbstractTimelineStorageReader(TimelineReaderContext ctxt) {
-context = ctxt;
-  }
-
-  protected TimelineReaderContext getContext() {
-return context;
-  }
-
-  /**
-   * Looks up flow context from AppToFlow table.
-   *
-   * @param appToFlowRowKey to identify Cluster and App Ids.
-   * @param clusterId the cluster id.
-   * @param hbaseConf HBase configuration.
-   * @param conn HBase Connection.
-   * @return flow context information.
-   * @throws IOException if any problem occurs while fetching flow information.
-   */
-  protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
-  String clusterId, Configuration hbaseConf, Connection conn)
-  throws IOException {
-byte[] rowKey = appToFlowRowKey.getRowKey();
-Get get = new Get(rowKey);
-Result result = appToFlowTable.getResult(hbaseConf, conn, get);
-if (result != null && !result.isEmpty()) {
-  Object flowName =
-  AppToFlowColumnPrefix.FLOW_NAME.readResult(result, clusterId);
-  Object flowRunId =
-  AppToFlowColumnPrefix.FLOW_RUN_ID.readResult(result, clusterId);
-  Object userId =
-  AppToFlowColumnPrefix.USER_ID.readResult(result, clusterId);
-  if (flowName == null || userId == null || flowRunId == null) {
-throw new NotFoundException(
-"Unable to find the context flow name, and flow run id, "
-+ "and user id for clusterId=" + clusterId
-+ ", appId=" + appToFlowRowKey.getAppId());
-  }
-  return new FlowContext((String)userId, (String)flowName,
-  ((Number)flowRunId).longValue());
-} else {
-  throw new NotFoundException(
-  "Unable to find the context flow name, and flow run id, "
-  + "and user id for clusterId=" + clusterId
-  + ", appId=" + appToFlowRowKey.getAppId());
-}
-  }
-
-  /**
-* Sets certain parameters to defaults if the values are not provided.
-*
-* @param hbaseConf HBase Configuration.
-* @param conn HBase Connection.
-* @throws 

[21/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
new file mode 100644
index 000..7d37206
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+public class TestSeparator {
+
+  private static String villain = "Dr. Heinz Doofenshmirtz";
+  private static String special =
+  ".   *   |   ?   +   \t   (   )   [   ]   {   }   ^   $  \\ \"  %";
+
+  /**
+   *
+   */
+  @Test
+  public void testEncodeDecodeString() {
+
+for (Separator separator : Separator.values()) {
+  testEncodeDecode(separator, "");
+  testEncodeDecode(separator, " ");
+  testEncodeDecode(separator, "!");
+  testEncodeDecode(separator, "?");
+  testEncodeDecode(separator, "&");
+  testEncodeDecode(separator, "+");
+  testEncodeDecode(separator, "\t");
+  testEncodeDecode(separator, "Dr.");
+  testEncodeDecode(separator, "Heinz");
+  testEncodeDecode(separator, "Doofenshmirtz");
+  testEncodeDecode(separator, villain);
+  testEncodeDecode(separator, special);
+
+  assertNull(separator.encode(null));
+
+}
+  }
+
+  private void testEncodeDecode(Separator separator, String token) {
+String encoded = separator.encode(token);
+String decoded = separator.decode(encoded);
+String msg = "token:" + token + " separator:" + separator + ".";
+assertEquals(msg, token, decoded);
+  }
+
+  @Test
+  public void testEncodeDecode() {
+testEncodeDecode("Dr.", Separator.QUALIFIERS);
+testEncodeDecode("Heinz", Separator.QUALIFIERS, Separator.QUALIFIERS);
+testEncodeDecode("Doofenshmirtz", Separator.QUALIFIERS, null,
+Separator.QUALIFIERS);
+testEncodeDecode("", Separator.QUALIFIERS, Separator.VALUES, null);
+testEncodeDecode("the ", Separator.QUALIFIERS, Separator.SPACE);
+testEncodeDecode("Platypus...", (Separator) null);
+testEncodeDecode("The what now ?!?", Separator.QUALIFIERS,
+Separator.VALUES, Separator.SPACE);
+
+  }
+  @Test
+  public void testEncodedValues() {
+testEncodeDecode("Double-escape %2$ and %9$ or %%2$ or %%3$, nor  %%%2$" +
+"= no problem!",
+Separator.QUALIFIERS, Separator.VALUES, Separator.SPACE, Separator.TAB);
+  }
+
+  @Test
+  public void testSplits() {
+byte[] maxLongBytes = Bytes.toBytes(Long.MAX_VALUE);
+byte[] maxIntBytes = Bytes.toBytes(Integer.MAX_VALUE);
+for (Separator separator : Separator.values()) {
+  String str1 = "cl" + separator.getValue() + "us";
+  String str2 = separator.getValue() + "rst";
+  byte[] sepByteArr = Bytes.toBytes(separator.getValue());
+  byte[] longVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxLongBytes,
+  sepByteArr.length, Bytes.SIZEOF_LONG - sepByteArr.length));
+  byte[] intVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxIntBytes,
+  sepByteArr.length, Bytes.SIZEOF_INT - sepByteArr.length));
+  byte[] arr = separator.join(
+  

[15/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
deleted file mode 100644
index f6904c5..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ /dev/null
@@ -1,628 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for generic entities that are stored in the entity
- * table.
- */
-class GenericEntityReader extends TimelineEntityReader {
-  private static final EntityTable ENTITY_TABLE = new EntityTable();
-
-  /**
-   * Used to convert strings key components to and from storage format.
-   */
-  private final KeyConverter stringKeyConverter =
-  new StringKeyConverter();
-
-  public GenericEntityReader(TimelineReaderContext ctxt,
-  TimelineEntityFilters entityFilters, TimelineDataToRetrieve 

[06/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
deleted file mode 100644
index da62fdf..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the application table.
- */
-public class ApplicationRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final KeyConverter appRowKeyConverter =
-  new ApplicationRowKeyConverter();
-
-  public ApplicationRowKey(String clusterId, String userId, String flowName,
-  Long flowRunId, String appId) {
-this.clusterId = clusterId;
-this.userId = userId;
-this.flowName = flowName;
-this.flowRunId = flowRunId;
-this.appId = appId;
-  }
-
-  public String getClusterId() {
-return clusterId;
-  }
-
-  public String getUserId() {
-return userId;
-  }
-
-  public String getFlowName() {
-return flowName;
-  }
-
-  public Long getFlowRunId() {
-return flowRunId;
-  }
-
-  public String getAppId() {
-return appId;
-  }
-
-  /**
-   * Constructs a row key for the application table as follows:
-   * {@code clusterId!userName!flowName!flowRunId!AppId}.
-   *
-   * @return byte array with the row key
-   */
-  public byte[] getRowKey() {
-return appRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey Byte representation of row key.
-   * @return An ApplicationRowKey object.
-   */
-  public static ApplicationRowKey parseRowKey(byte[] rowKey) {
-return new ApplicationRowKeyConverter().decode(rowKey);
-  }
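As an aside for readers following the move: the Javadoc above describes an encode/parse round trip, which can be sketched as below. This is illustrative only, since the class is simply relocating to the new hbase module in this patch; the cluster, user, flow and application id values are made up.

import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;

public class ApplicationRowKeySketch {
  public static void main(String[] args) {
    // clusterId!userName!flowName!flowRunId!appId, encoded to the HBase row key.
    ApplicationRowKey key = new ApplicationRowKey(
        "cluster1", "user1", "dailyETL", 1484870508000L,
        "application_1484870508000_0001");
    byte[] rowKey = key.getRowKey();

    // parseRowKey() reverses the encoding into an equivalent key object.
    ApplicationRowKey parsed = ApplicationRowKey.parseRowKey(rowKey);
    if (!"dailyETL".equals(parsed.getFlowName())
        || !"application_1484870508000_0001".equals(parsed.getAppId())) {
      throw new AssertionError("row key round trip lost information");
    }
  }
}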
-
-  /**
-   * Encodes and decodes row key for application table. The row key is of the
-   * form: clusterId!userName!flowName!flowRunId!appId. flowRunId is a long,
-   * appId is encoded and decoded using {@link AppIdKeyConverter} and the rest
-   * are strings.
-   * 
-   */
-  final private static class ApplicationRowKeyConverter implements
-  KeyConverter {
-
-private final KeyConverter appIDKeyConverter =
-new AppIdKeyConverter();
-
-/**
- * Intended for use in ApplicationRowKey only.
- */
-private ApplicationRowKeyConverter() {
-}
-
-/**
- * Application row key is of the form
- * clusterId!userName!flowName!flowRunId!appId, with each segment separated
- * by !. The sizes below indicate the size of each of these segments in
- * sequence. clusterId, userName and flowName are strings. flowRunId is a
- * long and hence 8 bytes in size. The app id is represented as 12 bytes:
- * the cluster timestamp part of the app id takes 8 bytes (long) and the
- * sequence id takes 4 bytes (int). Strings are variable in size (i.e. they
- * end whenever the separator is encountered), which is used while decoding
- * to determine where to split.
- 

[18/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
deleted file mode 100644
index 93b4b36..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies fully qualified columns for the {@link EntityTable}.
- */
-public enum EntityColumn implements Column {
-
-  /**
-   * Identifier for the entity.
-   */
-  ID(EntityColumnFamily.INFO, "id"),
-
-  /**
-   * The type of entity.
-   */
-  TYPE(EntityColumnFamily.INFO, "type"),
-
-  /**
-   * When the entity was created.
-   */
-  CREATED_TIME(EntityColumnFamily.INFO, "created_time", new LongConverter()),
-
-  /**
-   * The version of the flow that this entity belongs to.
-   */
-  FLOW_VERSION(EntityColumnFamily.INFO, "flow_version");
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier) {
-this(columnFamily, columnQualifier, GenericConverter.getInstance());
-  }
-
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier, ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes =
-Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  public void store(byte[] rowKey,
-  TypedBufferedMutator tableMutator, Long timestamp,
-  Object inputValue, Attribute... attributes) throws IOException {
-column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
-inputValue, attributes);
-  }
-
-  public Object readResult(Result result) throws IOException {
-return column.readResult(result, columnQualifierBytes);
-  }
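A small usage sketch for the two methods above: given a Result from a scan of the entity table, each enum constant can pull its own cell out using the converter it was declared with (CREATED_TIME, for instance, comes back through LongConverter). The helper class and method names below are illustrative, not part of the patch.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;

/** Illustrative helper: read the fixed entity columns from a scanned row. */
public final class EntityColumnReadSketch {
  private EntityColumnReadSketch() {
  }

  public static void printBasicFields(Result row) throws IOException {
    // Each constant carries its qualifier bytes and value converter, so a
    // read is a simple lookup against the Result.
    Object id = EntityColumn.ID.readResult(row);
    Object type = EntityColumn.TYPE.readResult(row);
    Object createdTime = EntityColumn.CREATED_TIME.readResult(row);
    System.out.println(id + " (" + type + ") created at " + createdTime);
  }
}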
-
-  /**
-   * Retrieve an {@link EntityColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnQualifier Name of the column to retrieve
-   * @return the corresponding {@link EntityColumn} or null
-   */
-  public static final EntityColumn columnFor(String 

[12/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..b9815eb
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,389 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not to be
+ * written to directly by clients.
+ *
+ * @param  refers to the table.
+ */
+public class ColumnHelper {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  public ColumnHelper(ColumnFamily columnFamily) {
+this(columnFamily, GenericConverter.getInstance());
+  }
+
+  public ColumnHelper(ColumnFamily columnFamily, ValueConverter converter) {
+this.columnFamily = columnFamily;
+columnFamilyBytes = columnFamily.getBytes();
+if (converter == null) {
+  this.converter = GenericConverter.getInstance();
+} else {
+  this.converter = converter;
+}
+  }
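For orientation, a brief sketch of the two constructors above: the one-argument form falls back to GenericConverter, while explicit columns that store typed values (such as created-time longs) pass their own converter. The archive strips type parameters from the quoted diff, so the generics below are an assumption based on the surrounding classes.

import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;

public class ColumnHelperConstructionSketch {
  public static void main(String[] args) {
    // Default converter (GenericConverter) for free-form values.
    ColumnHelper<EntityTable> generic =
        new ColumnHelper<EntityTable>(EntityColumnFamily.INFO);
    // Typed converter, as a created-time style column would use.
    ColumnHelper<EntityTable> longs =
        new ColumnHelper<EntityTable>(EntityColumnFamily.INFO, new LongConverter());
    System.out.println(generic + " / " + longs);
  }
}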
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent
+   * over the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param columnQualifier
+   *  column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *  version timestamp. When null, the current timestamp multiplied by
+   *  TimestampGenerator.TS_MULTIPLIER, with the last 3 digits of the
+   *  app id added, will be used
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during the store operation
+   *  (sending the mutation to the table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator tableMutator,
+  byte[] columnQualifier, Long timestamp, Object inputValue,
+  Attribute... attributes) throws IOException {
+if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) {
+  return;
+}
+Put p = new Put(rowKey);
+timestamp = getPutTimestamp(timestamp, attributes);
+p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+converter.encodeValue(inputValue));
+if ((attributes != null) && (attributes.length > 0)) {
+  for (Attribute attribute : attributes) {
+p.setAttribute(attribute.getName(), 

[10/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 000..bb77e36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final KeyConverter flowActivityRowKeyConverter =
+  new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+  String flowName) {
+this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *  top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *  timestamp will be converted to the top-of-the-day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+  String flowName, boolean convertDayTsToTopOfDay) {
+this.clusterId = clusterId;
+if (convertDayTsToTopOfDay && (timestamp != null)) {
+  this.dayTs =
+  HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
+} else {
+  this.dayTs = timestamp;
+}
+this.userId = userId;
+this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+return dayTs;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A FlowActivityRowKey object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
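A hedged sketch of the round trip plus the day rounding described above: the public constructor rolls the supplied timestamp to the top of the day before it is encoded, so the parsed key carries the day bucket rather than the raw event time. Sample values are made up.

import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;

public class FlowActivityRowKeySketch {
  public static void main(String[] args) {
    long eventTime = System.currentTimeMillis();
    // The public constructor converts eventTime to the top-of-the-day timestamp.
    FlowActivityRowKey key =
        new FlowActivityRowKey("cluster1", eventTime, "user1", "dailyETL");
    byte[] rowKey = key.getRowKey();

    FlowActivityRowKey parsed = FlowActivityRowKey.parseRowKey(rowKey);
    System.out.println("day bucket = " + parsed.getDayTimestamp()
        + ", flow = " + parsed.getFlowName());
  }
}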
+
+  /**
+   * Encodes and decodes row key for the flow activity table. The row key is of
+   * the form: clusterId!dayTimestamp!user!flowName. dayTimestamp (the top of
+   * the day timestamp) is a long and the rest are strings.
+   * 
+   */
+  final private static class FlowActivityRowKeyConverter implements
+   

[20/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
deleted file mode 100644
index da62fdf..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the application table.
- */
-public class ApplicationRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final KeyConverter appRowKeyConverter =
-  new ApplicationRowKeyConverter();
-
-  public ApplicationRowKey(String clusterId, String userId, String flowName,
-  Long flowRunId, String appId) {
-this.clusterId = clusterId;
-this.userId = userId;
-this.flowName = flowName;
-this.flowRunId = flowRunId;
-this.appId = appId;
-  }
-
-  public String getClusterId() {
-return clusterId;
-  }
-
-  public String getUserId() {
-return userId;
-  }
-
-  public String getFlowName() {
-return flowName;
-  }
-
-  public Long getFlowRunId() {
-return flowRunId;
-  }
-
-  public String getAppId() {
-return appId;
-  }
-
-  /**
-   * Constructs a row key for the application table as follows:
-   * {@code clusterId!userName!flowName!flowRunId!AppId}.
-   *
-   * @return byte array with the row key
-   */
-  public byte[] getRowKey() {
-return appRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey Byte representation of row key.
-   * @return An ApplicationRowKey object.
-   */
-  public static ApplicationRowKey parseRowKey(byte[] rowKey) {
-return new ApplicationRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for application table. The row key is of the
-   * form: clusterId!userName!flowName!flowRunId!appId. flowRunId is a long,
-   * appId is encoded and decoded using {@link AppIdKeyConverter} and the rest
-   * are strings.
-   * 
-   */
-  final private static class ApplicationRowKeyConverter implements
-  KeyConverter {
-
-private final KeyConverter appIDKeyConverter =
-new AppIdKeyConverter();
-
-/**
- * Intended for use in ApplicationRowKey only.
- */
-private ApplicationRowKeyConverter() {
-}
-
-/**
- * Application row key is of the form
- * clusterId!userName!flowName!flowRunId!appId, with each segment separated
- * by !. The sizes below indicate the size of each of these segments in
- * sequence. clusterId, userName and flowName are strings. flowRunId is a
- * long and hence 8 bytes in size. The app id is represented as 12 bytes:
- * the cluster timestamp part of the app id takes 8 bytes (long) and the
- * sequence id takes 4 bytes (int). Strings are variable in size (i.e. they
- * end whenever the separator is encountered), which is used while decoding
- * to determine where to split.
- 

[24/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 000..bb77e36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final KeyConverter flowActivityRowKeyConverter =
+  new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+  String flowName) {
+this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *  top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *  timestamp will be converted to the top-of-the-day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+  String flowName, boolean convertDayTsToTopOfDay) {
+this.clusterId = clusterId;
+if (convertDayTsToTopOfDay && (timestamp != null)) {
+  this.dayTs =
+  HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
+} else {
+  this.dayTs = timestamp;
+}
+this.userId = userId;
+this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+return dayTs;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A FlowActivityRowKey object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for the flow activity table. The row key is of
+   * the form: clusterId!dayTimestamp!user!flowName. dayTimestamp (the top of
+   * the day timestamp) is a long and the rest are strings.
+   * 
+   */
+  final private static class FlowActivityRowKeyConverter implements
+   

[22/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
new file mode 100644
index 000..cedf96a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for flow run entities that are stored in the flow run
+ * table.
+ */
+class FlowRunEntityReader extends TimelineEntityReader {
+  private static final FlowRunTable FLOW_RUN_TABLE = new FlowRunTable();
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+super(ctxt, entityFilters, toRetrieve);
+  }
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineDataToRetrieve toRetrieve) {
+super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link FlowRunTable}.
+   */
+  @Override
+  protected BaseTable getTable() {
+return FLOW_RUN_TABLE;
+  }
+
+  @Override
+  protected void validateParams() 

[17/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
deleted file mode 100644
index 90dd345..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-/**
- * Identifies fully qualified columns for the {@link FlowRunTable}.
- */
-public enum FlowRunColumn implements Column {
-
-  /**
-   * When the flow was started. This is the minimum of currently known
-   * application start times.
-   */
-  MIN_START_TIME(FlowRunColumnFamily.INFO, "min_start_time",
-  AggregationOperation.GLOBAL_MIN, new LongConverter()),
-
-  /**
-   * When the flow ended. This is the maximum of currently known application
-   * end times.
-   */
-  MAX_END_TIME(FlowRunColumnFamily.INFO, "max_end_time",
-  AggregationOperation.GLOBAL_MAX, new LongConverter()),
-
-  /**
-   * The version of the flow that this flow run belongs to.
-   */
-  FLOW_VERSION(FlowRunColumnFamily.INFO, "flow_version", null);
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-  private final AggregationOperation aggOp;
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp) {
-this(columnFamily, columnQualifier, aggOp,
-GenericConverter.getInstance());
-  }
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp,
-  ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-this.aggOp = aggOp;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
-.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-return columnQualifierBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-return columnFamily.getBytes();
-  }
-
-  public AggregationOperation getAggregationOperation() {
-return aggOp;
-  }
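A quick illustration of the accessor above: each flow-run column advertises the aggregation applied across the apps in a run (GLOBAL_MIN for the start time, GLOBAL_MAX for the end time, none for the flow version). A minimal sketch, assuming only the public enum API shown in this diff.

import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;

public class FlowRunColumnSketch {
  public static void main(String[] args) {
    // Expected output: MIN_START_TIME -> GLOBAL_MIN, MAX_END_TIME -> GLOBAL_MAX,
    // FLOW_VERSION -> null (no aggregation).
    for (FlowRunColumn column : FlowRunColumn.values()) {
      System.out.println(column + " -> " + column.getAggregationOperation());
    }
  }
}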
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.Column#store
-   * (byte[], 

[14/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
YARN-5928. Move ATSv2 HBase backend code into a new module that is only 
dependent at runtime by yarn servers. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b92089c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b92089c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b92089c0

Branch: refs/heads/YARN-5355
Commit: b92089c0e8ab1b87b8b5b55b1e3d4367ae5d847a
Parents: 0327a79
Author: Sangjin Lee 
Authored: Thu Jan 19 21:21:48 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 21:21:48 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../TestRMHATimelineCollectors.java |   6 +
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 190 +
 .../reader/filter/TimelineFilterUtils.java  | 307 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  96 +++
 .../storage/HBaseTimelineWriterImpl.java| 547 ++
 .../storage/TimelineSchemaCreator.java  | 251 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../apptoflow/AppToFlowColumnPrefix.java| 206 ++
 .../storage/apptoflow/AppToFlowRowKey.java  |  58 ++
 .../storage/apptoflow/AppToFlowTable.java   | 124 
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 167 +
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 389 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 306 
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 249 +++
 .../storage/entity/EntityRowKeyPrefix.java  |  77 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 274 +++
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 150 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOperation.java  |  46 ++
 

[28/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
YARN-5928. Move ATSv2 HBase backend code into a new module that is only 
dependent at runtime by yarn servers. Contributed by Haibo Chen.

(cherry picked from commit b92089c0e8ab1b87b8b5b55b1e3d4367ae5d847a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47ec7f92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47ec7f92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47ec7f92

Branch: refs/heads/YARN-5355-branch-2
Commit: 47ec7f927e8b1b1eeb8a2287ae2a7795cab131dd
Parents: e0177c9
Author: Sangjin Lee 
Authored: Thu Jan 19 21:21:48 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 21:38:38 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../TestRMHATimelineCollectors.java |   6 +
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 190 +
 .../reader/filter/TimelineFilterUtils.java  | 307 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  96 +++
 .../storage/HBaseTimelineWriterImpl.java| 542 ++
 .../storage/TimelineSchemaCreator.java  | 251 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../apptoflow/AppToFlowColumnPrefix.java| 206 ++
 .../storage/apptoflow/AppToFlowRowKey.java  |  58 ++
 .../storage/apptoflow/AppToFlowTable.java   | 124 
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 167 +
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 389 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 306 
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 249 +++
 .../storage/entity/EntityRowKeyPrefix.java  |  77 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 274 +++
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 150 
 .../storage/flow/FlowScanner.java   | 728 

[26/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..b9815eb
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,389 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not to be
+ * written to directly by clients.
+ *
+ * @param  refers to the table.
+ */
+public class ColumnHelper {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  public ColumnHelper(ColumnFamily columnFamily) {
+this(columnFamily, GenericConverter.getInstance());
+  }
+
+  public ColumnHelper(ColumnFamily columnFamily, ValueConverter converter) {
+this.columnFamily = columnFamily;
+columnFamilyBytes = columnFamily.getBytes();
+if (converter == null) {
+  this.converter = GenericConverter.getInstance();
+} else {
+  this.converter = converter;
+}
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent
+   * over the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param columnQualifier
+   *  column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *  version timestamp. When null, the current timestamp multiplied by
+   *  TimestampGenerator.TS_MULTIPLIER, with the last 3 digits of the
+   *  app id added, will be used
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during store operation(sending
+   *  mutation to table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator tableMutator,
+  byte[] columnQualifier, Long timestamp, Object inputValue,
+  Attribute... attributes) throws IOException {
+if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) 
{
+  return;
+}
+Put p = new Put(rowKey);
+timestamp = getPutTimestamp(timestamp, attributes);
+p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+converter.encodeValue(inputValue));
+if ((attributes != null) && (attributes.length > 0)) {
+  for (Attribute attribute : attributes) {
+p.setAttribute(attribute.getName(), 

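The store() method above reduces to building an HBase Put with the column
family bytes, the qualifier, an explicit cell timestamp and the converted
value, and handing it to a buffered mutator. Below is a minimal standalone
sketch of that flow; the table name, row key layout and qualifier are
placeholders, not taken from the patch:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnStoreSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator =
             conn.getBufferedMutator(TableName.valueOf("timelineservice.entity"))) {
      byte[] rowKey = Bytes.toBytes("cluster!user!flow!1!app_1!TYPE!id");
      byte[] family = Bytes.toBytes("i");
      byte[] qualifier = Bytes.toBytes("created_time");
      long cellTimestamp = System.currentTimeMillis();

      // Explicit family, qualifier and timestamp on the Put, as in
      // ColumnHelper#store (which additionally skips null inputs).
      Put p = new Put(rowKey);
      p.addColumn(family, qualifier, cellTimestamp, Bytes.toBytes(1484870400000L));
      mutator.mutate(p);  // buffered; sent over the wire as part of a batch
    }                     // closing the mutator flushes pending mutations
  }
}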
[19/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
deleted file mode 100644
index 89aa013..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Used to represent a partially qualified column, where the actual column name
- * will be composed of a prefix and the remainder of the column qualifier. The
- * prefix can be null, in which case the column qualifier will be completely
- * determined when the values are stored.
- */
-public interface ColumnPrefix {
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when 
null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has 
this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  byte[] qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when 
null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has 
this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  String qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Get the latest version of this specified column. Note: this call clones 
the
-   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
-   *
-   * @param result Cannot be null
-   * @param qualifier column qualifier. Nothing gets read when null.
-   * @return result object (can be 

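The interface above describes columns whose final qualifier is a prefix plus a
caller-supplied remainder. A small sketch of that composition, assuming a '!'
separator and invented qualifier names (the real code routes this through the
Separator helper, which is not shown in this hunk):

import org.apache.hadoop.hbase.util.Bytes;

public final class PrefixedQualifierSketch {
  private PrefixedQualifierSketch() {
  }

  // A null prefix means the caller fully determines the qualifier.
  public static byte[] qualifier(String prefix, String remainder) {
    if (prefix == null) {
      return Bytes.toBytes(remainder);
    }
    return Bytes.add(Bytes.toBytes(prefix + "!"), Bytes.toBytes(remainder));
  }

  public static void main(String[] args) {
    System.out.println(Bytes.toString(qualifier("e", "MAP_SLOT_MILLIS"))); // e!MAP_SLOT_MILLIS
    System.out.println(Bytes.toString(qualifier(null, "flow_version")));   // flow_version
  }
}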
[25/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return the split keys for tables where a username is the row key prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file
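The USERNAME_SPLITS array only supplies split points; a table still has to be
created with them. A sketch of how pre-split keys are typically passed to the
HBase admin API, assuming a placeholder table name, a single "i" family and a
shortened split list:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class PresplitTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    byte[][] splits = {
        Bytes.toBytes("a"), Bytes.toBytes("g"), Bytes.toBytes("m"),
        Bytes.toBytes("s"), Bytes.toBytes("t")
    };
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      HTableDescriptor desc =
          new HTableDescriptor(TableName.valueOf("timelineservice.entity"));
      desc.addFamily(new HColumnDescriptor(Bytes.toBytes("i")));
      // Each split key starts a new region, so rows sharing a username prefix
      // land in the same region from the moment the table exists.
      admin.createTable(desc, splits);
    }
  }
}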

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or 

[02/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
deleted file mode 100644
index 5bacf66..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
-
-/**
- * The base class for reading timeline data from the HBase storage. This class
- * provides basic support to validate and augment reader context.
- */
-public abstract class AbstractTimelineStorageReader {
-
-  private final TimelineReaderContext context;
-  /**
-   * Used to look up the flow context.
-   */
-  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
-
-  public AbstractTimelineStorageReader(TimelineReaderContext ctxt) {
-context = ctxt;
-  }
-
-  protected TimelineReaderContext getContext() {
-return context;
-  }
-
-  /**
-   * Looks up flow context from AppToFlow table.
-   *
-   * @param appToFlowRowKey to identify Cluster and App Ids.
-   * @param clusterId the cluster id.
-   * @param hbaseConf HBase configuration.
-   * @param conn HBase Connection.
-   * @return flow context information.
-   * @throws IOException if any problem occurs while fetching flow information.
-   */
-  protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
-  String clusterId, Configuration hbaseConf, Connection conn)
-  throws IOException {
-byte[] rowKey = appToFlowRowKey.getRowKey();
-Get get = new Get(rowKey);
-Result result = appToFlowTable.getResult(hbaseConf, conn, get);
-if (result != null && !result.isEmpty()) {
-  Object flowName =
-  AppToFlowColumnPrefix.FLOW_NAME.readResult(result, clusterId);
-  Object flowRunId =
-  AppToFlowColumnPrefix.FLOW_RUN_ID.readResult(result, clusterId);
-  Object userId =
-  AppToFlowColumnPrefix.USER_ID.readResult(result, clusterId);
-  if (flowName == null || userId == null || flowRunId == null) {
-throw new NotFoundException(
-"Unable to find the context flow name, and flow run id, "
-+ "and user id for clusterId=" + clusterId
-+ ", appId=" + appToFlowRowKey.getAppId());
-  }
-  return new FlowContext((String)userId, (String)flowName,
-  ((Number)flowRunId).longValue());
-} else {
-  throw new NotFoundException(
-  "Unable to find the context flow name, and flow run id, "
-  + "and user id for clusterId=" + clusterId
-  + ", appId=" + appToFlowRowKey.getAppId());
-}
-  }
-
-  /**
-* Sets certain parameters to defaults if the values are not provided.
-*
-* @param hbaseConf HBase Configuration.
-* @param conn HBase Connection.
-* @throws 

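lookupFlowContext() above is a single Get against the app-to-flow table
followed by reading three columns out of the Result. A standalone sketch of
that read path; the table name, family and qualifiers are placeholders, and the
real reader goes through AppToFlowColumnPrefix and its value converters:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlowContextLookupSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    byte[] rowKey = Bytes.toBytes("cluster1!application_1484870400000_0001");
    byte[] family = Bytes.toBytes("m");  // hypothetical mapping column family

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("timelineservice.app_flow"))) {
      Result result = table.get(new Get(rowKey));
      if (result == null || result.isEmpty()) {
        throw new IOException("No flow context for row " + Bytes.toString(rowKey));
      }
      String flowName = Bytes.toString(result.getValue(family, Bytes.toBytes("flow_name")));
      String userId = Bytes.toString(result.getValue(family, Bytes.toBytes("user_id")));
      byte[] runIdBytes = result.getValue(family, Bytes.toBytes("flow_run_id"));
      // Treat missing columns as "not found", mirroring the reader's null check.
      if (flowName == null || userId == null || runIdBytes == null) {
        throw new IOException("Incomplete flow context for row " + Bytes.toString(rowKey));
      }
      System.out.println(userId + "/" + flowName + "/" + Bytes.toLong(runIdBytes));
    }
  }
}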
[08/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
new file mode 100644
index 000..cedf96a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for flow run entities that are stored in the flow run
+ * table.
+ */
+class FlowRunEntityReader extends TimelineEntityReader {
+  private static final FlowRunTable FLOW_RUN_TABLE = new FlowRunTable();
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+super(ctxt, entityFilters, toRetrieve);
+  }
+
+  public FlowRunEntityReader(TimelineReaderContext ctxt,
+  TimelineDataToRetrieve toRetrieve) {
+super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link FlowRunTable}.
+   */
+  @Override
+  protected BaseTable getTable() {
+return FLOW_RUN_TABLE;
+  }
+
+  @Override
+  protected void validateParams() 

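FlowRunEntityReader assembles its scans from the filter classes imported above
(FamilyFilter, QualifierFilter, PageFilter and friends). A small sketch of
composing such a filter list, assuming an "i" info family and a caller-supplied
row range; this is illustrative, not the reader's actual filter logic:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;

public final class FlowRunScanSketch {
  private FlowRunScanSketch() {
  }

  /** A scan limited to the info family and capped at roughly 'limit' rows. */
  public static Scan limitedInfoScan(byte[] startRow, byte[] stopRow, long limit) {
    FilterList filters = new FilterList(Operator.MUST_PASS_ALL);
    // Keep only cells from the "i" (info) column family.
    filters.addFilter(new FamilyFilter(CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes("i"))));
    // PageFilter applies per region server, so callers still enforce the limit.
    filters.addFilter(new PageFilter(limit));

    Scan scan = new Scan();
    scan.setStartRow(startRow);
    scan.setStopRow(stopRow);
    scan.setFilter(filters);
    return scan;
  }
}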
[27/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
new file mode 100644
index 000..97e5f7b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the application table column families.
+ */
+public enum ApplicationColumnFamily implements ColumnFamily {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * columnfamily filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons: a) the 
size
+   * of the config values can be very large and b) we expect that config values
+   * are often separately accessed from other metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value create a column family with this name. Must be lower case and
+   *  without spaces.
+   */
+  private ApplicationColumnFamily(String value) {
+// column families should be lower case and not contain any spaces.
+this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+return Bytes.copy(bytes);
+  }
+
+}

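The comment on METRICS above is the reason the families are split: TTL and
similar settings are per column family in HBase. A sketch of declaring the
three families with a distinct TTL on metrics, assuming a placeholder table
name and a caller-supplied TTL value:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public final class ApplicationTableDescriptorSketch {
  private ApplicationTableDescriptorSketch() {
  }

  /** One descriptor per family so configs and metrics carry their own settings. */
  public static HTableDescriptor describe(int metricsTtlSeconds) {
    HTableDescriptor table =
        new HTableDescriptor(TableName.valueOf("timelineservice.application"));
    table.addFamily(new HColumnDescriptor(Bytes.toBytes("i")));  // info: known columns
    table.addFamily(new HColumnDescriptor(Bytes.toBytes("c")));  // configs: large values
    HColumnDescriptor metrics = new HColumnDescriptor(Bytes.toBytes("m"));
    metrics.setTimeToLive(metricsTtlSeconds);                    // metrics get their own TTL
    table.addFamily(metrics);
    return table;
  }
}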
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY 

[09/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for flow run
+ * table. Looks through the list of cells per row, checks their tags and
+ * performs operations on those cells as per the cell tags. Transforms reads
+ * of the stored metrics into calculated sums for each column. Also finds the
+ * min and max for start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * Use a special application id to represent the flow id. This is needed
+   * since TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner 
internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation 

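The core of what FlowScanner does for a SUM read is fold the long-encoded cell
values of one column into a single total. A stripped-down sketch of just that
step, ignoring tags, value converters and the min/max tracking described above:

import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetricSumSketch {
  private MetricSumSketch() {
  }

  /** Sums the long-encoded values of all cells belonging to one column. */
  public static long sum(List<Cell> cellsForOneColumn) {
    long total = 0L;
    for (Cell cell : cellsForOneColumn) {
      byte[] value = CellUtil.cloneValue(cell);
      if (value.length == Bytes.SIZEOF_LONG) {  // skip anything that is not a long
        total += Bytes.toLong(value);
      }
    }
    return total;
  }
}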
[04/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
deleted file mode 100644
index 93b4b36..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies fully qualified columns for the {@link EntityTable}.
- */
-public enum EntityColumn implements Column {
-
-  /**
-   * Identifier for the entity.
-   */
-  ID(EntityColumnFamily.INFO, "id"),
-
-  /**
-   * The type of entity.
-   */
-  TYPE(EntityColumnFamily.INFO, "type"),
-
-  /**
-   * When the entity was created.
-   */
-  CREATED_TIME(EntityColumnFamily.INFO, "created_time", new LongConverter()),
-
-  /**
-   * The version of the flow that this entity belongs to.
-   */
-  FLOW_VERSION(EntityColumnFamily.INFO, "flow_version");
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier) {
-this(columnFamily, columnQualifier, GenericConverter.getInstance());
-  }
-
-  EntityColumn(ColumnFamily columnFamily,
-  String columnQualifier, ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes =
-Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  public void store(byte[] rowKey,
-  TypedBufferedMutator tableMutator, Long timestamp,
-  Object inputValue, Attribute... attributes) throws IOException {
-column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
-inputValue, attributes);
-  }
-
-  public Object readResult(Result result) throws IOException {
-return column.readResult(result, columnQualifierBytes);
-  }
-
-  /**
-   * Retrieve an {@link EntityColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnQualifier Name of the column to retrieve
-   * @return the corresponding {@link EntityColumn} or null
-   */
-  public static final EntityColumn columnFor(String 

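The columnFor(...) method cut off above is documented to return the matching
column or null. A minimal sketch of that lookup pattern on an illustrative
enum; the names are placeholders, not the EntityColumn definition itself:

/** Toy column enum with a columnFor(...) style lookup by qualifier name. */
public enum SketchColumn {
  ID("id"),
  TYPE("type"),
  CREATED_TIME("created_time"),
  FLOW_VERSION("flow_version");

  private final String qualifier;

  SketchColumn(String qualifier) {
    this.qualifier = qualifier;
  }

  /** Returns the column whose qualifier matches, ignoring case, or null. */
  public static SketchColumn columnFor(String name) {
    for (SketchColumn column : values()) {
      if (column.qualifier.equalsIgnoreCase(name)) {
        return column;
      }
    }
    return null;
  }
}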
[01/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 0327a79d7 -> b92089c0e
  refs/heads/YARN-5355-branch-2 e0177c952 -> 47ec7f927


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
deleted file mode 100644
index f6904c5..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ /dev/null
@@ -1,628 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for generic entities that are stored in the entity
- * table.
- */
-class GenericEntityReader extends TimelineEntityReader {
-  private static final EntityTable ENTITY_TABLE = new EntityTable();
-
-  /**
-   * Used to convert strings key components to and from storage format.
-   */
-  private final KeyConverter stringKeyConverter =
-  new 

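GenericEntityReader wires in a KeyConverter to turn string key components into
storage bytes and back. A toy sketch of that encode/decode shape, assuming a
'!' separator and no escaping (the real StringKeyConverter escapes through
Separator, which this does not attempt):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.util.Bytes;

/** Toy string key converter: join components with '!' and split them back. */
public final class StringKeyConverterSketch {
  private static final String SEP = "!";

  private StringKeyConverterSketch() {
  }

  public static byte[] encode(List<String> components) {
    return Bytes.toBytes(String.join(SEP, components));
  }

  public static List<String> decode(byte[] key) {
    return Arrays.asList(Bytes.toString(key).split(SEP));
  }

  public static void main(String[] args) {
    byte[] key = encode(Arrays.asList("cluster1", "user1", "flow_name"));
    System.out.println(decode(key));  // [cluster1, user1, flow_name]
  }
}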
[13/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
new file mode 100644
index 000..97e5f7b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the application table column families.
+ */
+public enum ApplicationColumnFamily implements ColumnFamily {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * columnfamily filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons: a) the 
size
+   * of the config values can be very large and b) we expect that config values
+   * are often separately accessed from other metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value create a column family with this name. Must be lower case and
+   *  without spaces.
+   */
+  private ApplicationColumnFamily(String value) {
+// column families should be lower case and not contain any spaces.
+this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+return Bytes.copy(bytes);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY 

[11/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return the split keys for tables where a username is the row key prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or 

[23/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47ec7f92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for flow run
+ * table. Looks through the list of cells per row, checks their tags and
+ * performs operations on those cells as per the cell tags. Transforms reads
+ * of the stored metrics into calculated sums for each column. Also finds the
+ * min and max for start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * Use a special application id to represent the flow id. This is needed
+   * since TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner 
internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation 

[05/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
deleted file mode 100644
index 89aa013..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Used to represent a partially qualified column, where the actual column name
- * will be composed of a prefix and the remainder of the column qualifier. The
- * prefix can be null, in which case the column qualifier will be completely
- * determined when the values are stored.
- */
-public interface ColumnPrefix {
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when 
null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has 
this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  byte[] qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when 
null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *  responsible to pass a mutator for the table that actually has 
this
-   *  column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *  used.
-   * @param attributes attributes for the mutation that are used by the
-   *  coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *  Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   * store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator tableMutator,
-  String qualifier, Long timestamp, Object inputValue,
-  Attribute... attributes) throws IOException;
-
-  /**
-   * Get the latest version of this specified column. Note: this call clones 
the
-   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
-   *
-   * @param result Cannot be null
-   * @param qualifier column qualifier. Nothing gets read when null.
-   * @return result object (can be 
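
The "prefix plus remainder" naming scheme described in the ColumnPrefix javadoc above can be sketched independently of the Separator and ColumnHelper utilities; the '!' delimiter and the qualifier() helper below are placeholders for whatever encoding the real code applies.

    import java.nio.charset.StandardCharsets;

    /** Illustrative only: composes a full column qualifier from a prefix and a remainder. */
    final class ColumnQualifierSketch {
      // Hypothetical delimiter; the real code delegates to the Separator utility.
      private static final byte[] DELIMITER = "!".getBytes(StandardCharsets.UTF_8);

      static byte[] qualifier(byte[] prefixBytes, byte[] remainder) {
        if (prefixBytes == null) {
          // A null prefix means the remainder is the entire column name.
          return remainder.clone();
        }
        byte[] out = new byte[prefixBytes.length + DELIMITER.length + remainder.length];
        System.arraycopy(prefixBytes, 0, out, 0, prefixBytes.length);
        System.arraycopy(DELIMITER, 0, out, prefixBytes.length, DELIMITER.length);
        System.arraycopy(remainder, 0, out, prefixBytes.length + DELIMITER.length,
            remainder.length);
        return out;
      }
    }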

[03/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b92089c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
deleted file mode 100644
index 90dd345..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-/**
- * Identifies fully qualified columns for the {@link FlowRunTable}.
- */
-public enum FlowRunColumn implements Column {
-
-  /**
-   * When the flow was started. This is the minimum of currently known
-   * application start times.
-   */
-  MIN_START_TIME(FlowRunColumnFamily.INFO, "min_start_time",
-  AggregationOperation.GLOBAL_MIN, new LongConverter()),
-
-  /**
-   * When the flow ended. This is the maximum of currently known application 
end
-   * times.
-   */
-  MAX_END_TIME(FlowRunColumnFamily.INFO, "max_end_time",
-  AggregationOperation.GLOBAL_MAX, new LongConverter()),
-
-  /**
-   * The version of the flow that this flow belongs to.
-   */
-  FLOW_VERSION(FlowRunColumnFamily.INFO, "flow_version", null);
-
-  private final ColumnHelper column;
-  private final ColumnFamily columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-  private final AggregationOperation aggOp;
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp) {
-this(columnFamily, columnQualifier, aggOp,
-GenericConverter.getInstance());
-  }
-
-  private FlowRunColumn(ColumnFamily columnFamily,
-  String columnQualifier, AggregationOperation aggOp,
-  ValueConverter converter) {
-this.columnFamily = columnFamily;
-this.columnQualifier = columnQualifier;
-this.aggOp = aggOp;
-// Future-proof by ensuring the right column prefix hygiene.
-this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
-.encode(columnQualifier));
-this.column = new ColumnHelper(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-return columnQualifier;
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-return columnQualifierBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-return columnFamily.getBytes();
-  }
-
-  public AggregationOperation getAggregationOperation() {
-return aggOp;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.Column#store
-   * (byte[], 
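
The GLOBAL_MIN and GLOBAL_MAX aggregation operations attached to MIN_START_TIME and MAX_END_TIME above amount to keeping the smallest or largest value seen so far. A small illustrative sketch (the class and helper names are made up):

    /** Illustrative only: how a GLOBAL_MIN / GLOBAL_MAX column value evolves. */
    final class FlowRunColumnSketch {
      static long applyGlobalMin(Long stored, long incoming) {
        return stored == null ? incoming : Math.min(stored, incoming);
      }

      static long applyGlobalMax(Long stored, long incoming) {
        return stored == null ? incoming : Math.max(stored, incoming);
      }

      public static void main(String[] args) {
        Long minStartTime = null;
        for (long appStart : new long[] {1500L, 1200L, 1700L}) {
          minStartTime = applyGlobalMin(minStartTime, appStart);
        }
        System.out.println("min_start_time = " + minStartTime); // prints 1200
      }
    }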

[22/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
new file mode 100644
index 000..4e1ab8a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -0,0 +1,648 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for generic entities that are stored in the entity
+ * table.
+ */
+class GenericEntityReader extends TimelineEntityReader {
+  private static final EntityTable ENTITY_TABLE = new EntityTable();
+
+  /**
+   * Used to look up the flow context.
+   */
+  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
+
+  /**

[02/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
deleted file mode 100644
index 9ba5e38..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for flow activity entities that are stored in the
- * flow activity table.
- */
-class FlowActivityEntityReader extends TimelineEntityReader {
-  private static final FlowActivityTable FLOW_ACTIVITY_TABLE =
-  new FlowActivityTable();
-
-  /**
-   * Used to convert Long key components to and from storage format.
-   */
-  private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();
-
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-super(ctxt, entityFilters, toRetrieve, true);
-  }
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineDataToRetrieve toRetrieve) {
-super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link FlowActivityTable}.
-   */
-  @Override
-  protected BaseTable getTable() {
-return FLOW_ACTIVITY_TABLE;
-  }
-
-  @Override
-  protected void validateParams() {
-Preconditions.checkNotNull(getContext().getClusterId(),
-"clusterId shouldn't be null");
-  }
-
-  @Override
-  protected void augmentParams(Configuration hbaseConf, Connection conn)
-  throws IOException {
-createFiltersIfNull();
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFilters() throws IOException {
-return null;
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFields() {
-return null;
-  }
-
-  @Override
-  protected Result getResult(Configuration hbaseConf, Connection conn,
-  
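
The validateParams() pattern above simply fails fast when a required context field is missing. A minimal sketch using Guava's Preconditions, with a hypothetical ReaderContext holder standing in for TimelineReaderContext:

    import com.google.common.base.Preconditions;

    /** Illustrative only: fail-fast validation of required reader context fields. */
    final class ReaderContextCheckSketch {
      // Hypothetical holder; the real reader pulls this from TimelineReaderContext.
      static final class ReaderContext {
        private final String clusterId;
        ReaderContext(String clusterId) {
          this.clusterId = clusterId;
        }
        String getClusterId() {
          return clusterId;
        }
      }

      static void validateParams(ReaderContext context) {
        Preconditions.checkNotNull(context, "context shouldn't be null");
        Preconditions.checkNotNull(context.getClusterId(),
            "clusterId shouldn't be null");
      }
    }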

[16/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
deleted file mode 100644
index 9ba5e38..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for flow activity entities that are stored in the
- * flow activity table.
- */
-class FlowActivityEntityReader extends TimelineEntityReader {
-  private static final FlowActivityTable FLOW_ACTIVITY_TABLE =
-  new FlowActivityTable();
-
-  /**
-   * Used to convert Long key components to and from storage format.
-   */
-  private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();
-
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-super(ctxt, entityFilters, toRetrieve, true);
-  }
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-  TimelineDataToRetrieve toRetrieve) {
-super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link FlowActivityTable}.
-   */
-  @Override
-  protected BaseTable getTable() {
-return FLOW_ACTIVITY_TABLE;
-  }
-
-  @Override
-  protected void validateParams() {
-Preconditions.checkNotNull(getContext().getClusterId(),
-"clusterId shouldn't be null");
-  }
-
-  @Override
-  protected void augmentParams(Configuration hbaseConf, Connection conn)
-  throws IOException {
-createFiltersIfNull();
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFilters() throws IOException {
-return null;
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFields() {
-return null;
-  }
-
-  @Override
-  protected Result getResult(Configuration hbaseConf, Connection conn,
-  

[17/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
deleted file mode 100644
index 2be6ef8..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
-
-/**
- * Coprocessor for flow run table.
- */
-public class FlowRunCoprocessor extends BaseRegionObserver {
-
-  private static final Log LOG = LogFactory.getLog(FlowRunCoprocessor.class);
-  private boolean isFlowRunRegion = false;
-
-  private Region region;
-  /**
-   * Generates a timestamp that is unique per row in a region; the generator
-   * is kept per region.
-   */
-  private final TimestampGenerator timestampGenerator =
-  new TimestampGenerator();
-
-  @Override
-  public void start(CoprocessorEnvironment e) throws IOException {
-if (e instanceof RegionCoprocessorEnvironment) {
-  RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
-  this.region = env.getRegion();
-  isFlowRunRegion = HBaseTimelineStorageUtils.isFlowRunTable(
-  region.getRegionInfo(), env.getConfiguration());
-}
-  }
-
-  public boolean isFlowRunRegion() {
-return isFlowRunRegion;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * This method adds the tags onto the cells in the Put. It is presumed that
-   * all the cells in one Put have the same set of Tags. The existing cell
-   * timestamp is overwritten for non-metric cells and each such cell gets a 
new
-   * unique timestamp generated by {@link TimestampGenerator}
-   *
-   * @see
-   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#prePut(org.apache
-   * .hadoop.hbase.coprocessor.ObserverContext,
-   * org.apache.hadoop.hbase.client.Put,
-   * org.apache.hadoop.hbase.regionserver.wal.WALEdit,
-   * 
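
The per-region timestamp uniqueness mentioned in the javadoc above can be pictured with a simplified generator; the real TimestampGenerator additionally folds app-id information into the timestamp (as noted in the FlowScanner javadoc earlier), which this sketch omits.

    import java.util.concurrent.atomic.AtomicLong;

    /** Illustrative only: hands out strictly increasing timestamps per region. */
    final class MonotonicTimestampSketch {
      private final AtomicLong lastTimestamp = new AtomicLong(0L);

      long currentTime() {
        return System.currentTimeMillis();
      }

      /** Returns the wall clock time, bumped if needed so no two calls ever match. */
      long getUniqueTimestamp() {
        long now = currentTime();
        while (true) {
          long last = lastTimestamp.get();
          long candidate = Math.max(now, last + 1);
          if (lastTimestamp.compareAndSet(last, candidate)) {
            return candidate;
          }
        }
      }
    }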

[27/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the application table.
+ */
+public enum ApplicationColumnPrefix implements ColumnPrefix {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(ApplicationColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(ApplicationColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(ApplicationColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an application.
+   */
+  EVENT(ApplicationColumnFamily.INFO, "e"),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(ApplicationColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(ApplicationColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnHelper column;
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix) {
+this(columnFamily, columnPrefix, GenericConverter.getInstance());
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix, ValueConverter converter) {
+column = new ColumnHelper(columnFamily, converter);
+this.columnFamily = 
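
The third constructor argument above selects how cell values are encoded: metric columns use a long encoding so values can be aggregated, everything else falls back to a generic string form. A minimal sketch of that split (the Converter interface and constant names below are simplified stand-ins for ValueConverter, LongConverter and GenericConverter):

    import java.nio.charset.StandardCharsets;

    /** Illustrative only: metric columns encode longs, other columns encode strings. */
    final class ValueConverterSketch {
      interface Converter {
        byte[] encode(Object value);
      }

      /** Used for METRIC-style columns so values can be aggregated numerically. */
      static final Converter LONG_CONVERTER = value ->
          java.nio.ByteBuffer.allocate(Long.BYTES)
              .putLong(((Number) value).longValue()).array();

      /** Used for the remaining columns; values are stored in their string form. */
      static final Converter GENERIC_CONVERTER = value ->
          value.toString().getBytes(StandardCharsets.UTF_8);
    }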

[21/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
deleted file mode 100644
index cccae26..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-
-/**
- * Set of utility methods used by timeline filter classes.
- */
-public final class TimelineFilterUtils {
-
-  private static final Log LOG = LogFactory.getLog(TimelineFilterUtils.class);
-
-  private TimelineFilterUtils() {
-  }
-
-  /**
-   * Returns the equivalent HBase filter list's {@link Operator}.
-   *
-   * @param op timeline filter list operator.
-   * @return HBase filter list's Operator.
-   */
-  private static Operator getHBaseOperator(TimelineFilterList.Operator op) {
-switch (op) {
-case AND:
-  return Operator.MUST_PASS_ALL;
-case OR:
-  return Operator.MUST_PASS_ONE;
-default:
-  throw new IllegalArgumentException("Invalid operator");
-}
-  }
-
-  /**
-   * Returns the equivalent HBase compare filter's {@link CompareOp}.
-   *
-   * @param op timeline compare op.
-   * @return HBase compare filter's CompareOp.
-   */
-  private static CompareOp getHBaseCompareOp(
-  TimelineCompareOp op) {
-switch (op) {
-case LESS_THAN:
-  return CompareOp.LESS;
-case LESS_OR_EQUAL:
-  return CompareOp.LESS_OR_EQUAL;
-case EQUAL:
-  return CompareOp.EQUAL;
-case NOT_EQUAL:
-  return CompareOp.NOT_EQUAL;
-case GREATER_OR_EQUAL:
-  return CompareOp.GREATER_OR_EQUAL;
-case GREATER_THAN:
-  return CompareOp.GREATER;
-default:
-  throw new IllegalArgumentException("Invalid compare operator");
-}
-  }
-
-  /**
-   * Converts a {@link TimelinePrefixFilter} to an equivalent HBase
-   * {@link QualifierFilter}.
-   * @param colPrefix
-   * @param filter
-   * @return a {@link QualifierFilter} object
-   */
-  private static <T> Filter createHBaseColQualPrefixFilter(
-  ColumnPrefix<T> colPrefix, TimelinePrefixFilter filter) {
-return new QualifierFilter(getHBaseCompareOp(filter.getCompareOp()),
-new BinaryPrefixComparator(
-colPrefix.getColumnPrefixBytes(filter.getPrefix())));
-  }
-
-  /**
-   * Create a HBase {@link QualifierFilter} for the passed column prefix and
-   * compare op.
-   *
-   * @param <T> Describes the type of column prefix.
-   * @param compareOp compare op.
-   * @param columnPrefix column prefix.
-   * @return a column 
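
For orientation, this is the kind of HBase filter the helpers above assemble: an EQUAL qualifier-prefix match wrapped in a MUST_PASS_ALL (AND) FilterList. The "e!" prefix bytes are made up for the example.

    import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.FilterList.Operator;
    import org.apache.hadoop.hbase.filter.QualifierFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    /** Illustrative only: an EQUAL qualifier-prefix filter wrapped in an AND list. */
    final class PrefixFilterSketch {
      static Filter eventColumnsOnly() {
        // Matches any column whose qualifier starts with the given prefix bytes.
        Filter prefixMatch = new QualifierFilter(CompareOp.EQUAL,
            new BinaryPrefixComparator(Bytes.toBytes("e!")));
        // MUST_PASS_ALL is the HBase equivalent of the timeline AND operator.
        FilterList list = new FilterList(Operator.MUST_PASS_ALL);
        list.addFilter(prefixMatch);
        return list;
      }
    }

MUST_PASS_ALL corresponds to the timeline AND operator and MUST_PASS_ONE to OR, as getHBaseOperator() above shows.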

[18/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
deleted file mode 100644
index ff22178..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the entity table.
- */
-public class EntityRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final String entityType;
-  private final String entityId;
-  private final KeyConverter<EntityRowKey> entityRowKeyConverter =
-  new EntityRowKeyConverter();
-
-  public EntityRowKey(String clusterId, String userId, String flowName,
-  Long flowRunId, String appId, String entityType, String entityId) {
-this.clusterId = clusterId;
-this.userId = userId;
-this.flowName = flowName;
-this.flowRunId = flowRunId;
-this.appId = appId;
-this.entityType = entityType;
-this.entityId = entityId;
-  }
-
-  public String getClusterId() {
-return clusterId;
-  }
-
-  public String getUserId() {
-return userId;
-  }
-
-  public String getFlowName() {
-return flowName;
-  }
-
-  public Long getFlowRunId() {
-return flowRunId;
-  }
-
-  public String getAppId() {
-return appId;
-  }
-
-  public String getEntityType() {
-return entityType;
-  }
-
-  public String getEntityId() {
-return entityId;
-  }
-
-  /**
-   * Constructs a row key for the entity table as follows:
-   * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}.
-   * Typically used while querying a specific entity.
-   *
-   * @return byte array with the row key.
-   */
-  public byte[] getRowKey() {
-return entityRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey byte representation of row key.
-   * @return An EntityRowKey object.
-   */
-  public static EntityRowKey parseRowKey(byte[] rowKey) {
-return new EntityRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for entity table. The row key is of the form :
-   * userName!clusterId!flowName!flowRunId!appId!entityType!entityId. flowRunId
-   * is a long, appId is encoded/decoded using {@link AppIdKeyConverter} and
-   * rest are strings.
-   * 
-   */
-  final private static class EntityRowKeyConverter implements
-  KeyConverter<EntityRowKey> {
-
-private final AppIdKeyConverter appIDKeyConverter = new 
AppIdKeyConverter();
-
-private EntityRowKeyConverter() {
-}
-
-/**
- * Entity row key is of the form
- * userName!clusterId!flowName!flowRunId!appId!entityType!entityId w. each
- * segment separated by !. The sizes below indicate sizes of each one of
- * these segments in sequence. clusterId, userName, flowName, entityType 
and
- * entityId are strings. flowrunId is a long hence 8 bytes in size. app id
- * is represented as 12 
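
A quick usage sketch of the EntityRowKey class from the hunk above (all values are made up; note that the constructor takes clusterId before userId even though the encoded key leads with the user name):

    import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;

    public class EntityRowKeyExample {
      public static void main(String[] args) {
        EntityRowKey key = new EntityRowKey("cluster1", "user1", "flow1",
            1002345678919L, "application_1111111111_2222", "YARN_CONTAINER",
            "container_1111111111_2222_01_000001");
        byte[] rowKey = key.getRowKey();

        // Round-trip: decode the bytes back into a structured key.
        EntityRowKey parsed = EntityRowKey.parseRowKey(rowKey);
        System.out.println(parsed.getUserId() + "/" + parsed.getFlowName()
            + "/" + parsed.getEntityId());
      }
    }

parseRowKey() reverses the encoding performed by getRowKey(), so the printed fields match what was passed to the constructor.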

[14/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
YARN-5928. Move ATSv2 HBase backend code into a new module that is only 
dependent at runtime by yarn servers. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b01514f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b01514f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b01514f6

Branch: refs/heads/trunk
Commit: b01514f65bc6090a50a583f67d1ecb5d74b6d276
Parents: 60865c8
Author: Sangjin Lee 
Authored: Thu Jan 19 20:52:55 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 20:52:55 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 191 +
 .../reader/filter/TimelineFilterUtils.java  | 290 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  88 +++
 .../storage/HBaseTimelineWriterImpl.java| 566 ++
 .../storage/TimelineSchemaCreator.java  | 250 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../storage/apptoflow/AppToFlowColumn.java  | 148 
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../storage/apptoflow/AppToFlowRowKey.java  | 143 
 .../storage/apptoflow/AppToFlowTable.java   | 113 +++
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 140 
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 388 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 243 +++
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 225 ++
 .../storage/entity/EntityRowKeyPrefix.java  |  74 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 304 
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 141 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOperation.java  |  46 ++
 .../storage/flow/package-info.java  |  29 +
 

[04/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
deleted file mode 100644
index ff22178..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the entity table.
- */
-public class EntityRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final String entityType;
-  private final String entityId;
-  private final KeyConverter<EntityRowKey> entityRowKeyConverter =
-  new EntityRowKeyConverter();
-
-  public EntityRowKey(String clusterId, String userId, String flowName,
-  Long flowRunId, String appId, String entityType, String entityId) {
-this.clusterId = clusterId;
-this.userId = userId;
-this.flowName = flowName;
-this.flowRunId = flowRunId;
-this.appId = appId;
-this.entityType = entityType;
-this.entityId = entityId;
-  }
-
-  public String getClusterId() {
-return clusterId;
-  }
-
-  public String getUserId() {
-return userId;
-  }
-
-  public String getFlowName() {
-return flowName;
-  }
-
-  public Long getFlowRunId() {
-return flowRunId;
-  }
-
-  public String getAppId() {
-return appId;
-  }
-
-  public String getEntityType() {
-return entityType;
-  }
-
-  public String getEntityId() {
-return entityId;
-  }
-
-  /**
-   * Constructs a row key for the entity table as follows:
-   * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}.
-   * Typically used while querying a specific entity.
-   *
-   * @return byte array with the row key.
-   */
-  public byte[] getRowKey() {
-return entityRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey byte representation of row key.
-   * @return An EntityRowKey object.
-   */
-  public static EntityRowKey parseRowKey(byte[] rowKey) {
-return new EntityRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for entity table. The row key is of the form :
-   * userName!clusterId!flowName!flowRunId!appId!entityType!entityId. flowRunId
-   * is a long, appId is encoded/decoded using {@link AppIdKeyConverter} and
-   * rest are strings.
-   * 
-   */
-  final private static class EntityRowKeyConverter implements
-  KeyConverter<EntityRowKey> {
-
-private final AppIdKeyConverter appIDKeyConverter = new 
AppIdKeyConverter();
-
-private EntityRowKeyConverter() {
-}
-
-/**
- * Entity row key is of the form
- * userName!clusterId!flowName!flowRunId!appId!entityType!entityId w. each
- * segment separated by !. The sizes below indicate sizes of each one of
- * these segments in sequence. clusterId, userName, flowName, entityType 
and
- * entityId are strings. flowrunId is a long hence 8 bytes in size. app id
- * is represented as 12 

[10/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 000..bb77e36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final KeyConverter<FlowActivityRowKey> flowActivityRowKeyConverter =
+  new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+  String flowName) {
+this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *  top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *  timestamp will be converted to the top-of-the day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+  String flowName, boolean convertDayTsToTopOfDay) {
+this.clusterId = clusterId;
+if (convertDayTsToTopOfDay && (timestamp != null)) {
+  this.dayTs = 
HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
+} else {
+  this.dayTs = timestamp;
+}
+this.userId = userId;
+this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+return dayTs;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A FlowActivityRowKey object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for flow activity table. The row key is of the
+   * form : clusterId!dayTimestamp!user!flowName. dayTimestamp(top of the day
+   * timestamp) is a long and rest are strings.
+   * 
+   */
+  final private static class FlowActivityRowKeyConverter implements
+   
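
For readers following the new row key classes, here is a minimal usage sketch of FlowActivityRowKey as it appears in the diff above; the cluster, user, and flow names are made up for illustration, and it assumes the timeline service HBase module is on the classpath.

import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;

public class FlowActivityRowKeyDemo {
  public static void main(String[] args) {
    // Encoding: the converter lays out clusterId!dayTimestamp!user!flowName,
    // rounding the supplied timestamp down to the top of the day.
    FlowActivityRowKey key = new FlowActivityRowKey(
        "cluster1", System.currentTimeMillis(), "alice", "dailyIngest");
    byte[] rowKey = key.getRowKey();

    // Decoding: parsing the raw bytes recovers the individual components.
    FlowActivityRowKey parsed = FlowActivityRowKey.parseRowKey(rowKey);
    System.out.println(parsed.getClusterId() + " " + parsed.getUserId()
        + " " + parsed.getFlowName() + " " + parsed.getDayTimestamp());
  }
}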

[28/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
YARN-5928. Move ATSv2 HBase backend code into a new module that is only 
dependent at runtime by yarn servers. Contributed by Haibo Chen.

(cherry picked from commit b01514f65bc6090a50a583f67d1ecb5d74b6d276)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a925cb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a925cb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a925cb8

Branch: refs/heads/branch-3.0.0-alpha2
Commit: 9a925cb8e8f438d29934043ceabb4c0066279cd5
Parents: 18f6406
Author: Sangjin Lee 
Authored: Thu Jan 19 20:52:55 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 21:00:24 2017 -0800

--
 hadoop-project/pom.xml  |  13 +-
 .../server/resourcemanager/TestRMRestart.java   |   4 +
 .../TestResourceTrackerService.java |   4 +
 .../pom.xml |  12 +
 .../pom.xml | 191 +
 .../reader/filter/TimelineFilterUtils.java  | 290 
 .../reader/filter/package-info.java |  28 +
 .../storage/HBaseTimelineReaderImpl.java|  88 +++
 .../storage/HBaseTimelineWriterImpl.java| 566 ++
 .../storage/TimelineSchemaCreator.java  | 250 +++
 .../storage/application/ApplicationColumn.java  | 156 
 .../application/ApplicationColumnFamily.java|  65 ++
 .../application/ApplicationColumnPrefix.java| 288 
 .../storage/application/ApplicationRowKey.java  | 206 ++
 .../application/ApplicationRowKeyPrefix.java|  69 ++
 .../storage/application/ApplicationTable.java   | 161 
 .../storage/application/package-info.java   |  28 +
 .../storage/apptoflow/AppToFlowColumn.java  | 148 
 .../apptoflow/AppToFlowColumnFamily.java|  51 ++
 .../storage/apptoflow/AppToFlowRowKey.java  | 143 
 .../storage/apptoflow/AppToFlowTable.java   | 113 +++
 .../storage/apptoflow/package-info.java |  28 +
 .../storage/common/AppIdKeyConverter.java   |  96 +++
 .../storage/common/BaseTable.java   | 140 
 .../common/BufferedMutatorDelegator.java|  73 ++
 .../timelineservice/storage/common/Column.java  |  80 ++
 .../storage/common/ColumnFamily.java|  34 +
 .../storage/common/ColumnHelper.java| 388 ++
 .../storage/common/ColumnPrefix.java| 145 
 .../storage/common/EventColumnName.java |  63 ++
 .../common/EventColumnNameConverter.java|  99 +++
 .../storage/common/GenericConverter.java|  48 ++
 .../common/HBaseTimelineStorageUtils.java   | 243 +++
 .../storage/common/KeyConverter.java|  41 ++
 .../storage/common/LongConverter.java   |  94 +++
 .../storage/common/LongKeyConverter.java|  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java|  42 ++
 .../storage/common/Separator.java   | 575 +++
 .../storage/common/StringKeyConverter.java  |  54 ++
 .../common/TimelineHBaseSchemaConstants.java|  71 ++
 .../storage/common/TimestampGenerator.java  | 116 +++
 .../storage/common/TypedBufferedMutator.java|  28 +
 .../storage/common/ValueConverter.java  |  47 ++
 .../storage/common/package-info.java|  28 +
 .../storage/entity/EntityColumn.java| 160 
 .../storage/entity/EntityColumnFamily.java  |  65 ++
 .../storage/entity/EntityColumnPrefix.java  | 300 
 .../storage/entity/EntityRowKey.java| 225 ++
 .../storage/entity/EntityRowKeyPrefix.java  |  74 ++
 .../storage/entity/EntityTable.java | 161 
 .../storage/entity/package-info.java|  28 +
 .../flow/AggregationCompactionDimension.java|  63 ++
 .../storage/flow/AggregationOperation.java  |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 277 +++
 .../storage/flow/FlowActivityRowKey.java| 196 +
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java | 108 +++
 .../storage/flow/FlowRunColumn.java | 182 +
 .../storage/flow/FlowRunColumnFamily.java   |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java   | 268 +++
 .../storage/flow/FlowRunCoprocessor.java| 304 
 .../storage/flow/FlowRunRowKey.java | 190 +
 .../storage/flow/FlowRunRowKeyPrefix.java   |  54 ++
 .../storage/flow/FlowRunTable.java  | 141 
 .../storage/flow/FlowScanner.java   | 728 +++
 .../storage/flow/FlowScannerOperation.java  |  46 

[20/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
deleted file mode 100644
index a02f768..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
-
-/**
- * The application table as column families info, config and metrics. Info
- * stores information about a YARN application entity, config stores
- * configuration data of a YARN application, metrics stores the metrics of a
- * YARN application. This table is entirely analogous to the entity table but
- * created for better performance.
- *
- * Example application table record:
- *
- * 
- * |-|
- * |  Row   | Column Family| Column Family| Column Family|
- * |  key   | info | metrics  | config   |
- * |-|
- * | clusterId! | id:appId | metricId1:   | configKey1:  |
- * | userName!  |  | metricValue1 | configValue1 |
- * | flowName!  | created_time:| @timestamp1  |  |
- * | flowRunId! | 1392993084018|  | configKey2:  |
- * | AppId  |  | metriciD1:   | configValue2 |
- * || i!infoKey:   | metricValue2 |  |
- * || infoValue| @timestamp2  |  |
- * ||  |  |  |
- * || r!relatesToKey:  | metricId2:   |  |
- * || id3=id4=id5  | metricValue1 |  |
- * ||  | @timestamp2  |  |
- * || s!isRelatedToKey:|  |  |
- * || id7=id9=id6  |  |  |
- * ||  |  |  |
- * || e!eventId=timestamp=infoKey: |  |  |
- * || eventInfoValue   |  |  |
- * ||  |  |  |
- * || flowVersion: |  |  |
- * || versionValue |  |  |
- * |-|
- * 
- */
-public class ApplicationTable extends BaseTable<ApplicationTable> {
-  /** application prefix. */
-  private static final String PREFIX =
-  
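
As a rough illustration of the schema described in the javadoc above (not the actual ApplicationTable.createTable implementation, which is truncated here), the table boils down to three column families created through the HBase admin API. The table name and the single-letter family names below are assumptions for the sketch, not values taken from the module.

import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.BloomType;

public final class ApplicationTableSketch {
  private ApplicationTableSketch() {
  }

  static void createIfAbsent(Admin admin) throws IOException {
    HTableDescriptor desc =
        new HTableDescriptor(TableName.valueOf("timelineservice.application"));
    // One column family per section of the example record above.
    HColumnDescriptor info = new HColumnDescriptor("i");   // info
    info.setBloomFilterType(BloomType.ROW);                // row-level bloom filter
    desc.addFamily(info);
    desc.addFamily(new HColumnDescriptor("c"));            // config
    desc.addFamily(new HColumnDescriptor("m"));            // metrics
    if (!admin.tableExists(desc.getTableName())) {
      admin.createTable(desc);
    }
  }
}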

[26/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..be55db5
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,388 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not directly to
+ * write by clients.
+ *
+ * @param <T> refers to the table.
+ */
+public class ColumnHelper<T> {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily<T> columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  public ColumnHelper(ColumnFamily<T> columnFamily) {
+this(columnFamily, GenericConverter.getInstance());
+  }
+
+  public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter) {
+this.columnFamily = columnFamily;
+columnFamilyBytes = columnFamily.getBytes();
+if (converter == null) {
+  this.converter = GenericConverter.getInstance();
+} else {
+  this.converter = converter;
+}
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
+   * the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param columnQualifier
+   *  column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *  version timestamp. When null the current timestamp multiplied 
with
+   *  TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
+   *  app id will be used
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during store operation(sending
+   *  mutation to table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
+  byte[] columnQualifier, Long timestamp, Object inputValue,
+  Attribute... attributes) throws IOException {
+if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) 
{
+  return;
+}
+Put p = new Put(rowKey);
+timestamp = getPutTimestamp(timestamp, attributes);
+p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+converter.encodeValue(inputValue));
+if ((attributes != null) && (attributes.length > 0)) {
+  for (Attribute attribute : attributes) {
+p.setAttribute(attribute.getName(), 
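
Stripped of the coprocessor attributes and the TimestampGenerator-based cell timestamps, the write path that store() implements reduces to a guarded, buffered Put. The stand-alone sketch below uses only stock HBase client calls and a plain wall-clock timestamp, so it is an approximation of the pattern rather than the module's code.

import java.io.IOException;

import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Put;

public final class BufferedStoreSketch {
  private BufferedStoreSketch() {
  }

  /** Buffers a single cell write; nothing is written when any argument is null. */
  static void store(BufferedMutator mutator, byte[] rowKey, byte[] family,
      byte[] qualifier, byte[] value) throws IOException {
    if (rowKey == null || qualifier == null || value == null) {
      return; // mirrors the null guards at the top of ColumnHelper#store
    }
    Put p = new Put(rowKey);
    p.addColumn(family, qualifier, System.currentTimeMillis(), value);
    mutator.mutate(p); // buffered locally, sent over the wire as part of a batch
  }
}

Durability comes only when the mutator's buffer fills or the caller flushes or closes it, which is why the javadoc above stresses that mutations are batched.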

[24/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 000..bb77e36
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final KeyConverter<FlowActivityRowKey> flowActivityRowKeyConverter =
+  new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+  String flowName) {
+this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *  top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *  timestamp will be converted to the top-of-the day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+  String flowName, boolean convertDayTsToTopOfDay) {
+this.clusterId = clusterId;
+if (convertDayTsToTopOfDay && (timestamp != null)) {
+  this.dayTs = 
HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
+} else {
+  this.dayTs = timestamp;
+}
+this.userId = userId;
+this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+return dayTs;
+  }
+
+  public String getUserId() {
+return userId;
+  }
+
+  public String getFlowName() {
+return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A FlowActivityRowKey object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for flow activity table. The row key is of the
+   * form : clusterId!dayTimestamp!user!flowName. dayTimestamp(top of the day
+   * timestamp) is a long and rest are strings.
+   * 
+   */
+  final private static class FlowActivityRowKeyConverter implements
+   

[15/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
deleted file mode 100644
index 5beb189..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
-import org.junit.Test;
-
-
-public class TestRowKeys {
-
-  private final static String QUALIFIER_SEP = Separator.QUALIFIERS.getValue();
-  private final static byte[] QUALIFIER_SEP_BYTES = Bytes
-  .toBytes(QUALIFIER_SEP);
-  private final static String CLUSTER = "cl" + QUALIFIER_SEP + "uster";
-  private final static String USER = QUALIFIER_SEP + "user";
-  private final static String FLOW_NAME = "dummy_" + QUALIFIER_SEP + "flow"
-  + QUALIFIER_SEP;
-  private final static Long FLOW_RUN_ID;
-  private final static String APPLICATION_ID;
-  static {
-long runid = Long.MAX_VALUE - 900L;
-byte[] longMaxByteArr = Bytes.toBytes(Long.MAX_VALUE);
-byte[] byteArr = Bytes.toBytes(runid);
-int sepByteLen = QUALIFIER_SEP_BYTES.length;
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[i] = (byte) (longMaxByteArr[i] - QUALIFIER_SEP_BYTES[i]);
-  }
-}
-FLOW_RUN_ID = Bytes.toLong(byteArr);
-long clusterTs = System.currentTimeMillis();
-byteArr = Bytes.toBytes(clusterTs);
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[byteArr.length - sepByteLen + i] =
-(byte) (longMaxByteArr[byteArr.length - sepByteLen + i] -
-QUALIFIER_SEP_BYTES[i]);
-  }
-}
-clusterTs = Bytes.toLong(byteArr);
-int seqId = 222;
-APPLICATION_ID = ApplicationId.newInstance(clusterTs, seqId).toString();
-  }
-
-  private static void verifyRowPrefixBytes(byte[] byteRowKeyPrefix) {
-int sepLen = QUALIFIER_SEP_BYTES.length;
-for (int i = 0; i < sepLen; i++) {
-  assertTrue(
-  "Row key prefix not encoded properly.",
-  byteRowKeyPrefix[byteRowKeyPrefix.length - sepLen + i] ==
-  QUALIFIER_SEP_BYTES[i]);
-}
-  }
-
-  @Test
-  public void testApplicationRowKey() {
-byte[] byteRowKey =
-new ApplicationRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
-APPLICATION_ID).getRowKey();
-ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(byteRowKey);
-assertEquals(CLUSTER, rowKey.getClusterId());
-assertEquals(USER, rowKey.getUserId());
-assertEquals(FLOW_NAME, 

[06/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
deleted file mode 100644
index a02f768..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
-
-/**
- * The application table as column families info, config and metrics. Info
- * stores information about a YARN application entity, config stores
- * configuration data of a YARN application, metrics stores the metrics of a
- * YARN application. This table is entirely analogous to the entity table but
- * created for better performance.
- *
- * Example application table record:
- *
- * 
- * |-|
- * |  Row   | Column Family| Column Family| Column Family|
- * |  key   | info | metrics  | config   |
- * |-|
- * | clusterId! | id:appId | metricId1:   | configKey1:  |
- * | userName!  |  | metricValue1 | configValue1 |
- * | flowName!  | created_time:| @timestamp1  |  |
- * | flowRunId! | 1392993084018|  | configKey2:  |
- * | AppId  |  | metriciD1:   | configValue2 |
- * || i!infoKey:   | metricValue2 |  |
- * || infoValue| @timestamp2  |  |
- * ||  |  |  |
- * || r!relatesToKey:  | metricId2:   |  |
- * || id3=id4=id5  | metricValue1 |  |
- * ||  | @timestamp2  |  |
- * || s!isRelatedToKey:|  |  |
- * || id7=id9=id6  |  |  |
- * ||  |  |  |
- * || e!eventId=timestamp=infoKey: |  |  |
- * || eventInfoValue   |  |  |
- * ||  |  |  |
- * || flowVersion: |  |  |
- * || versionValue |  |  |
- * |-|
- * 
- */
-public class ApplicationTable extends BaseTable<ApplicationTable> {
-  /** application prefix. */
-  private static final String PREFIX =
-  

[23/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for flow run
+ * table. Looks through the list of cells per row, checks their tags and does
+ * operation on those cells as per the cell tags. Transforms reads of the 
stored
+ * metrics into calculated sums for each column Also, finds the min and max for
+ * start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * use a special application id to represent the flow id this is needed since
+   * TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List<Cell> availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner 
internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation 
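
The coprocessor plumbing is cut off above, but the aggregation the class comment describes, folding the stored metric cells of a column into a running sum, can be sketched independently. This assumes, as LongConverter does elsewhere in the module, that metric values are encoded as 8-byte longs.

import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

final class MetricSumSketch {
  private MetricSumSketch() {
  }

  /** Sums every cell whose value is a serialized long, skipping anything else. */
  static long sum(List<Cell> cells) {
    long total = 0L;
    for (Cell cell : cells) {
      byte[] value = CellUtil.cloneValue(cell);
      if (value.length == Bytes.SIZEOF_LONG) {
        total += Bytes.toLong(value); // each stored reading contributes to the sum
      }
    }
    return total;
  }
}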

[08/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
new file mode 100644
index 000..4e1ab8a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -0,0 +1,648 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for generic entities that are stored in the entity
+ * table.
+ */
+class GenericEntityReader extends TimelineEntityReader {
+  private static final EntityTable ENTITY_TABLE = new EntityTable();
+
+  /**
+   * Used to look up the flow context.
+   */
+  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
+
+  /**
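
The reader narrows its Gets and Scans with the filter classes imported above. The following sketch shows the general composition pattern only; the family name and qualifier prefix passed in are caller-chosen placeholders, not the reader's actual filter logic.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.util.Bytes;

public final class EntityScanFilterSketch {
  private EntityScanFilterSketch() {
  }

  /** Restricts a scan to one column family and to qualifiers with a given prefix. */
  static Scan restrict(Scan scan, String family, String qualifierPrefix) {
    FilterList filters = new FilterList(Operator.MUST_PASS_ALL);
    filters.addFilter(new FamilyFilter(
        CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(family))));
    filters.addFilter(new QualifierFilter(
        CompareOp.EQUAL, new BinaryPrefixComparator(Bytes.toBytes(qualifierPrefix))));
    scan.setFilter(filters);
    return scan;
  }
}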

[12/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 000..be55db5
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,388 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+/**
+ * This class is meant to be used only by explicit Columns, and not directly to
+ * write by clients.
+ *
+ * @param <T> refers to the table.
+ */
+public class ColumnHelper<T> {
+  private static final Log LOG = LogFactory.getLog(ColumnHelper.class);
+
+  private final ColumnFamily<T> columnFamily;
+
+  /**
+   * Local copy of bytes representation of columnFamily so that we can avoid
+   * cloning a new copy over and over.
+   */
+  private final byte[] columnFamilyBytes;
+
+  private final ValueConverter converter;
+
+  public ColumnHelper(ColumnFamily<T> columnFamily) {
+this(columnFamily, GenericConverter.getInstance());
+  }
+
+  public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter) {
+this.columnFamily = columnFamily;
+columnFamilyBytes = columnFamily.getBytes();
+if (converter == null) {
+  this.converter = GenericConverter.getInstance();
+} else {
+  this.converter = converter;
+}
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent 
over
+   * the wire as part of a batch.
+   *
+   * @param rowKey
+   *  identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *  used to modify the underlying HBase table
+   * @param columnQualifier
+   *  column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *  version timestamp. When null the current timestamp multiplied 
with
+   *  TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
+   *  app id will be used
+   * @param inputValue
+   *  the value to write to the rowKey and column qualifier. Nothing
+   *  gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during store operation(sending
+   *  mutation to table).
+   */
+  public void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
+  byte[] columnQualifier, Long timestamp, Object inputValue,
+  Attribute... attributes) throws IOException {
+if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) 
{
+  return;
+}
+Put p = new Put(rowKey);
+timestamp = getPutTimestamp(timestamp, attributes);
+p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+converter.encodeValue(inputValue));
+if ((attributes != null) && (attributes.length > 0)) {
+  for (Attribute attribute : attributes) {
+p.setAttribute(attribute.getName(), 

[09/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 000..0e3c8ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,728 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for flow run
+ * table. Looks through the list of cells per row, checks their tags and does
+ * operation on those cells as per the cell tags. Transforms reads of the 
stored
+ * metrics into calculated sums for each column Also, finds the min and max for
+ * start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Log LOG = LogFactory.getLog(FlowScanner.class);
+
+  /**
+   * use a special application id to represent the flow id this is needed since
+   * TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_000_";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List<Cell> availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner 
internalScanner,
+  FlowScannerOperation action) {
+this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+  InternalScanner internalScanner, FlowScannerOperation 

[01/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-alpha2 18f64065d -> 9a925cb8e
  refs/heads/trunk 60865c8ea -> b01514f65


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
deleted file mode 100644
index 5beb189..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
-import org.junit.Test;
-
-
-public class TestRowKeys {
-
-  private final static String QUALIFIER_SEP = Separator.QUALIFIERS.getValue();
-  private final static byte[] QUALIFIER_SEP_BYTES = Bytes
-  .toBytes(QUALIFIER_SEP);
-  private final static String CLUSTER = "cl" + QUALIFIER_SEP + "uster";
-  private final static String USER = QUALIFIER_SEP + "user";
-  private final static String FLOW_NAME = "dummy_" + QUALIFIER_SEP + "flow"
-  + QUALIFIER_SEP;
-  private final static Long FLOW_RUN_ID;
-  private final static String APPLICATION_ID;
-  static {
-long runid = Long.MAX_VALUE - 900L;
-byte[] longMaxByteArr = Bytes.toBytes(Long.MAX_VALUE);
-byte[] byteArr = Bytes.toBytes(runid);
-int sepByteLen = QUALIFIER_SEP_BYTES.length;
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[i] = (byte) (longMaxByteArr[i] - QUALIFIER_SEP_BYTES[i]);
-  }
-}
-FLOW_RUN_ID = Bytes.toLong(byteArr);
-long clusterTs = System.currentTimeMillis();
-byteArr = Bytes.toBytes(clusterTs);
-if (sepByteLen <= byteArr.length) {
-  for (int i = 0; i < sepByteLen; i++) {
-byteArr[byteArr.length - sepByteLen + i] =
-(byte) (longMaxByteArr[byteArr.length - sepByteLen + i] -
-QUALIFIER_SEP_BYTES[i]);
-  }
-}
-clusterTs = Bytes.toLong(byteArr);
-int seqId = 222;
-APPLICATION_ID = ApplicationId.newInstance(clusterTs, seqId).toString();
-  }
-
-  private static void verifyRowPrefixBytes(byte[] byteRowKeyPrefix) {
-int sepLen = QUALIFIER_SEP_BYTES.length;
-for (int i = 0; i < sepLen; i++) {
-  assertTrue(
-  "Row key prefix not encoded properly.",
-  byteRowKeyPrefix[byteRowKeyPrefix.length - sepLen + i] ==
-  QUALIFIER_SEP_BYTES[i]);
-}
-  }
-
-  @Test
-  public void testApplicationRowKey() {
-byte[] byteRowKey =
-new ApplicationRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
-APPLICATION_ID).getRowKey();
-ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(byteRowKey);

[25/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for tables where a username is the row key prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file
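The USERNAME_SPLITS array above exists so that tables whose row keys begin with a username can be pre-split at creation time. A hedged sketch of how such split points are typically handed to HBase; the table name and column family used here are assumptions, not the schema creator's real names.

```java
// Illustrative sketch of pre-splitting a table along the username split points.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;

public final class PreSplitExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Hypothetical table name and family; the real schema creator derives its own.
      HTableDescriptor desc =
          new HTableDescriptor(TableName.valueOf("prod.timelineservice.entity"));
      desc.addFamily(new HColumnDescriptor("i"));
      // Regions are created along the username-based split points, so rows whose
      // keys start with common username prefixes land in different regions.
      admin.createTable(desc, TimelineHBaseSchemaConstants.getUsernameSplits());
    }
  }
}
```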

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or 

[13/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 000..42488f4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the application table.
+ */
+public enum ApplicationColumnPrefix implements ColumnPrefix {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(ApplicationColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(ApplicationColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(ApplicationColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an application.
+   */
+  EVENT(ApplicationColumnFamily.INFO, "e"),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(ApplicationColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(ApplicationColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnHelper column;
+  private final ColumnFamily columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix) {
+this(columnFamily, columnPrefix, GenericConverter.getInstance());
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  private ApplicationColumnPrefix(ColumnFamily columnFamily,
+  String columnPrefix, ValueConverter converter) {
+column = new ColumnHelper(columnFamily, converter);
+this.columnFamily = 
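The enum above pairs a column family with an optional string prefix and a value converter. A simplified, self-contained sketch of that pattern follows; it is not the patch code, and the "!" separator and family names are assumptions for illustration.

```java
// Simplified pattern sketch: the stored column name is "<prefix>!<qualifier>"
// when a prefix is present, otherwise the raw qualifier.
import java.nio.charset.StandardCharsets;

enum ExampleColumnPrefix {
  EVENT("i", "e"),      // events stored under an "info"-style family with prefix "e"
  CONFIG("c", null);    // configs use the raw config key as the column name

  private final String family;
  private final String prefix;

  ExampleColumnPrefix(String family, String prefix) {
    this.family = family;
    this.prefix = prefix;
  }

  byte[] getColumnQualifier(String qualifier) {
    String column = (prefix == null) ? qualifier : prefix + "!" + qualifier;
    return column.getBytes(StandardCharsets.UTF_8);
  }

  byte[] getColumnFamily() {
    return family.getBytes(StandardCharsets.UTF_8);
  }
}
```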

[11/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 000..8e6c259
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their 
own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+  Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+  Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+  Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+  Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+  Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+  Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+  Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+  Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+  Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+  Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for tables where a username is the row key prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+byte[][] kloon = USERNAME_SPLITS.clone();
+// Deep copy.
+for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+  kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+}
+return kloon;
+  }
+
+}
\ No newline at end of file
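The deep copy in getUsernameSplits() above is defensive: callers may mutate the returned arrays without corrupting the shared constant. A small sketch of that property, assuming only the public method shown above.

```java
// Sketch: mutating the returned splits does not affect a later call.
import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;

public final class DeepCopyCheck {
  public static void main(String[] args) {
    byte[][] splits = TimelineHBaseSchemaConstants.getUsernameSplits();
    splits[0][0] = (byte) 'z';   // scribble over the caller's copy only

    // A fresh call still starts with the original first split ("a").
    byte[][] splitsAgain = TimelineHBaseSchemaConstants.getUsernameSplits();
    System.out.println((char) splitsAgain[0][0]);   // prints 'a'
  }
}
```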

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 000..d03b37d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or 

[07/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
deleted file mode 100644
index cccae26..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-
-/**
- * Set of utility methods used by timeline filter classes.
- */
-public final class TimelineFilterUtils {
-
-  private static final Log LOG = LogFactory.getLog(TimelineFilterUtils.class);
-
-  private TimelineFilterUtils() {
-  }
-
-  /**
-   * Returns the equivalent HBase filter list's {@link Operator}.
-   *
-   * @param op timeline filter list operator.
-   * @return HBase filter list's Operator.
-   */
-  private static Operator getHBaseOperator(TimelineFilterList.Operator op) {
-switch (op) {
-case AND:
-  return Operator.MUST_PASS_ALL;
-case OR:
-  return Operator.MUST_PASS_ONE;
-default:
-  throw new IllegalArgumentException("Invalid operator");
-}
-  }
-
-  /**
-   * Returns the equivalent HBase compare filter's {@link CompareOp}.
-   *
-   * @param op timeline compare op.
-   * @return HBase compare filter's CompareOp.
-   */
-  private static CompareOp getHBaseCompareOp(
-  TimelineCompareOp op) {
-switch (op) {
-case LESS_THAN:
-  return CompareOp.LESS;
-case LESS_OR_EQUAL:
-  return CompareOp.LESS_OR_EQUAL;
-case EQUAL:
-  return CompareOp.EQUAL;
-case NOT_EQUAL:
-  return CompareOp.NOT_EQUAL;
-case GREATER_OR_EQUAL:
-  return CompareOp.GREATER_OR_EQUAL;
-case GREATER_THAN:
-  return CompareOp.GREATER;
-default:
-  throw new IllegalArgumentException("Invalid compare operator");
-}
-  }
-
-  /**
-   * Converts a {@link TimelinePrefixFilter} to an equivalent HBase
-   * {@link QualifierFilter}.
-   * @param colPrefix
-   * @param filter
-   * @return a {@link QualifierFilter} object
-   */
-  private static  Filter createHBaseColQualPrefixFilter(
-  ColumnPrefix colPrefix, TimelinePrefixFilter filter) {
-return new QualifierFilter(getHBaseCompareOp(filter.getCompareOp()),
-new BinaryPrefixComparator(
-colPrefix.getColumnPrefixBytes(filter.getPrefix())));
-  }
-
-  /**
-   * Create a HBase {@link QualifierFilter} for the passed column prefix and
-   * compare op.
-   *
-   * @param  Describes the type of column prefix.
-   * @param compareOp compare op.
-   * @param columnPrefix column prefix.
-   * @return a column 
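The utility being moved here translates timeline filter operators and compare ops into HBase's FilterList and CompareOp. A hedged sketch of the kind of HBase filter tree these helpers produce; the "e!"/"i!" column-prefix bytes are assumptions for the sketch.

```java
// Roughly what a TimelineFilterList(OR, prefix("e"), prefix("i")) becomes on the
// HBase side: MUST_PASS_ONE corresponds to the timeline OR in getHBaseOperator().
import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.util.Bytes;

public final class FilterSketch {
  public static FilterList eventOrInfoColumns() {
    FilterList list = new FilterList(Operator.MUST_PASS_ONE);
    list.addFilter(new QualifierFilter(CompareOp.EQUAL,
        new BinaryPrefixComparator(Bytes.toBytes("e!"))));
    list.addFilter(new QualifierFilter(CompareOp.EQUAL,
        new BinaryPrefixComparator(Bytes.toBytes("i!"))));
    return list;
  }
}
```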

[05/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
deleted file mode 100644
index e93b470..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-/**
- * A bunch of utility functions used in HBase TimelineService backend.
- */
-public final class HBaseTimelineStorageUtils {
-  /** milliseconds in one day. */
-  public static final long MILLIS_ONE_DAY = 86400000L;
-  private static final Log LOG =
-  LogFactory.getLog(HBaseTimelineStorageUtils.class);
-
-  private HBaseTimelineStorageUtils() {
-  }
-
-
-  /**
-   * Combines the input array of attributes and the input aggregation operation
-   * into a new array of attributes.
-   *
-   * @param attributes Attributes to be combined.
-   * @param aggOp Aggregation operation.
-   * @return array of combined attributes.
-   */
-  public static Attribute[] combineAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int newLength = getNewLengthCombinedAttributes(attributes, aggOp);
-Attribute[] combinedAttributes = new Attribute[newLength];
-
-if (attributes != null) {
-  System.arraycopy(attributes, 0, combinedAttributes, 0, 
attributes.length);
-}
-
-if (aggOp != null) {
-  Attribute a2 = aggOp.getAttribute();
-  combinedAttributes[newLength - 1] = a2;
-}
-return combinedAttributes;
-  }
-
-  /**
-   * Returns a number for the new array size. The new array is the combination
-   * of input array of attributes and the input aggregation operation.
-   *
-   * @param attributes Attributes.
-   * @param aggOp Aggregation operation.
-   * @return the size for the new array
-   */
-  private static int getNewLengthCombinedAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int oldLength = getAttributesLength(attributes);
-int aggLength = getAppOpLength(aggOp);
-return oldLength + aggLength;
-  }
-
-  private static int getAppOpLength(AggregationOperation aggOp) {
-if (aggOp != null) {
-  return 1;
-}
-return 0;
-  }
-
-  private static int getAttributesLength(Attribute[] attributes) {
-if (attributes != null) {
-  return attributes.length;
-}
-return 0;
-  }
-
-  /**
-   * Returns the first seen aggregation operation as seen in the list of input
-   * tags or null otherwise.
-   *
-   * @param tags list of HBase tags.
-   * @return AggregationOperation
-   */
-  public static 
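A small usage sketch of the combineAttributes() utility shown above; it is not from the patch, and AggregationOperation.SUM is assumed to be one of that enum's constants.

```java
// combineAttributes() tolerates a null attribute array and then returns only the
// aggregation attribute, appended in the last slot.
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;

public final class CombineAttributesExample {
  public static Attribute[] sumOnly() {
    Attribute[] combined =
        HBaseTimelineStorageUtils.combineAttributes(null, AggregationOperation.SUM);
    // combined.length == 1: the SUM attribute occupies the only slot.
    return combined;
  }
}
```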

[19/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a925cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
deleted file mode 100644
index e93b470..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-/**
- * A bunch of utility functions used in HBase TimelineService backend.
- */
-public final class HBaseTimelineStorageUtils {
-  /** milliseconds in one day. */
-  public static final long MILLIS_ONE_DAY = 86400000L;
-  private static final Log LOG =
-  LogFactory.getLog(HBaseTimelineStorageUtils.class);
-
-  private HBaseTimelineStorageUtils() {
-  }
-
-
-  /**
-   * Combines the input array of attributes and the input aggregation operation
-   * into a new array of attributes.
-   *
-   * @param attributes Attributes to be combined.
-   * @param aggOp Aggregation operation.
-   * @return array of combined attributes.
-   */
-  public static Attribute[] combineAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int newLength = getNewLengthCombinedAttributes(attributes, aggOp);
-Attribute[] combinedAttributes = new Attribute[newLength];
-
-if (attributes != null) {
-  System.arraycopy(attributes, 0, combinedAttributes, 0, 
attributes.length);
-}
-
-if (aggOp != null) {
-  Attribute a2 = aggOp.getAttribute();
-  combinedAttributes[newLength - 1] = a2;
-}
-return combinedAttributes;
-  }
-
-  /**
-   * Returns a number for the new array size. The new array is the combination
-   * of input array of attributes and the input aggregation operation.
-   *
-   * @param attributes Attributes.
-   * @param aggOp Aggregation operation.
-   * @return the size for the new array
-   */
-  private static int getNewLengthCombinedAttributes(Attribute[] attributes,
-  AggregationOperation aggOp) {
-int oldLength = getAttributesLength(attributes);
-int aggLength = getAppOpLength(aggOp);
-return oldLength + aggLength;
-  }
-
-  private static int getAppOpLength(AggregationOperation aggOp) {
-if (aggOp != null) {
-  return 1;
-}
-return 0;
-  }
-
-  private static int getAttributesLength(Attribute[] attributes) {
-if (attributes != null) {
-  return attributes.length;
-}
-return 0;
-  }
-
-  /**
-   * Returns the first seen aggregation operation as seen in the list of input
-   * tags or null otherwise.
-   *
-   * @param tags list of HBase tags.
-   * @return AggregationOperation
-   */
-  public static 
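One common use of the MILLIS_ONE_DAY constant above (assumed here, not shown in this excerpt) is truncating an event timestamp to the start of its day, which is what the flow activity row key stores instead of the raw timestamp. A sketch:

```java
// Truncate an epoch-millis timestamp to the top of its (UTC) day.
public static long topOfTheDay(long epochMillis) {
  return epochMillis - (epochMillis % 86400000L); // 24 * 60 * 60 * 1000 ms
}
```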

[03/28] hadoop git commit: YARN-5928. Move ATSv2 HBase backend code into a new module that is only dependent at runtime by yarn servers. Contributed by Haibo Chen.

2017-01-19 Thread sjlee
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b01514f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
deleted file mode 100644
index 2be6ef8..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
-
-/**
- * Coprocessor for flow run table.
- */
-public class FlowRunCoprocessor extends BaseRegionObserver {
-
-  private static final Log LOG = LogFactory.getLog(FlowRunCoprocessor.class);
-  private boolean isFlowRunRegion = false;
-
-  private Region region;
-  /**
-   * Generates a timestamp that is unique per row in a region; this is per region.
-   */
-  private final TimestampGenerator timestampGenerator =
-  new TimestampGenerator();
-
-  @Override
-  public void start(CoprocessorEnvironment e) throws IOException {
-if (e instanceof RegionCoprocessorEnvironment) {
-  RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
-  this.region = env.getRegion();
-  isFlowRunRegion = HBaseTimelineStorageUtils.isFlowRunTable(
-  region.getRegionInfo(), env.getConfiguration());
-}
-  }
-
-  public boolean isFlowRunRegion() {
-return isFlowRunRegion;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * This method adds the tags onto the cells in the Put. It is presumed that
-   * all the cells in one Put have the same set of Tags. The existing cell
-   * timestamp is overwritten for non-metric cells and each such cell gets a 
new
-   * unique timestamp generated by {@link TimestampGenerator}
-   *
-   * @see
-   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#prePut(org.apache
-   * .hadoop.hbase.coprocessor.ObserverContext,
-   * org.apache.hadoop.hbase.client.Put,
-   * org.apache.hadoop.hbase.regionserver.wal.WALEdit,
-   * 
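The coprocessor being moved here hooks start() to detect the flow run table and prePut() to retag cells and rewrite their timestamps. A stripped-down sketch of the HBase 1.x RegionObserver hook shape it relies on; the tag and timestamp logic is elided and the table detection is simplified, so this is illustration only, not the patch code.

```java
import java.io.IOException;

import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

public class MinimalFlowRunObserver extends BaseRegionObserver {
  private boolean isFlowRunRegion = false;

  @Override
  public void start(CoprocessorEnvironment e) throws IOException {
    // Only act when loaded on a region (real code also checks the table name).
    isFlowRunRegion = e instanceof RegionCoprocessorEnvironment;
  }

  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> ctx, Put put,
      WALEdit edit, Durability durability) throws IOException {
    if (!isFlowRunRegion) {
      return;
    }
    // The real coprocessor walks put.getFamilyCellMap(), copies each cell with
    // aggregation tags attached, and assigns a unique TimestampGenerator timestamp.
  }
}
```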

[1/2] hadoop git commit: YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed by Vrushali C.

2017-01-19 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 c9246f619 -> 0327a79d7
  refs/heads/YARN-5355-branch-2 c11078fd2 -> e0177c952


YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed 
by Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0327a79d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0327a79d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0327a79d

Branch: refs/heads/YARN-5355
Commit: 0327a79d79a4d56d9c7cb6889886afd2272b07d3
Parents: c9246f6
Author: Sangjin Lee 
Authored: Thu Jan 19 14:52:47 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 14:52:47 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../src/main/resources/yarn-default.xml |   9 +
 ...stTimelineReaderWebServicesHBaseStorage.java |   4 +-
 .../storage/DataGeneratorForTest.java   | 364 ---
 .../storage/TestHBaseTimelineStorageApps.java   |   6 +-
 .../TestHBaseTimelineStorageEntities.java   |   6 +-
 .../storage/TestHBaseTimelineStorageSchema.java |  12 +-
 .../storage/flow/TestFlowDataGenerator.java |  28 +-
 .../flow/TestHBaseStorageFlowActivity.java  |   8 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  46 ++-
 .../flow/TestHBaseStorageFlowRunCompaction.java |   8 +-
 .../storage/flow/FlowRunCoprocessor.java|  36 +-
 .../storage/flow/FlowRunTable.java  |  33 +-
 .../src/site/markdown/TimelineServiceV2.md  |  26 +-
 14 files changed, 322 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0327a79d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8752e5d..e45bfe3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2047,6 +2047,18 @@ public class YarnConfiguration extends Configuration {
   + "hbase.coprocessor.app-final-value-retention-milliseconds";
 
   /**
+   * The name of the setting for the location of the coprocessor
+   * jar on hdfs.
+   */
+  public static final String FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION =
+  TIMELINE_SERVICE_PREFIX
+  + "hbase.coprocessor.jar.hdfs.location";
+
+  /** default hdfs location for flowrun coprocessor jar. */
+  public static final String DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR =
+  "/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar";
+
+/**
* The name for setting that points to an optional HBase configuration
* (hbase-site.xml file) with settings that will override the ones found on
* the classpath.
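A hedged sketch of reading the new setting through Configuration: the constant names, the property key, and the default value are exactly the ones added in this hunk; the wrapper class is illustrative only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public final class CoprocessorJarLocation {
  public static String get(Configuration conf) {
    // Falls back to /hbase/coprocessor/hadoop-yarn-server-timelineservice.jar
    // when yarn.timeline-service.hbase.coprocessor.jar.hdfs.location is unset.
    return conf.get(
        YarnConfiguration.FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION,
        YarnConfiguration.DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR);
  }
}
```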

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0327a79d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 6f62fd8..16954a0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2243,6 +2243,15 @@
 
   
 
+The default hdfs location for flowrun coprocessor jar.
+
+yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+
+/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar
+  
+
+  
+
 The value of this parameter sets the prefix for all tables that are part of
 timeline service in the hbase storage schema. It can be set to "dev."
 or "staging." if it is to be used for development or staging instances.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0327a79d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 

[2/2] hadoop git commit: YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed by Vrushali C.

2017-01-19 Thread sjlee
YARN-6094. Update the coprocessor to be a dynamically loaded one. Contributed 
by Vrushali C.

(cherry picked from commit 0327a79d79a4d56d9c7cb6889886afd2272b07d3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0177c95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0177c95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0177c95

Branch: refs/heads/YARN-5355-branch-2
Commit: e0177c952c3b64c8dcf0408562faa98f725280e0
Parents: c11078f
Author: Sangjin Lee 
Authored: Thu Jan 19 14:52:47 2017 -0800
Committer: Sangjin Lee 
Committed: Thu Jan 19 14:55:55 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../src/main/resources/yarn-default.xml |   9 +
 ...stTimelineReaderWebServicesHBaseStorage.java |   4 +-
 .../storage/DataGeneratorForTest.java   | 364 ---
 .../storage/TestHBaseTimelineStorageApps.java   |   6 +-
 .../TestHBaseTimelineStorageEntities.java   |   6 +-
 .../storage/TestHBaseTimelineStorageSchema.java |  12 +-
 .../storage/flow/TestFlowDataGenerator.java |  28 +-
 .../flow/TestHBaseStorageFlowActivity.java  |   8 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  46 ++-
 .../flow/TestHBaseStorageFlowRunCompaction.java |   8 +-
 .../storage/flow/FlowRunCoprocessor.java|  36 +-
 .../storage/flow/FlowRunTable.java  |  33 +-
 .../src/site/markdown/TimelineServiceV2.md  |  26 +-
 14 files changed, 322 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0177c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 659b5eb..e1ecaf9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2058,6 +2058,18 @@ public class YarnConfiguration extends Configuration {
   + "hbase.coprocessor.app-final-value-retention-milliseconds";
 
   /**
+   * The name of the setting for the location of the coprocessor
+   * jar on hdfs.
+   */
+  public static final String FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION =
+  TIMELINE_SERVICE_PREFIX
+  + "hbase.coprocessor.jar.hdfs.location";
+
+  /** default hdfs location for flowrun coprocessor jar. */
+  public static final String DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR =
+  "/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar";
+
+/**
* The name for setting that points to an optional HBase configuration
* (hbase-site.xml file) with settings that will override the ones found on
* the classpath.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0177c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 416d705..a21ee2d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2272,6 +2272,15 @@
 
   
 
+The default hdfs location for flowrun coprocessor jar.
+
+yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+
+/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar
+  
+
+  
+
 The value of this parameter sets the prefix for all tables that are part of
 timeline service in the hbase storage schema. It can be set to "dev."
 or "staging." if it is to be used for development or staging instances.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0177c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 

[1/2] hadoop git commit: YARN-5980. Update documentation for single node hbase deploy. Contributed by Vrushali C.

2017-01-13 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 8df6f98e5 -> e1bdba778
  refs/heads/YARN-5355-branch-2 cf7f9e91f -> 8d1e41407


YARN-5980. Update documentation for single node hbase deploy. Contributed by 
Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1bdba77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1bdba77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1bdba77

Branch: refs/heads/YARN-5355
Commit: e1bdba77888723b435a235a96c8659029afd25d5
Parents: 8df6f98
Author: Sangjin Lee 
Authored: Fri Jan 13 09:12:48 2017 -0800
Committer: Sangjin Lee 
Committed: Fri Jan 13 09:12:48 2017 -0800

--
 .../src/site/markdown/TimelineServiceV2.md  | 63 +---
 1 file changed, 55 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1bdba77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 9a06b47..0d77f2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -165,18 +165,64 @@ New configuration parameters that are introduced with v.2 
are marked bold.
 ### Enabling Timeline Service v.2
 
  Preparing Apache HBase cluster for storage
+There are a few steps to be done for preparing the storage for Timeline 
Service v.2:
+
+Step 1) [Set up the HBase cluster](#Set_up_the_HBase_cluster)
+
+Step 2) [Enable the coprocessor](#Enable_the_coprocessor)
+
+Step 3) [Create the schema for Timeline Service v.2](#Create_schema)
+
+Each step is explained in more detail below.
+
+#  Step 1) Set up the HBase cluster
 The first part is to set up or pick an Apache HBase cluster to use as the 
storage cluster. The
-version of Apache HBase that is supported with Timeline Service v.2 is 1.1.x. 
The 1.0.x versions
-do not work with Timeline Service v.2. The 1.2.x versions have not been tested.
+version of Apache HBase that is supported with Timeline Service v.2 is 1.2.4. 
The 1.0.x versions
+do not work with Timeline Service v.2. Later versions of HBase have not been 
tested with
+Timeline Service.
+
+HBase has different deployment modes. Refer to the HBase book for 
understanding them and pick a
+mode that is suitable for your setup.
+(http://hbase.apache.org/book.html#standalone_dist)
+
+# Simple deployment for HBase
+If you are intent on a simple deploy profile for the Apache HBase cluster
+where the data loading is light but the data needs to persist across node
+comings and goings, you could consider the "Standalone HBase over HDFS" deploy 
mode.
+
+This is a useful variation on the standalone HBase setup and has all HBase 
daemons running inside
+one JVM but rather than persisting to the local filesystem, it persists to an 
HDFS instance.
+Writing to HDFS where data is replicated ensures that data is persisted across 
node
+comings and goings. To configure this standalone variant, edit your 
`hbase-site.xml` setting
+the `hbase.rootdir` to point at a directory in your HDFS instance but then set
+`hbase.cluster.distributed` to false. For example:
+
+```
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://namenode.example.org:8020/hbase</value>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>false</value>
+  </property>
+</configuration>
+```
+
+For more details on this mode, refer to
+http://hbase.apache.org/book.html#standalone.over.hdfs .
+
+Once you have an Apache HBase cluster ready to use, perform the following 
steps.
 
-Once you have an Apache HBase cluster ready to use for this purpose, perform 
the following steps.
+#  Step 2) Enable the coprocessor
 
-First, add the timeline service jar to the HBase classpath in all HBase 
machines in the cluster. It
+Step 2.1) Add the timeline service jar to the HBase classpath in all HBase 
machines in the cluster. It
 is needed for the coprocessor as well as the schema creator. For example,
 
 cp hadoop-yarn-server-timelineservice-3.0.0-alpha1-SNAPSHOT.jar 
/usr/hbase/lib/
 
-Then, enable the coprocessor that handles the aggregation. To enable it, add 
the following entry in
+Step 2.2) Enable the coprocessor that handles the aggregation. To enable it, 
add the following entry in
 region servers' `hbase-site.xml` file (generally located in the `conf` 
directory) as follows:
 
 ```
@@ -186,10 +232,11 @@ region servers' `hbase-site.xml` file (generally located 
in the `conf` directory
 
 ```
 
-Restart the region servers and the master to pick up the 

[2/2] hadoop git commit: YARN-5976. Update hbase version to 1.2. Contributed by Vrushali C.

2016-12-21 Thread sjlee
YARN-5976. Update hbase version to 1.2. Contributed by Vrushali C.

(cherry picked from commit f945008d1cf5730bdebeae501ed0e42477ad219e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9bcfbf5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9bcfbf5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9bcfbf5e

Branch: refs/heads/YARN-5355-branch-2
Commit: 9bcfbf5ec172715c652d7dfe4900fd73631139bb
Parents: cf8e3a8
Author: Sangjin Lee 
Authored: Wed Dec 21 09:53:07 2016 -0800
Committer: Sangjin Lee 
Committed: Wed Dec 21 10:04:03 2016 -0800

--
 LICENSE.txt |   8 +-
 hadoop-project/pom.xml  |  26 +-
 .../pom.xml | 142 +---
 ...TestPhoenixOfflineAggregationWriterImpl.java | 161 -
 .../hadoop-yarn-server-timelineservice/pom.xml  |  26 +-
 .../PhoenixOfflineAggregationWriterImpl.java| 358 ---
 .../storage/TimelineSchemaCreator.java  |  22 --
 7 files changed, 18 insertions(+), 725 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bcfbf5e/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index ee5d528..fee4ae4 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1542,12 +1542,6 @@ JLine 0.9.94
 leveldbjni-all 1.8
 Hamcrest Core 1.3
 xmlenc Library 0.52
-StringTemplate 4 4.0.7
-ANTLR 3 Tool 3.5
-ANTLR 3 Runtime 3.5
-ANTLR StringTemplate 3.2.1
-ASM All 5.0.2
-sqlline 1.1.8
 

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
@@ -1778,7 +1772,7 @@ the Licensor and You.
 
 The binary distribution of this product bundles these dependencies under the
 following license:
-jamon-runtime 2.3.1
+jamon-runtime 2.4.1
 

   MOZILLA PUBLIC LICENSE
 Version 1.1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bcfbf5e/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 771d3e4..6c6de28 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -50,8 +50,7 @@
 
 0.8.2.1
 
-1.1.3
-4.7.0-HBase-1.1
+1.2.4
 2.5.1
 
 ${project.version}
@@ -1095,29 +1094,6 @@
 tests
  
   
-org.apache.phoenix
-phoenix-core
-${phoenix.version}
-
-  
-  
-jline
-jline
-  
- 
-joda-time
-joda-time
-  
-
-  
-  
-org.apache.phoenix
-phoenix-core
-test-jar
-${phoenix.version}
-test
-  
-  
 org.apache.hbase
 hbase-it
 ${hbase.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9bcfbf5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index c627112..ed014de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -56,10 +56,6 @@
   org.apache.hadoop
   hadoop-common
 
-
-  org.apache.phoenix
-  phoenix-core
-
   
 
 
@@ -76,6 +72,8 @@
   
 
 
+
 
   org.apache.hadoop
   hadoop-auth
@@ -111,20 +109,6 @@
   
 
 
-
-
-  org.apache.hadoop
-  hadoop-yarn-server-common
-  test
-  
-
-  org.apache.hadoop
-  hadoop-common
-
-  
-
-
 
   org.apache.hadoop
   hadoop-yarn-server-applicationhistoryservice
@@ -145,14 +129,14 @@
 
 
   com.sun.jersey
-  jersey-core
+  jersey-client
   test
 
 
 
-  com.sun.jersey
-  jersey-client
-  test
+  javax.ws.rs
+  jsr311-api
+  1.1.1
 
 
 
@@ -225,23 +209,6 @@
 
 
   org.apache.hbase
-  hbase-common
-  tests
-  test
-  
-
-  org.apache.hadoop
-  hadoop-common
-
-

[1/2] hadoop git commit: YARN-5976. Update hbase version to 1.2. Contributed by Vrushali C.

2016-12-21 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 8288030cb -> f945008d1
  refs/heads/YARN-5355-branch-2 cf8e3a8f3 -> 9bcfbf5ec


YARN-5976. Update hbase version to 1.2. Contributed by Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f945008d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f945008d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f945008d

Branch: refs/heads/YARN-5355
Commit: f945008d1cf5730bdebeae501ed0e42477ad219e
Parents: 8288030
Author: Sangjin Lee 
Authored: Wed Dec 21 09:53:07 2016 -0800
Committer: Sangjin Lee 
Committed: Wed Dec 21 09:53:07 2016 -0800

--
 LICENSE.txt |   8 +-
 hadoop-project/pom.xml  |  26 +-
 .../pom.xml | 144 +---
 ...TestPhoenixOfflineAggregationWriterImpl.java | 161 -
 .../hadoop-yarn-server-timelineservice/pom.xml  |  26 +-
 .../PhoenixOfflineAggregationWriterImpl.java| 358 ---
 .../storage/TimelineSchemaCreator.java  |  22 --
 7 files changed, 20 insertions(+), 725 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f945008d/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 252968f..e7202bd 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1643,12 +1643,6 @@ JLine 0.9.94
 leveldbjni-all 1.8
 Hamcrest Core 1.3
 xmlenc Library 0.52
-StringTemplate 4 4.0.7
-ANTLR 3 Tool 3.5
-ANTLR 3 Runtime 3.5
-ANTLR StringTemplate 3.2.1
-ASM All 5.0.2
-sqlline 1.1.8
 

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
@@ -1879,7 +1873,7 @@ the Licensor and You.
 
 The binary distribution of this product bundles these dependencies under the
 following license:
-jamon-runtime 2.3.1
+jamon-runtime 2.4.1
 

   MOZILLA PUBLIC LICENSE
 Version 1.1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f945008d/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index ca567c5..887e8cb 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -49,8 +49,7 @@
 2.11.0
 
 0.8.2.1
-1.1.3
-4.7.0-HBase-1.1
+1.2.4
 2.5.1
 
 ${project.version}
@@ -1152,29 +1151,6 @@
 tests
   
   
-org.apache.phoenix
-phoenix-core
-${phoenix.version}
-
-  
-  
-jline
-jline
-  
-  
-joda-time
-joda-time
-  
-
-  
-  
-org.apache.phoenix
-phoenix-core
-test-jar
-${phoenix.version}
-test
-  
-  
 org.apache.hbase
 hbase-it
 ${hbase.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f945008d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index c4786c7..ed8d09a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -56,10 +56,6 @@
   org.apache.hadoop
   hadoop-common
 
-
-  org.apache.phoenix
-  phoenix-core
-
   
 
 
@@ -80,6 +76,8 @@
   
 
 
+
 
   org.apache.hadoop
   hadoop-auth
@@ -115,20 +113,6 @@
   
 
 
-
-
-  org.apache.hadoop
-  hadoop-yarn-server-common
-  test
-  
-
-  org.apache.hadoop
-  hadoop-common
-
-  
-
-
 
   org.apache.hadoop
   hadoop-yarn-server-applicationhistoryservice
@@ -149,14 +133,14 @@
 
 
   com.sun.jersey
-  jersey-core
+  jersey-client
   test
 
 
 
-  com.sun.jersey
-  jersey-client
-  test
+  javax.ws.rs
+  jsr311-api
+  1.1.1
 
 
 
@@ -229,23 +213,6 @@
 
 
   org.apache.hbase
-  hbase-common
-  tests
-  test
-  
-

hadoop git commit: YARN-5976. Update hbase version to 1.2. Contributed by Vrushali C.

2016-12-21 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk f6e2521eb -> 8b042bc1e


YARN-5976. Update hbase version to 1.2. Contributed by Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b042bc1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b042bc1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b042bc1

Branch: refs/heads/trunk
Commit: 8b042bc1e6ae5e18d435d6a184dec1811cc3a513
Parents: f6e2521
Author: Sangjin Lee 
Authored: Wed Dec 21 09:43:17 2016 -0800
Committer: Sangjin Lee 
Committed: Wed Dec 21 09:43:17 2016 -0800

--
 LICENSE.txt |   8 +-
 hadoop-project/pom.xml  |  26 +-
 .../pom.xml | 142 +---
 ...TestPhoenixOfflineAggregationWriterImpl.java | 161 -
 .../hadoop-yarn-server-timelineservice/pom.xml  |  30 +-
 .../PhoenixOfflineAggregationWriterImpl.java| 358 ---
 .../storage/TimelineSchemaCreator.java  |  22 --
 7 files changed, 27 insertions(+), 720 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b042bc1/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 2183f0e..fd07edf 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1643,12 +1643,6 @@ JLine 0.9.94
 leveldbjni-all 1.8
 Hamcrest Core 1.3
 xmlenc Library 0.52
-StringTemplate 4 4.0.7
-ANTLR 3 Tool 3.5
-ANTLR 3 Runtime 3.5
-ANTLR StringTemplate 3.2.1
-ASM All 5.0.2
-sqlline 1.1.8
 

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
@@ -1879,7 +1873,7 @@ the Licensor and You.
 
 The binary distribution of this product bundles these dependencies under the
 following license:
-jamon-runtime 2.3.1
+jamon-runtime 2.4.1
 

   MOZILLA PUBLIC LICENSE
 Version 1.1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b042bc1/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c9ee793..a935292 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -49,8 +49,7 @@
 2.11.0
 
 0.8.2.1
-1.1.3
-4.7.0-HBase-1.1
+1.2.4
 2.5.1
 
 ${project.version}
@@ -1219,29 +1218,6 @@
 tests
   
   
-org.apache.phoenix
-phoenix-core
-${phoenix.version}
-
-  
-  
-jline
-jline
-  
-  
-joda-time
-joda-time
-  
-
-  
-  
-org.apache.phoenix
-phoenix-core
-test-jar
-${phoenix.version}
-test
-  
-  
 org.apache.hbase
 hbase-it
 ${hbase.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b042bc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index 026ef75..f151e1d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -55,10 +55,6 @@
   org.apache.hadoop
   hadoop-common
 
-
-  org.apache.phoenix
-  phoenix-core
-
   
 
 
@@ -79,6 +75,8 @@
   
 
 
+
 
   org.apache.hadoop
   hadoop-auth
@@ -118,18 +116,6 @@
  dependency -->
 
   org.apache.hadoop
-  hadoop-yarn-server-common
-  test
-  
-
-  org.apache.hadoop
-  hadoop-common
-
-  
-
-
-
-  org.apache.hadoop
   hadoop-yarn-server-applicationhistoryservice
   test
   
@@ -148,14 +134,14 @@
 
 
   com.sun.jersey
-  jersey-core
+  jersey-client
   test
 
 
 
-  com.sun.jersey
-  jersey-client
-  test
+  javax.ws.rs
+  jsr311-api
+  1.1.1
 
 
 
@@ -228,23 +214,6 @@
 
 
   org.apache.hbase
-  hbase-common
-  tests
-  test
-  
-
-  org.apache.hadoop
-  

[1/2] hadoop git commit: YARN-5925. Extract hbase-backend-exclusive utility methods from TimelineStorageUtil. Contributed by Haibo Chen.

2016-12-09 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 6217b87f4 -> 8288030cb
  refs/heads/YARN-5355-branch-2 385d8fae8 -> cf8e3a8f3


YARN-5925. Extract hbase-backend-exclusive utility methods from 
TimelineStorageUtil. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8288030c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8288030c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8288030c

Branch: refs/heads/YARN-5355
Commit: 8288030cb4aa3b5a9425cc0a3f6df03a3eae1dfb
Parents: 6217b87
Author: Sangjin Lee 
Authored: Fri Dec 9 16:30:49 2016 -0800
Committer: Sangjin Lee 
Committed: Fri Dec 9 16:30:49 2016 -0800

--
 ...stTimelineReaderWebServicesHBaseStorage.java |   6 +-
 .../flow/TestHBaseStorageFlowActivity.java  |  12 +-
 .../flow/TestHBaseStorageFlowRunCompaction.java |  44 +--
 .../storage/HBaseTimelineReaderImpl.java|   4 +-
 .../storage/HBaseTimelineWriterImpl.java|   4 +-
 .../storage/TimelineSchemaCreator.java  |   4 +-
 .../storage/common/AppIdKeyConverter.java   |   5 +-
 .../common/HBaseTimelineStorageUtils.java   | 306 +++
 .../storage/common/LongConverter.java   |   2 +-
 .../storage/common/TimelineStorageUtils.java| 265 
 .../storage/flow/FlowActivityColumnPrefix.java  |  10 +-
 .../storage/flow/FlowActivityRowKey.java|   4 +-
 .../storage/flow/FlowRunColumn.java |   6 +-
 .../storage/flow/FlowRunColumnPrefix.java   |   6 +-
 .../storage/flow/FlowRunCoprocessor.java|   4 +-
 .../storage/flow/FlowScanner.java   |  13 +-
 .../storage/reader/EntityTypeReader.java|   6 +-
 .../storage/common/TestRowKeys.java |   2 +-
 18 files changed, 374 insertions(+), 329 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8288030c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index 2ed5d96..db1c1cf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -78,7 +78,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   private static HBaseTestingUtility util;
   private static long ts = System.currentTimeMillis();
   private static long dayTs =
-  TimelineStorageUtils.getTopOfTheDayTimestamp(ts);
+  HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
 
   @BeforeClass
   public static void setup() throws Exception {
@@ -984,7 +984,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   assertEquals(1, entities.size());
 
   long firstFlowActivity =
-  TimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
+  HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
 
   DateFormat fmt = TimelineReaderWebServices.DATE_FORMAT.get();
   uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
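
As background for the rename above: the day-boundary helper that moved into HBaseTimelineStorageUtils boils down to truncating an epoch-millisecond timestamp to midnight UTC. The following is a minimal, self-contained sketch of that idea, not a copy of the Hadoop source; the class name, constant name, and exact rounding behaviour are assumptions.

// Illustrative sketch of a top-of-the-day truncation helper (assumed shape,
// not copied from HBaseTimelineStorageUtils).
public final class DayBoundary {
  private static final long MILLIS_PER_DAY = 24L * 60 * 60 * 1000;

  private DayBoundary() {
  }

  // Returns 00:00:00.000 UTC of the day that contains ts (ts in epoch millis).
  public static long getTopOfTheDayTimestamp(long ts) {
    return ts - (ts % MILLIS_PER_DAY);
  }

  public static void main(String[] args) {
    // 1425016501000L is one of the timestamps exercised by the test above.
    System.out.println(getTopOfTheDayTimestamp(1425016501000L));
  }
}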


[2/2] hadoop git commit: YARN-5925. Extract hbase-backend-exclusive utility methods from TimelineStorageUtil. Contributed by Haibo Chen.

2016-12-09 Thread sjlee
YARN-5925. Extract hbase-backend-exclusive utility methods from 
TimelineStorageUtil. Contributed by Haibo Chen.

(cherry picked from commit 8288030cb4aa3b5a9425cc0a3f6df03a3eae1dfb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf8e3a8f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf8e3a8f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf8e3a8f

Branch: refs/heads/YARN-5355-branch-2
Commit: cf8e3a8f399d1cffd73dd1fbb001887340330f17
Parents: 385d8fa
Author: Sangjin Lee 
Authored: Fri Dec 9 16:30:49 2016 -0800
Committer: Sangjin Lee 
Committed: Fri Dec 9 16:31:28 2016 -0800

--
 ...stTimelineReaderWebServicesHBaseStorage.java |   6 +-
 .../flow/TestHBaseStorageFlowActivity.java  |  12 +-
 .../flow/TestHBaseStorageFlowRunCompaction.java |  44 +--
 .../storage/HBaseTimelineReaderImpl.java|   4 +-
 .../storage/HBaseTimelineWriterImpl.java|   4 +-
 .../storage/TimelineSchemaCreator.java  |   4 +-
 .../storage/common/AppIdKeyConverter.java   |   5 +-
 .../common/HBaseTimelineStorageUtils.java   | 306 +++
 .../storage/common/LongConverter.java   |   2 +-
 .../storage/common/TimelineStorageUtils.java| 265 
 .../storage/flow/FlowActivityColumnPrefix.java  |  10 +-
 .../storage/flow/FlowActivityRowKey.java|   4 +-
 .../storage/flow/FlowRunColumn.java |   6 +-
 .../storage/flow/FlowRunColumnPrefix.java   |   6 +-
 .../storage/flow/FlowRunCoprocessor.java|   4 +-
 .../storage/flow/FlowScanner.java   |  13 +-
 .../storage/reader/EntityTypeReader.java|   6 +-
 .../storage/common/TestRowKeys.java |   2 +-
 18 files changed, 374 insertions(+), 329 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8e3a8f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index 6bbafe3..a83d2dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -78,7 +78,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   private static HBaseTestingUtility util;
   private static long ts = System.currentTimeMillis();
   private static long dayTs =
-  TimelineStorageUtils.getTopOfTheDayTimestamp(ts);
+  HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
 
   @BeforeClass
   public static void setup() throws Exception {
@@ -962,7 +962,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   assertEquals(1, entities.size());
 
   long firstFlowActivity =
-  TimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
+  HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
 
   DateFormat fmt = TimelineReaderWebServices.DATE_FORMAT.get();
   uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8e3a8f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java

hadoop git commit: YARN-5925. Extract hbase-backend-exclusive utility methods from TimelineStorageUtil. Contributed by Haibo Chen.

2016-12-09 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2a28e8cf0 -> 55f5886ea


YARN-5925. Extract hbase-backend-exclusive utility methods from 
TimelineStorageUtil. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55f5886e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55f5886e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55f5886e

Branch: refs/heads/trunk
Commit: 55f5886ea24671ff3db87a64aaba2e76b3355455
Parents: 2a28e8c
Author: Sangjin Lee 
Authored: Fri Dec 9 16:17:24 2016 -0800
Committer: Sangjin Lee 
Committed: Fri Dec 9 16:17:24 2016 -0800

--
 ...stTimelineReaderWebServicesHBaseStorage.java |   6 +-
 .../flow/TestHBaseStorageFlowActivity.java  |  12 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  14 +-
 .../flow/TestHBaseStorageFlowRunCompaction.java |  44 ++--
 .../storage/common/AppIdKeyConverter.java   |   5 +-
 .../common/HBaseTimelineStorageUtils.java   | 243 +++
 .../storage/common/TimelineStorageUtils.java| 207 
 .../storage/flow/FlowActivityColumnPrefix.java  |  10 +-
 .../storage/flow/FlowActivityRowKey.java|   4 +-
 .../storage/flow/FlowRunColumn.java |   6 +-
 .../storage/flow/FlowRunColumnPrefix.java   |   6 +-
 .../storage/flow/FlowRunCoprocessor.java|   6 +-
 .../storage/flow/FlowScanner.java   |  13 +-
 .../storage/common/TestRowKeys.java |   2 +-
 14 files changed, 309 insertions(+), 269 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f5886e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index 17c01b5..63a75d3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -78,7 +78,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   private static HBaseTestingUtility util;
   private static long ts = System.currentTimeMillis();
   private static long dayTs =
-  TimelineStorageUtils.getTopOfTheDayTimestamp(ts);
+  HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
 
   @BeforeClass
   public static void setup() throws Exception {
@@ -984,7 +984,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   assertEquals(1, entities.size());
 
   long firstFlowActivity =
-  TimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
+  HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
 
   DateFormat fmt = TimelineReaderWebServices.DATE_FORMAT.get();
   uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f5886e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
--
diff --git 

[2/2] hadoop git commit: MAPREDUCE-6818. Remove direct reference to TimelineClientImpl. Contributed by Li Lu.

2016-12-08 Thread sjlee
MAPREDUCE-6818. Remove direct reference to TimelineClientImpl. Contributed by 
Li Lu.

(cherry picked from commit 6217b87f4a056cf704cef2e073b386b7803415de)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/385d8fae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/385d8fae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/385d8fae

Branch: refs/heads/YARN-5355-branch-2
Commit: 385d8fae88d5aaf439333f780189374116a5afee
Parents: d8e424d
Author: Sangjin Lee 
Authored: Thu Dec 8 18:14:09 2016 -0800
Committer: Sangjin Lee 
Committed: Thu Dec 8 18:14:46 2016 -0800

--
 .../org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java | 3 +--
 .../java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java| 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/385d8fae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
index 447ea4e..d553596 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
 
@@ -54,7 +53,7 @@ class JobHistoryFileReplayMapperV1 extends
 
   public void map(IntWritable key, IntWritable val, Context context) throws 
IOException {
 // collect the apps it needs to process
-TimelineClient tlc = new TimelineClientImpl();
+TimelineClient tlc = TimelineClient.createTimelineClient();
 TimelineEntityConverterV1 converter = new TimelineEntityConverterV1();
 JobHistoryFileReplayHelper helper = new 
JobHistoryFileReplayHelper(context);
 int replayMode = helper.getReplayMode();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/385d8fae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
index 16d14a1..6d6151f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 
 /**
* Adds simple entities with random string payload, events, metrics, and
@@ -46,7 +45,7 @@ class SimpleEntityWriterV1
 
   public void map(IntWritable key, IntWritable val, Context context)
   throws IOException {
-TimelineClient tlc = new TimelineClientImpl();
+TimelineClient tlc = TimelineClient.createTimelineClient();
 Configuration conf = context.getConfiguration();
 
 final int kbs = conf.getInt(KBS_SENT, KBS_SENT_DEFAULT);





[1/2] hadoop git commit: MAPREDUCE-6818. Remove direct reference to TimelineClientImpl. Contributed by Li Lu.

2016-12-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 17c4ab7eb -> 6217b87f4
  refs/heads/YARN-5355-branch-2 d8e424d85 -> 385d8fae8


MAPREDUCE-6818. Remove direct reference to TimelineClientImpl. Contributed by 
Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6217b87f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6217b87f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6217b87f

Branch: refs/heads/YARN-5355
Commit: 6217b87f4a056cf704cef2e073b386b7803415de
Parents: 17c4ab7
Author: Sangjin Lee 
Authored: Thu Dec 8 18:14:09 2016 -0800
Committer: Sangjin Lee 
Committed: Thu Dec 8 18:14:09 2016 -0800

--
 .../org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java | 3 +--
 .../java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java| 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6217b87f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
index 447ea4e..d553596 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
 
@@ -54,7 +53,7 @@ class JobHistoryFileReplayMapperV1 extends
 
   public void map(IntWritable key, IntWritable val, Context context) throws 
IOException {
 // collect the apps it needs to process
-TimelineClient tlc = new TimelineClientImpl();
+TimelineClient tlc = TimelineClient.createTimelineClient();
 TimelineEntityConverterV1 converter = new TimelineEntityConverterV1();
 JobHistoryFileReplayHelper helper = new 
JobHistoryFileReplayHelper(context);
 int replayMode = helper.getReplayMode();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6217b87f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
index 16d14a1..6d6151f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 
 /**
* Adds simple entities with random string payload, events, metrics, and
@@ -46,7 +45,7 @@ class SimpleEntityWriterV1
 
   public void map(IntWritable key, IntWritable val, Context context)
   throws IOException {
-TimelineClient tlc = new TimelineClientImpl();
+TimelineClient tlc = TimelineClient.createTimelineClient();
 Configuration conf = context.getConfiguration();
 
 final int kbs = conf.getInt(KBS_SENT, KBS_SENT_DEFAULT);





[1/2] hadoop git commit: YARN-5922. Remove direct references of HBaseTimelineWriter/Reader in core ATS classes. Contributed by Haibo Chen.

2016-12-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 12bce022e -> 17c4ab7eb
  refs/heads/YARN-5355-branch-2 5ee182f03 -> d8e424d85


YARN-5922. Remove direct references of HBaseTimelineWriter/Reader in core ATS 
classes. Contributed by Haibo Chen.

(cherry picked from commit a5a55a54ab1568e941062ea3dabdd237f71f15c4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17c4ab7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17c4ab7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17c4ab7e

Branch: refs/heads/YARN-5355
Commit: 17c4ab7ebb51088caf36fafedae8c256481eeed5
Parents: 12bce02
Author: Sangjin Lee 
Authored: Thu Dec 8 12:31:12 2016 -0800
Committer: Sangjin Lee 
Committed: Thu Dec 8 12:46:47 2016 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 +++
 .../collector/TimelineCollectorManager.java | 26 +--
 .../reader/TimelineReaderServer.java| 26 +--
 .../collector/TestTimelineCollectorManager.java | 74 
 .../reader/TestTimelineReaderServer.java| 43 
 5 files changed, 166 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17c4ab7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index bbfcba8..8752e5d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2004,9 +2004,18 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_WRITER_CLASS =
   TIMELINE_SERVICE_PREFIX + "writer.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_WRITER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice"
+  + ".storage.HBaseTimelineWriterImpl";
+
   public static final String TIMELINE_SERVICE_READER_CLASS =
   TIMELINE_SERVICE_PREFIX + "reader.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_READER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice" +
+  ".storage.HBaseTimelineReaderImpl";
+
+
   /**
* default schema prefix for hbase tables.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17c4ab7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 9758320..19896e8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -59,10 +58,7 @@ public class TimelineCollectorManager extends 
AbstractService {
 
   @Override
   public void serviceInit(Configuration conf) throws Exception {
-writer = ReflectionUtils.newInstance(conf.getClass(
-YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
-HBaseTimelineWriterImpl.class,
-TimelineWriter.class), conf);
+writer = createTimelineWriter(conf);
 writer.init(conf);
 // create a single dedicated thread for flushing the writer on a periodic
 // basis
@@ -75,6 +71,26 @@ public class 
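
The hunk above (the 26 added lines) is where the writer instantiation becomes pluggable. A hedged sketch of how such a createTimelineWriter helper can be written is below, using only the imports already visible in this diff (Configuration, YarnConfiguration, ReflectionUtils, YarnRuntimeException, TimelineWriter); the exact body and the exception messages in TimelineCollectorManager may differ.

  // Sketch only: instantiate the configured writer reflectively so the
  // collector has no compile-time dependency on HBaseTimelineWriterImpl.
  private TimelineWriter createTimelineWriter(Configuration conf) {
    String writerClassName = conf.get(
        YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WRITER_CLASS);
    try {
      Class<?> writerClass = Class.forName(writerClassName);
      if (!TimelineWriter.class.isAssignableFrom(writerClass)) {
        throw new YarnRuntimeException(
            writerClassName + " does not implement TimelineWriter");
      }
      return (TimelineWriter) ReflectionUtils.newInstance(writerClass, conf);
    } catch (ClassNotFoundException e) {
      throw new YarnRuntimeException(
          "Could not load timeline writer class " + writerClassName, e);
    }
  }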

[2/2] hadoop git commit: YARN-5922. Remove direct references of HBaseTimelineWriter/Reader in core ATS classes. Contributed by Haibo Chen.

2016-12-08 Thread sjlee
YARN-5922. Remove direct references of HBaseTimelineWriter/Reader in core ATS 
classes. Contributed by Haibo Chen.

(cherry picked from commit a5a55a54ab1568e941062ea3dabdd237f71f15c4)
(cherry picked from commit 17c4ab7ebb51088caf36fafedae8c256481eeed5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8e424d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8e424d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8e424d8

Branch: refs/heads/YARN-5355-branch-2
Commit: d8e424d8555790b7ba451c4c662a9988e2f9fa33
Parents: 5ee182f
Author: Sangjin Lee 
Authored: Thu Dec 8 12:31:12 2016 -0800
Committer: Sangjin Lee 
Committed: Thu Dec 8 12:47:49 2016 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 +++
 .../collector/TimelineCollectorManager.java | 26 +--
 .../reader/TimelineReaderServer.java| 26 +--
 .../collector/TestTimelineCollectorManager.java | 74 
 .../reader/TestTimelineReaderServer.java| 43 
 5 files changed, 166 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8e424d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index bb1a5d2..659b5eb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2015,9 +2015,18 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_WRITER_CLASS =
   TIMELINE_SERVICE_PREFIX + "writer.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_WRITER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice"
+  + ".storage.HBaseTimelineWriterImpl";
+
   public static final String TIMELINE_SERVICE_READER_CLASS =
   TIMELINE_SERVICE_PREFIX + "reader.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_READER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice" +
+  ".storage.HBaseTimelineReaderImpl";
+
+
   /**
* default schema prefix for hbase tables.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8e424d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 9758320..19896e8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -59,10 +58,7 @@ public class TimelineCollectorManager extends 
AbstractService {
 
   @Override
   public void serviceInit(Configuration conf) throws Exception {
-writer = ReflectionUtils.newInstance(conf.getClass(
-YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
-HBaseTimelineWriterImpl.class,
-TimelineWriter.class), conf);
+writer = createTimelineWriter(conf);
 writer.init(conf);
 // create a single dedicated thread for flushing the writer on a periodic
 // basis
@@ -75,6 +71,26 @@ public class TimelineCollectorManager extends 
AbstractService {
 

hadoop git commit: YARN-5922. Remove direct references of HBaseTimelineWriter/Reader in core ATS classes. Contributed by Haibo Chen.

2016-12-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/trunk c26551572 -> a5a55a54a


YARN-5922. Remove direct references of HBaseTimelineWriter/Reader in core ATS 
classes. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5a55a54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5a55a54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5a55a54

Branch: refs/heads/trunk
Commit: a5a55a54ab1568e941062ea3dabdd237f71f15c4
Parents: c265515
Author: Sangjin Lee 
Authored: Thu Dec 8 12:31:12 2016 -0800
Committer: Sangjin Lee 
Committed: Thu Dec 8 12:31:12 2016 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  9 +++
 .../collector/TimelineCollectorManager.java | 26 +--
 .../reader/TimelineReaderServer.java| 26 +--
 .../collector/TestTimelineCollectorManager.java | 74 
 .../reader/TestTimelineReaderServer.java| 43 
 5 files changed, 166 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a55a54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index fce78c9..4934964 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2019,9 +2019,18 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_WRITER_CLASS =
   TIMELINE_SERVICE_PREFIX + "writer.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_WRITER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice"
+  + ".storage.HBaseTimelineWriterImpl";
+
   public static final String TIMELINE_SERVICE_READER_CLASS =
   TIMELINE_SERVICE_PREFIX + "reader.class";
 
+  public static final String DEFAULT_TIMELINE_SERVICE_READER_CLASS =
+  "org.apache.hadoop.yarn.server.timelineservice" +
+  ".storage.HBaseTimelineReaderImpl";
+
+
   /** The setting that controls how often the timeline collector flushes the
* timeline writer.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a55a54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 9758320..19896e8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import 
org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -59,10 +58,7 @@ public class TimelineCollectorManager extends 
AbstractService {
 
   @Override
   public void serviceInit(Configuration conf) throws Exception {
-writer = ReflectionUtils.newInstance(conf.getClass(
-YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
-HBaseTimelineWriterImpl.class,
-TimelineWriter.class), conf);
+writer = createTimelineWriter(conf);
 writer.init(conf);
 // create a single dedicated thread for flushing the writer on a periodic
 // basis
@@ -75,6 +71,26 @@ public class TimelineCollectorManager extends 
AbstractService {
 super.serviceInit(conf);
   }
 
+  

[1/2] hadoop git commit: YARN-5433. Audit dependencies for Category-X. Contributed by Sangjin Lee.

2016-12-08 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 5c58e55ea -> 5ee182f03


YARN-5433. Audit dependencies for Category-X. Contributed by Sangjin Lee.

(cherry picked from commit f511cc89b66997e496f630bdd299d3068d43fd31)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2dd4185
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2dd4185
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2dd4185

Branch: refs/heads/YARN-5355-branch-2
Commit: f2dd4185478ffd9dbfb57b22d569840bfcd94ff3
Parents: 5c58e55
Author: Sangjin Lee 
Authored: Wed Oct 26 11:31:00 2016 -0700
Committer: Sangjin Lee 
Committed: Thu Dec 8 11:47:24 2016 -0800

--
 LICENSE.txt | 446 +++
 .../pom.xml |   8 +
 .../hadoop-yarn-server-timelineservice/pom.xml  |   4 +
 3 files changed, 458 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2dd4185/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 0e4b492..ee5d528 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -485,6 +485,8 @@ And the binary distribution of this product bundles these 
dependencies under the
 following license:
 Mockito 1.8.5
 SLF4J 1.7.10
+JCodings 1.0.8
+Joni 2.1.2
 

 
 The MIT License (MIT)
@@ -1540,6 +1542,12 @@ JLine 0.9.94
 leveldbjni-all 1.8
 Hamcrest Core 1.3
 xmlenc Library 0.52
+StringTemplate 4 4.0.7
+ANTLR 3 Tool 3.5
+ANTLR 3 Runtime 3.5
+ANTLR StringTemplate 3.2.1
+ASM All 5.0.2
+sqlline 1.1.8
 

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
@@ -1767,3 +1775,441 @@ representations with respect to the Work not specified 
here. Licensor shall not
 be bound by any additional provisions that may appear in any communication from
 You. This License may not be modified without the mutual written agreement of
 the Licensor and You.
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+jamon-runtime 2.3.1
+
+  MOZILLA PUBLIC LICENSE
+Version 1.1
+
+  ---
+
+1. Definitions.
+
+ 1.0.1. "Commercial Use" means distribution or otherwise making the
+ Covered Code available to a third party.
+
+ 1.1. "Contributor" means each entity that creates or contributes to
+ the creation of Modifications.
+
+ 1.2. "Contributor Version" means the combination of the Original
+ Code, prior Modifications used by a Contributor, and the Modifications
+ made by that particular Contributor.
+
+ 1.3. "Covered Code" means the Original Code or Modifications or the
+ combination of the Original Code and Modifications, in each case
+ including portions thereof.
+
+ 1.4. "Electronic Distribution Mechanism" means a mechanism generally
+ accepted in the software development community for the electronic
+ transfer of data.
+
+ 1.5. "Executable" means Covered Code in any form other than Source
+ Code.
+
+ 1.6. "Initial Developer" means the individual or entity identified
+ as the Initial Developer in the Source Code notice required by Exhibit
+ A.
+
+ 1.7. "Larger Work" means a work which combines Covered Code or
+ portions thereof with code not governed by the terms of this License.
+
+ 1.8. "License" means this document.
+
+ 1.8.1. "Licensable" means having the right to grant, to the maximum
+ extent possible, whether at the time of the initial grant or
+ subsequently acquired, any and all of the rights conveyed herein.
+
+ 1.9. "Modifications" means any addition to or deletion from the
+ substance or structure of either the Original Code or any previous
+ Modifications. When Covered Code is released as a series of files, a
+ Modification is:
+  A. Any addition to or deletion from the contents of a file
+  containing Original Code or previous Modifications.
+
+  B. Any new file that contains any part of the Original Code or
+  previous Modifications.
+
+ 1.10. "Original Code" means Source Code of computer software code
+ which is described in the Source Code notice required by Exhibit A as
+ Original Code, and which, at the time of its release under this
+ License is not already Covered Code governed by this License.
+
+ 

[2/2] hadoop git commit: YARN-5572. HBaseTimelineWriterImpl appears to reference a bad property name. Contributed by Varun Saxena.

2016-12-08 Thread sjlee
YARN-5572. HBaseTimelineWriterImpl appears to reference a bad property name. 
Contributed by Varun Saxena.

(cherry picked from commit c06114d6a360dddeb66c2dd9ad4fa5dae0cfbfb1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ee182f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ee182f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ee182f0

Branch: refs/heads/YARN-5355-branch-2
Commit: 5ee182f030bab1fa821593fe13d124d021823a39
Parents: f2dd418
Author: Naganarasimha 
Authored: Sun Nov 27 23:35:53 2016 +0530
Committer: Sangjin Lee 
Committed: Thu Dec 8 11:51:10 2016 -0800

--
 .../TestTimelineReaderWebServicesHBaseStorage.java|  2 +-
 .../timelineservice/storage/DataGeneratorForTest.java |  4 ++--
 .../storage/TestHBaseTimelineStorageApps.java |  8 
 .../storage/TestHBaseTimelineStorageEntities.java |  6 +++---
 .../storage/flow/TestHBaseStorageFlowActivity.java|  6 +++---
 .../storage/flow/TestHBaseStorageFlowRun.java | 14 +++---
 .../flow/TestHBaseStorageFlowRunCompaction.java   |  2 +-
 .../storage/HBaseTimelineWriterImpl.java  |  5 -
 8 files changed, 21 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ee182f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index e97ea5b..6bbafe3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -334,7 +334,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 HBaseTimelineWriterImpl hbi = null;
 Configuration c1 = util.getConfiguration();
 try {
-  hbi = new HBaseTimelineWriterImpl(c1);
+  hbi = new HBaseTimelineWriterImpl();
   hbi.init(c1);
   hbi.write(cluster, user, flow, flowVersion, runid, entity.getId(), te);
   hbi.write(cluster, user, flow, flowVersion, runid, entity1.getId(), te1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ee182f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
index b56a752..cafacab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
@@ -183,7 +183,7 @@ final class DataGeneratorForTest {
 te2.addEntity(entity2);
 HBaseTimelineWriterImpl hbi = null;
 try {
-  hbi = new HBaseTimelineWriterImpl(util.getConfiguration());
+  hbi = new HBaseTimelineWriterImpl();
   hbi.init(util.getConfiguration());
   hbi.start();
   String cluster = "cluster1";
@@ -401,7 +401,7 @@ final class DataGeneratorForTest {
 
 HBaseTimelineWriterImpl hbi = null;
 try {
-  hbi = new HBaseTimelineWriterImpl(util.getConfiguration());
+  hbi = new HBaseTimelineWriterImpl();
   hbi.init(util.getConfiguration());
   hbi.start();
   String cluster = "cluster1";
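
Pulling the pieces of this change together: configuration now reaches HBaseTimelineWriterImpl through the service lifecycle rather than the constructor. Roughly, the tests follow the pattern sketched below; the variable names (conf, cluster, user, flow, flowVersion, runid, entity, te) are assumed to be set up as elsewhere in these tests, and the try/finally shape is a sketch rather than a quote.

    HBaseTimelineWriterImpl hbi = null;
    try {
      hbi = new HBaseTimelineWriterImpl();  // no-arg constructor after YARN-5572
      hbi.init(conf);                       // configuration injected via init()
      hbi.start();
      hbi.write(cluster, user, flow, flowVersion, runid, entity.getId(), te);
      hbi.flush();
    } finally {
      if (hbi != null) {
        hbi.stop();
      }
    }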


[1/2] hadoop git commit: HADOOP-13859. TestConfigurationFieldsBase fails for fields that are DEFAULT values of skipped properties. (Haibo Chen via kasha)

2016-12-06 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 3b2e80881 -> 12bce022e
  refs/heads/YARN-5355-branch-2 4537f5665 -> 5c58e55ea


HADOOP-13859. TestConfigurationFieldsBase fails for fields that are DEFAULT 
values of skipped properties. (Haibo Chen via kasha)

(cherry picked from commit c8d0a049b00536385f06fad412a2288f005bf2ce)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12bce022
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12bce022
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12bce022

Branch: refs/heads/YARN-5355
Commit: 12bce022ea1dd9743f441a60ac20b95da411be93
Parents: 3b2e808
Author: Karthik Kambatla 
Authored: Tue Dec 6 14:08:08 2016 -0800
Committer: Sangjin Lee 
Committed: Tue Dec 6 18:07:23 2016 -0800

--
 .../conf/TestConfigurationFieldsBase.java   | 20 ++--
 .../hadoop/tools/TestHdfsConfigFields.java  |  6 -
 .../yarn/conf/TestYarnConfigurationFields.java  | 24 
 3 files changed, 18 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12bce022/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index eab0161..11da129 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -196,6 +196,12 @@ public abstract class TestConfigurationFieldsBase {
   if (!f.getType().getName().equals("java.lang.String")) {
 continue;
   }
+
+  // filter out default-value fields
+  if (isFieldADefaultValue(f)) {
+continue;
+  }
+
   // Convert found member into String
   try {
 value = (String) f.get(null);
@@ -323,6 +329,17 @@ public abstract class TestConfigurationFieldsBase {
   }
 
   /**
+   * Test if a field is a default value of another property by
+   * checking if its name starts with "DEFAULT_" or ends with
+   * "_DEFAULT".
+   * @param field the field to check
+   */
+  private static boolean isFieldADefaultValue(Field field) {
+return field.getName().startsWith("DEFAULT_") ||
+field.getName().endsWith("_DEFAULT");
+  }
+
+  /**
* Utility function to extract public static final default
* member variables from a Configuration type class.
*
@@ -354,8 +371,7 @@ public abstract class TestConfigurationFieldsBase {
   }
   // Special: Stuff any property beginning with "DEFAULT_" into a
   // different hash for later processing
-  if (f.getName().startsWith("DEFAULT_") ||
-  f.getName().endsWith("_DEFAULT")) {
+  if (isFieldADefaultValue(f)) {
 if (retVal.containsKey(f.getName())) {
   continue;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12bce022/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index bf29428..95ba822 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -56,12 +56,6 @@ public class TestHdfsConfigFields extends 
TestConfigurationFieldsBase {
 // Remove deprecated properties listed in Configuration#DeprecationDelta
 configurationPropsToSkipCompare.add(DFSConfigKeys.DFS_DF_INTERVAL_KEY);
 
-// Remove default properties
-configurationPropsToSkipCompare
-.add(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT);
-configurationPropsToSkipCompare
-.add(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
-
 // Remove support property
 configurationPropsToSkipCompare
 .add(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12bce022/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
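
The convention this patch centralizes is simple: a public static final String whose name starts with "DEFAULT_" or ends with "_DEFAULT" is a default value, not a property key, and should be skipped when auditing key fields. A self-contained sketch of that filter is below; the ExampleKeys class and its field names are invented for illustration and do not exist in Hadoop.

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

public class DefaultValueFilterExample {
  // Invented key holder, standing in for a class like DFSConfigKeys.
  static class ExampleKeys {
    public static final String FOO_KEY = "example.foo";
    public static final String DEFAULT_FOO = "42";
    public static final String BAR_TIMEOUT_DEFAULT = "1000";
  }

  // Same naming convention the patch encodes in isFieldADefaultValue().
  static boolean isFieldADefaultValue(Field field) {
    return field.getName().startsWith("DEFAULT_")
        || field.getName().endsWith("_DEFAULT");
  }

  public static void main(String[] args) {
    for (Field f : ExampleKeys.class.getDeclaredFields()) {
      // Keep only static String fields that are not default values.
      if (!Modifier.isStatic(f.getModifiers())
          || !f.getType().equals(String.class)
          || isFieldADefaultValue(f)) {
        continue;
      }
      System.out.println("property key field: " + f.getName());
    }
  }
}

Running it prints only FOO_KEY, mirroring how TestConfigurationFieldsBase now ignores DEFAULT_FOO-style members instead of listing them one by one in the per-project skip sets.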

[2/2] hadoop git commit: HADOOP-13859. TestConfigurationFieldsBase fails for fields that are DEFAULT values of skipped properties. (Haibo Chen via kasha)

2016-12-06 Thread sjlee
HADOOP-13859. TestConfigurationFieldsBase fails for fields that are DEFAULT 
values of skipped properties. (Haibo Chen via kasha)

(cherry picked from commit c8d0a049b00536385f06fad412a2288f005bf2ce)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c58e55e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c58e55e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c58e55e

Branch: refs/heads/YARN-5355-branch-2
Commit: 5c58e55ea8a79ba8415f5cb1ae24ff10fcb1aefc
Parents: 4537f56
Author: Karthik Kambatla 
Authored: Tue Dec 6 14:08:08 2016 -0800
Committer: Sangjin Lee 
Committed: Tue Dec 6 18:16:36 2016 -0800

--
 .../conf/TestConfigurationFieldsBase.java   | 20 ++--
 .../hadoop/tools/TestHdfsConfigFields.java  |  6 -
 .../yarn/conf/TestYarnConfigurationFields.java  | 24 
 3 files changed, 18 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c58e55e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index eab0161..11da129 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -196,6 +196,12 @@ public abstract class TestConfigurationFieldsBase {
   if (!f.getType().getName().equals("java.lang.String")) {
 continue;
   }
+
+  // filter out default-value fields
+  if (isFieldADefaultValue(f)) {
+continue;
+  }
+
   // Convert found member into String
   try {
 value = (String) f.get(null);
@@ -323,6 +329,17 @@ public abstract class TestConfigurationFieldsBase {
   }
 
   /**
+   * Test if a field is a default value of another property by
+   * checking if its name starts with "DEFAULT_" or ends with
+   * "_DEFAULT".
+   * @param field the field to check
+   */
+  private static boolean isFieldADefaultValue(Field field) {
+return field.getName().startsWith("DEFAULT_") ||
+field.getName().endsWith("_DEFAULT");
+  }
+
+  /**
* Utility function to extract public static final default
* member variables from a Configuration type class.
*
@@ -354,8 +371,7 @@ public abstract class TestConfigurationFieldsBase {
   }
   // Special: Stuff any property beginning with "DEFAULT_" into a
   // different hash for later processing
-  if (f.getName().startsWith("DEFAULT_") ||
-  f.getName().endsWith("_DEFAULT")) {
+  if (isFieldADefaultValue(f)) {
 if (retVal.containsKey(f.getName())) {
   continue;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c58e55e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index ae13f06..92019f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -59,12 +59,6 @@ public class TestHdfsConfigFields extends 
TestConfigurationFieldsBase {
 // Remove deprecated properties in DeprecatedProperties.md
 configurationPropsToSkipCompare.add(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY);
 
-// Remove default properties
-configurationPropsToSkipCompare
-.add(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT);
-configurationPropsToSkipCompare
-.add(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
-
 // Remove support property
 configurationPropsToSkipCompare
 .add(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c58e55e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 

[5/6] hadoop git commit: YARN-4765 Split TestHBaseTimelineStorage into multiple test classes (Varun Saxena via Vrushali C)

2016-12-06 Thread sjlee
YARN-4765 Split TestHBaseTimelineStorage into multiple test classes (Varun 
Saxena via Vrushali C)

(cherry picked from commit 513dcf6817dd76fde8096ff04cd888d7c908461d)
(cherry picked from commit 022bf783aa89c1c81374ebef5dba2df95b7563b5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68de9ea8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68de9ea8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68de9ea8

Branch: refs/heads/YARN-5355-branch-2
Commit: 68de9ea8bad605ce2aaf22fc97b46dbe7726bbf0
Parents: 5c0cc0e
Author: Vrushali Channapattan 
Authored: Thu Oct 27 14:37:50 2016 -0700
Committer: Sangjin Lee 
Committed: Tue Dec 6 08:39:46 2016 -0800

--
 .../storage/DataGeneratorForTest.java   |  381 ++
 .../storage/TestHBaseTimelineStorage.java   | 3751 --
 .../storage/TestHBaseTimelineStorageApps.java   | 1849 +
 .../TestHBaseTimelineStorageEntities.java   | 1675 
 4 files changed, 3905 insertions(+), 3751 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68de9ea8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
new file mode 100644
index 000..0938e9e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
@@ -0,0 +1,381 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
+
+final class DataGeneratorForTest {
+  static void loadApps(HBaseTestingUtility util) throws IOException {
+TimelineEntities te = new TimelineEntities();
+TimelineEntity entity = new TimelineEntity();
+String id = "application_11_";
+entity.setId(id);
+entity.setType(TimelineEntityType.YARN_APPLICATION.toString());
+Long cTime = 1425016502000L;
+entity.setCreatedTime(cTime);
+// add the info map in Timeline Entity
+Map<String, Object> infoMap = new HashMap<>();
+infoMap.put("infoMapKey1", "infoMapValue2");
+infoMap.put("infoMapKey2", 20);
+infoMap.put("infoMapKey3", 85.85);
+entity.addInfo(infoMap);
+// add the isRelatedToEntity info
+Set<String> isRelatedToSet = new HashSet<>();
+isRelatedToSet.add("relatedto1");
+Map<String, Set<String>> isRelatedTo = new HashMap<>();
+isRelatedTo.put("task", isRelatedToSet);
+entity.setIsRelatedToEntities(isRelatedTo);
+// add the relatesTo info
+Set<String> relatesToSet = new HashSet<>();
+relatesToSet.add("relatesto1");
+relatesToSet.add("relatesto3");
+Map<String, Set<String>> relatesTo = new HashMap<>();
+relatesTo.put("container", relatesToSet);
+Set<String> relatesToSet11 = 
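
Outside the HBase test harness, the fixture pattern loadApps() builds up above reduces to a short standalone snippet. The sketch below uses only the timeline-service API classes already imported by the new file (the id, created time and map values are copied from the diff); it is an illustration of the pattern, not part of DataGeneratorForTest.

import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;

public class TimelineFixtureSketch {
  static TimelineEntities buildFixture() {
    TimelineEntity entity = new TimelineEntity();
    entity.setId("application_11_");
    entity.setType(TimelineEntityType.YARN_APPLICATION.toString());
    entity.setCreatedTime(1425016502000L);

    // Arbitrary info values, as in loadApps().
    Map<String, Object> info = new HashMap<>();
    info.put("infoMapKey1", "infoMapValue2");
    info.put("infoMapKey2", 20);
    entity.addInfo(info);

    // One isRelatedTo edge keyed by "task".
    Map<String, Set<String>> isRelatedTo = new HashMap<>();
    isRelatedTo.put("task", new HashSet<>(Collections.singleton("relatedto1")));
    entity.setIsRelatedToEntities(isRelatedTo);

    TimelineEntities te = new TimelineEntities();
    te.addEntity(entity);
    return te;
  }
}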

[1/6] hadoop git commit: YARN-5739. Provide timeline reader API to list available timeline entity types for one application. Contributed by Li Lu.

2016-12-06 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 f734977b2 -> 3b2e80881
  refs/heads/YARN-5355-branch-2 5c0cc0ebf -> 4537f5665


YARN-5739. Provide timeline reader API to list available timeline entity types 
for one application. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b2e8088
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b2e8088
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b2e8088

Branch: refs/heads/YARN-5355
Commit: 3b2e80881eaeaa82667496d706496765ed7e29f5
Parents: f734977
Author: Sangjin Lee 
Authored: Tue Dec 6 08:28:43 2016 -0800
Committer: Sangjin Lee 
Committed: Tue Dec 6 08:28:43 2016 -0800

--
 .../storage/DataGeneratorForTest.java   |  47 -
 .../TestHBaseTimelineStorageEntities.java   |  23 +++
 .../reader/TimelineReaderManager.java   |  20 +++
 .../reader/TimelineReaderWebServices.java   | 102 +++
 .../storage/FileSystemTimelineReaderImpl.java   |  21 +++
 .../storage/HBaseTimelineReaderImpl.java|   8 +
 .../timelineservice/storage/TimelineReader.java |  13 ++
 .../storage/common/TimelineStorageUtils.java|  36 
 .../reader/AbstractTimelineStorageReader.java   | 145 +++
 .../storage/reader/ApplicationEntityReader.java |  14 +-
 .../storage/reader/EntityTypeReader.java| 180 +++
 .../storage/reader/GenericEntityReader.java |  77 +---
 .../storage/reader/TimelineEntityReader.java|  29 +--
 .../reader/TimelineEntityReaderFactory.java |  13 ++
 14 files changed, 613 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b2e8088/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
index 0938e9e..b56a752 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
+import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
 
 final class DataGeneratorForTest {
   static void loadApps(HBaseTestingUtility util) throws IOException {
@@ -358,6 +359,46 @@ final class DataGeneratorForTest {
 relatesTo3.put("container2", relatesToSet14);
 entity2.setRelatesToEntities(relatesTo3);
 te.addEntity(entity2);
+
+// For listing types
+for (int i = 0; i < 10; i++) {
+  TimelineEntity entity3 = new TimelineEntity();
+  String id3 = "typeTest" + i;
+  entity3.setId(id3);
+  StringBuilder typeName = new StringBuilder("newType");
+  for (int j = 0; j < (i % 3); j++) {
+typeName.append(" ").append(j);
+  }
+  entity3.setType(typeName.toString());
+  entity3.setCreatedTime(cTime + 80L + i);
+  te.addEntity(entity3);
+}
+
+// Create app entity for app to flow table
+TimelineEntities appTe1 = new TimelineEntities();
+TimelineEntity entityApp1 = new TimelineEntity();
+String appName1 = "application_123111_";
+entityApp1.setId(appName1);
+entityApp1.setType(TimelineEntityType.YARN_APPLICATION.toString());
+entityApp1.setCreatedTime(cTime + 40L);
+TimelineEvent appCreationEvent1 = new TimelineEvent();
+appCreationEvent1.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
+appCreationEvent1.setTimestamp(cTime);
+entityApp1.addEvent(appCreationEvent1);
+appTe1.addEntity(entityApp1);
+
+TimelineEntities appTe2 = new TimelineEntities();
+TimelineEntity entityApp2 = new TimelineEntity();
+String appName2 = "application_123111_1112";
+entityApp2.setId(appName2);
+
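
The "For listing types" loop in this hunk is what gives the new list-entity-types reader API something non-trivial to return: although ten entities are written, the i % 3 suffix logic yields only three distinct type names. A standalone sketch of just that naming logic (plain Java, no HBase involved):

import java.util.LinkedHashSet;
import java.util.Set;

public class DistinctTypeNamesSketch {
  public static void main(String[] args) {
    Set<String> types = new LinkedHashSet<>();
    for (int i = 0; i < 10; i++) {
      // Same construction as the loop in the diff above.
      StringBuilder typeName = new StringBuilder("newType");
      for (int j = 0; j < (i % 3); j++) {
        typeName.append(" ").append(j);
      }
      types.add(typeName.toString());
    }
    // Prints: [newType, newType 0, newType 0 1]
    System.out.println(types);
  }
}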
