hadoop git commit: HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

2016-03-10 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 cff6db1f9 -> acafc950d


HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/acafc950
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/acafc950
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/acafc950

Branch: refs/heads/branch-2.7
Commit: acafc950d9347769c3729d571121b3525c6d5eb2
Parents: cff6db1
Author: Masatake Iwasaki 
Authored: Fri Mar 11 15:12:22 2016 +0900
Committer: Masatake Iwasaki 
Committed: Fri Mar 11 15:12:22 2016 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../main/java/org/apache/hadoop/ipc/Client.java | 35 +-
 .../src/main/resources/core-default.xml |  9 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 68 
 4 files changed, 94 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/acafc950/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a2381fa..2d6107c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -902,6 +902,8 @@ Release 2.6.5 - UNRELEASED
 HADOOP-12789. log classpath of ApplicationClassLoader at INFO level
 (Sangjin Lee via mingma)
 
+HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acafc950/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 42b9934..4c5da8e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -383,15 +383,16 @@ public class Client {
 private Socket socket = null; // connected socket
 private DataInputStream in;
 private DataOutputStream out;
-private int rpcTimeout;
+private final int rpcTimeout;
 private int maxIdleTime; //connections will be culled if it was idle for 
 //maxIdleTime msecs
 private final RetryPolicy connectionRetryPolicy;
 private final int maxRetriesOnSasl;
 private int maxRetriesOnSocketTimeouts;
-private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server
+private final int soTimeout; // used by ipc ping and rpc timeout
 private ByteArrayOutputStream pingRequest; // ping message
 
 // currently active calls
@@ -429,6 +430,9 @@ public class Client {
 pingHeader.writeDelimitedTo(pingRequest);
   }
   this.pingInterval = remoteId.getPingInterval();
+  this.soTimeout =
+  (rpcTimeout == 0 || (doPing && pingInterval < rpcTimeout))?
+  this.pingInterval : this.rpcTimeout;
   this.serviceClass = serviceClass;
   if (LOG.isDebugEnabled()) {
 LOG.debug("The ping interval is " + this.pingInterval + " ms.");
@@ -479,12 +483,12 @@ public class Client {
 
   /* Process timeout exception
* if the connection is not going to be closed or 
-   * is not configured to have a RPC timeout, send a ping.
-   * (if rpcTimeout is not set to be 0, then RPC should timeout.
-   * otherwise, throw the timeout exception.
+   * the RPC is not timed out yet, send a ping.
*/
-  private void handleTimeout(SocketTimeoutException e) throws IOException {
-if (shouldCloseConnection.get() || !running.get() || rpcTimeout > 0) {
+  private void handleTimeout(SocketTimeoutException e, int waiting)
+  throws IOException {
+if (shouldCloseConnection.get() || !running.get() ||
+(0 < rpcTimeout && rpcTimeout <= waiting)) {
   throw e;
 } else {
   sendPing();
@@ -498,11 +502,13 @@ public class Client {
*/
   @Override
   public int read() throws IOException {
+int waiting = 0;
   

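The heart of this change is the constructor hunk above: the socket timeout is
now derived from both the ping interval and the RPC timeout, instead of the
RPC timeout silently replacing the ping interval. The selection rule can be
read as a small standalone function; the following is a sketch with
illustrative names, not the actual Client internals:

public final class SoTimeoutRule {
  private SoTimeoutRule() {}

  /**
   * Sketch of the soTimeout selection above (names are illustrative).
   * @param rpcTimeout   RPC timeout in msecs; 0 means no RPC timeout
   * @param doPing       whether IPC ping is enabled
   * @param pingInterval ping interval in msecs
   */
  static int effectiveSoTimeout(int rpcTimeout, boolean doPing, int pingInterval) {
    // Wake up at the ping interval when there is no RPC timeout, or when
    // pings fire more often than the RPC timeout would; otherwise let the
    // RPC timeout bound each blocking socket read.
    return (rpcTimeout == 0 || (doPing && pingInterval < rpcTimeout))
        ? pingInterval : rpcTimeout;
  }

  public static void main(String[] args) {
    System.out.println(effectiveSoTimeout(30000, true, 10000));  // 10000: ping interval wins
    System.out.println(effectiveSoTimeout(30000, false, 10000)); // 30000: pings disabled
    System.out.println(effectiveSoTimeout(0, true, 60000));      // 60000: no RPC timeout
  }
}

With a 30-second RPC timeout and a 10-second ping interval, reads now wake
every 10 seconds so pings keep flowing; the 30-second deadline is enforced
separately by the new handleTimeout(e, waiting) accounting shown above.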
hadoop git commit: HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

2016-03-10 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 d61a2fd1d -> f311023f1


HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

(cherry picked from commit acafc950d9347769c3729d571121b3525c6d5eb2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f311023f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f311023f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f311023f

Branch: refs/heads/branch-2.6
Commit: f311023f1205b1ea4d51a5b9da5df89f4040af32
Parents: d61a2fd
Author: Masatake Iwasaki 
Authored: Fri Mar 11 15:12:22 2016 +0900
Committer: Masatake Iwasaki 
Committed: Fri Mar 11 15:39:57 2016 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../main/java/org/apache/hadoop/ipc/Client.java | 35 +-
 .../src/main/resources/core-default.xml |  9 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 68 
 4 files changed, 94 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f311023f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5782165..e17f122 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -17,6 +17,8 @@ Release 2.6.5 - UNRELEASED
 HADOOP-12789. log classpath of ApplicationClassLoader at INFO level
 (Sangjin Lee via mingma)
 
+HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f311023f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index dadfd79..f11c1e1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -377,15 +377,16 @@ public class Client {
 private Socket socket = null; // connected socket
 private DataInputStream in;
 private DataOutputStream out;
-private int rpcTimeout;
+private final int rpcTimeout;
 private int maxIdleTime; //connections will be culled if it was idle for 
 //maxIdleTime msecs
 private final RetryPolicy connectionRetryPolicy;
 private final int maxRetriesOnSasl;
 private int maxRetriesOnSocketTimeouts;
-private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server
+private final int soTimeout; // used by ipc ping and rpc timeout
 private ByteArrayOutputStream pingRequest; // ping message
 
 // currently active calls
@@ -423,6 +424,9 @@ public class Client {
 pingHeader.writeDelimitedTo(pingRequest);
   }
   this.pingInterval = remoteId.getPingInterval();
+  this.soTimeout =
+  (rpcTimeout == 0 || (doPing && pingInterval < rpcTimeout))?
+  this.pingInterval : this.rpcTimeout;
   this.serviceClass = serviceClass;
   if (LOG.isDebugEnabled()) {
 LOG.debug("The ping interval is " + this.pingInterval + " ms.");
@@ -473,12 +477,12 @@ public class Client {
 
   /* Process timeout exception
* if the connection is not going to be closed or 
-   * is not configured to have a RPC timeout, send a ping.
-   * (if rpcTimeout is not set to be 0, then RPC should timeout.
-   * otherwise, throw the timeout exception.
+   * the RPC is not timed out yet, send a ping.
*/
-  private void handleTimeout(SocketTimeoutException e) throws IOException {
-if (shouldCloseConnection.get() || !running.get() || rpcTimeout > 0) {
+  private void handleTimeout(SocketTimeoutException e, int waiting)
+  throws IOException {
+if (shouldCloseConnection.get() || !running.get() ||
+(0 < rpcTimeout && rpcTimeout <= waiting)) {
   throw e;
 } else {
   sendPing();
@@ -492,11 +496,13 @@ public class Client {
*/
   @Override
   

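The core-default.xml portion of this change (see the diffstat) documents the
client-side keys involved. Assuming the standard key names (ipc.client.ping,
ipc.ping.interval, ipc.client.rpc-timeout.ms) and purely illustrative values,
a client combines pings with an RPC timeout like this sketch:

import org.apache.hadoop.conf.Configuration;

public class RpcTimeoutConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Keep IPC pings enabled so an idle-but-alive server is not mistaken
    // for a dead one (key names assumed from core-default.xml).
    conf.setBoolean("ipc.client.ping", true);
    // Send a ping after 10s of read inactivity (illustrative value).
    conf.setInt("ipc.ping.interval", 10000);
    // Fail the RPC only after 60s with no response; with this fix the
    // 10s ping cadence is preserved while waiting out the 60s deadline.
    conf.setInt("ipc.client.rpc-timeout.ms", 60000);
  }
}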
hadoop git commit: HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

2016-03-10 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 a1f110a14 -> 0d3272b54


HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

(cherry picked from commit 682adc6ba9db3bed94fd4ea3d83761db6abfe695)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d3272b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d3272b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d3272b5

Branch: refs/heads/branch-2.8
Commit: 0d3272b54efb9566fedbc83286cec17dbd41e05c
Parents: a1f110a
Author: Masatake Iwasaki 
Authored: Fri Mar 11 15:03:40 2016 +0900
Committer: Masatake Iwasaki 
Committed: Fri Mar 11 15:08:23 2016 +0900

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 33 ++
 .../src/main/resources/core-default.xml |  9 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 68 
 3 files changed, 91 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d3272b5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 8100237..cb0f444 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -386,7 +386,7 @@ public class Client {
 private Socket socket = null; // connected socket
 private DataInputStream in;
 private DataOutputStream out;
-private int rpcTimeout;
+private final int rpcTimeout;
 private int maxIdleTime; //connections will be culled if it was idle for 
 //maxIdleTime msecs
 private final RetryPolicy connectionRetryPolicy;
@@ -394,8 +394,9 @@ public class Client {
 private int maxRetriesOnSocketTimeouts;
 private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
 private final boolean tcpLowLatency; // if T then use low-delay QoS
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server
+private final int soTimeout; // used by ipc ping and rpc timeout
 private ByteArrayOutputStream pingRequest; // ping message
 
 // currently active calls
@@ -434,6 +435,9 @@ public class Client {
 pingHeader.writeDelimitedTo(pingRequest);
   }
   this.pingInterval = remoteId.getPingInterval();
+  this.soTimeout =
+  (rpcTimeout == 0 || (doPing && pingInterval < rpcTimeout))?
+  this.pingInterval : this.rpcTimeout;
   this.serviceClass = serviceClass;
   if (LOG.isDebugEnabled()) {
 LOG.debug("The ping interval is " + this.pingInterval + " ms.");
@@ -484,12 +488,12 @@ public class Client {
 
   /* Process timeout exception
* if the connection is not going to be closed or 
-   * is not configured to have a RPC timeout, send a ping.
-   * (if rpcTimeout is not set to be 0, then RPC should timeout.
-   * otherwise, throw the timeout exception.
+   * the RPC is not timed out yet, send a ping.
*/
-  private void handleTimeout(SocketTimeoutException e) throws IOException {
-if (shouldCloseConnection.get() || !running.get() || rpcTimeout > 0) {
+  private void handleTimeout(SocketTimeoutException e, int waiting)
+  throws IOException {
+if (shouldCloseConnection.get() || !running.get() ||
+(0 < rpcTimeout && rpcTimeout <= waiting)) {
   throw e;
 } else {
   sendPing();
@@ -503,11 +507,13 @@ public class Client {
*/
   @Override
   public int read() throws IOException {
+int waiting = 0;
 do {
   try {
 return super.read();
   } catch (SocketTimeoutException e) {
-handleTimeout(e);
+waiting += soTimeout;
+handleTimeout(e, waiting);
   }
 } while (true);
   }
@@ -520,11 +526,13 @@ public class Client {
*/
   @Override
   public int read(byte[] buf, int off, int len) throws IOException {
+int waiting = 0;
 do {
   try {
 return super.read(buf, off, len);
   } catch (SocketTimeoutException e) {
-handleTimeout(e);
+waiting += soTimeout;
+handleTimeout(e, waiting);
   }
   

hadoop git commit: HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

2016-03-10 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b0ea50bb2 -> 7994ab3fa


HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

(cherry picked from commit 682adc6ba9db3bed94fd4ea3d83761db6abfe695)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7994ab3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7994ab3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7994ab3f

Branch: refs/heads/branch-2
Commit: 7994ab3fa01699ebea3edb3d0b28946e90825ba2
Parents: b0ea50b
Author: Masatake Iwasaki 
Authored: Fri Mar 11 15:03:40 2016 +0900
Committer: Masatake Iwasaki 
Committed: Fri Mar 11 15:05:34 2016 +0900

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 33 ++
 .../src/main/resources/core-default.xml |  9 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 68 
 3 files changed, 91 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7994ab3f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 1d46099..dcebe51 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -386,7 +386,7 @@ public class Client {
 private Socket socket = null; // connected socket
 private DataInputStream in;
 private DataOutputStream out;
-private int rpcTimeout;
+private final int rpcTimeout;
 private int maxIdleTime; //connections will be culled if it was idle for 
 //maxIdleTime msecs
 private final RetryPolicy connectionRetryPolicy;
@@ -394,8 +394,9 @@ public class Client {
 private int maxRetriesOnSocketTimeouts;
 private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
 private final boolean tcpLowLatency; // if T then use low-delay QoS
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server
+private final int soTimeout; // used by ipc ping and rpc timeout
 private ByteArrayOutputStream pingRequest; // ping message
 
 // currently active calls
@@ -434,6 +435,9 @@ public class Client {
 pingHeader.writeDelimitedTo(pingRequest);
   }
   this.pingInterval = remoteId.getPingInterval();
+  this.soTimeout =
+  (rpcTimeout == 0 || (doPing && pingInterval < rpcTimeout))?
+  this.pingInterval : this.rpcTimeout;
   this.serviceClass = serviceClass;
   if (LOG.isDebugEnabled()) {
 LOG.debug("The ping interval is " + this.pingInterval + " ms.");
@@ -484,12 +488,12 @@ public class Client {
 
   /* Process timeout exception
* if the connection is not going to be closed or 
-   * is not configured to have a RPC timeout, send a ping.
-   * (if rpcTimeout is not set to be 0, then RPC should timeout.
-   * otherwise, throw the timeout exception.
+   * the RPC is not timed out yet, send a ping.
*/
-  private void handleTimeout(SocketTimeoutException e) throws IOException {
-if (shouldCloseConnection.get() || !running.get() || rpcTimeout > 0) {
+  private void handleTimeout(SocketTimeoutException e, int waiting)
+  throws IOException {
+if (shouldCloseConnection.get() || !running.get() ||
+(0 < rpcTimeout && rpcTimeout <= waiting)) {
   throw e;
 } else {
   sendPing();
@@ -503,11 +507,13 @@ public class Client {
*/
   @Override
   public int read() throws IOException {
+int waiting = 0;
 do {
   try {
 return super.read();
   } catch (SocketTimeoutException e) {
-handleTimeout(e);
+waiting += soTimeout;
+handleTimeout(e, waiting);
   }
 } while (true);
   }
@@ -520,11 +526,13 @@ public class Client {
*/
   @Override
   public int read(byte[] buf, int off, int len) throws IOException {
+int waiting = 0;
 do {
   try {
 return super.read(buf, off, len);
   } catch (SocketTimeoutException e) {
-handleTimeout(e);
+waiting += soTimeout;
+handleTimeout(e, waiting);
   }
   

hadoop git commit: HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

2016-03-10 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/trunk e01c6ea68 -> 682adc6ba


HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/682adc6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/682adc6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/682adc6b

Branch: refs/heads/trunk
Commit: 682adc6ba9db3bed94fd4ea3d83761db6abfe695
Parents: e01c6ea
Author: Masatake Iwasaki 
Authored: Fri Mar 11 15:03:40 2016 +0900
Committer: Masatake Iwasaki 
Committed: Fri Mar 11 15:03:40 2016 +0900

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 33 ++
 .../src/main/resources/core-default.xml |  9 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 68 
 3 files changed, 91 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/682adc6b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 8d87957..3ae1d67 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -386,7 +386,7 @@ public class Client {
 private Socket socket = null; // connected socket
 private DataInputStream in;
 private DataOutputStream out;
-private int rpcTimeout;
+private final int rpcTimeout;
 private int maxIdleTime; //connections will be culled if it was idle for 
 //maxIdleTime msecs
 private final RetryPolicy connectionRetryPolicy;
@@ -394,8 +394,9 @@ public class Client {
 private int maxRetriesOnSocketTimeouts;
 private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
 private final boolean tcpLowLatency; // if T then use low-delay QoS
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server
+private final int soTimeout; // used by ipc ping and rpc timeout
 private ByteArrayOutputStream pingRequest; // ping message
 
 // currently active calls
@@ -434,6 +435,9 @@ public class Client {
 pingHeader.writeDelimitedTo(pingRequest);
   }
   this.pingInterval = remoteId.getPingInterval();
+  this.soTimeout =
+  (rpcTimeout == 0 || (doPing && pingInterval < rpcTimeout))?
+  this.pingInterval : this.rpcTimeout;
   this.serviceClass = serviceClass;
   if (LOG.isDebugEnabled()) {
 LOG.debug("The ping interval is " + this.pingInterval + " ms.");
@@ -484,12 +488,12 @@ public class Client {
 
   /* Process timeout exception
* if the connection is not going to be closed or 
-   * is not configured to have a RPC timeout, send a ping.
-   * (if rpcTimeout is not set to be 0, then RPC should timeout.
-   * otherwise, throw the timeout exception.
+   * the RPC is not timed out yet, send a ping.
*/
-  private void handleTimeout(SocketTimeoutException e) throws IOException {
-if (shouldCloseConnection.get() || !running.get() || rpcTimeout > 0) {
+  private void handleTimeout(SocketTimeoutException e, int waiting)
+  throws IOException {
+if (shouldCloseConnection.get() || !running.get() ||
+(0 < rpcTimeout && rpcTimeout <= waiting)) {
   throw e;
 } else {
   sendPing();
@@ -503,11 +507,13 @@ public class Client {
*/
   @Override
   public int read() throws IOException {
+int waiting = 0;
 do {
   try {
 return super.read();
   } catch (SocketTimeoutException e) {
-handleTimeout(e);
+waiting += soTimeout;
+handleTimeout(e, waiting);
   }
 } while (true);
   }
@@ -520,11 +526,13 @@ public class Client {
*/
   @Override
   public int read(byte[] buf, int off, int len) throws IOException {
+int waiting = 0;
 do {
   try {
 return super.read(buf, off, len);
   } catch (SocketTimeoutException e) {
-handleTimeout(e);
+waiting += soTimeout;
+handleTimeout(e, waiting);
   }
 } while (true);
   }
@@ -632,10 +640,7 @@ public class Client {
  

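The trunk hunks above show the complete pattern: each SocketTimeoutException
accounts for one soTimeout of waiting, and the RPC fails only once the
accumulated wait reaches rpcTimeout. Below is a compilable sketch of that
loop, with the connection-state checks (shouldCloseConnection, running) and
the actual ping write stubbed out; it is not the real PingInputStream in
Client.java:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.SocketTimeoutException;

class PingingInputStream extends FilterInputStream {
  private final int soTimeout;  // msecs each blocking read waits
  private final int rpcTimeout; // msecs until the RPC times out; 0 = never

  PingingInputStream(InputStream in, int soTimeout, int rpcTimeout) {
    super(in);
    this.soTimeout = soTimeout;
    this.rpcTimeout = rpcTimeout;
  }

  private void sendPing() {
    // Stub; the real client writes a ping request to the server here.
  }

  // Rethrow once the accumulated wait reaches the RPC timeout; ping otherwise.
  private void handleTimeout(SocketTimeoutException e, int waiting)
      throws IOException {
    if (rpcTimeout > 0 && rpcTimeout <= waiting) {
      throw e;
    }
    sendPing();
  }

  @Override
  public int read() throws IOException {
    int waiting = 0;
    do {
      try {
        return super.read();
      } catch (SocketTimeoutException e) {
        waiting += soTimeout; // one SO_TIMEOUT expiry per exception
        handleTimeout(e, waiting);
      }
    } while (true);
  }
}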
[1/2] hadoop git commit: HDFS-1477. Support reconfiguring dfs.heartbeat.interval and dfs.namenode.heartbeat.recheck-interval without NN restart. (Contributed by Xiaobing Zhou)

2016-03-10 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 710811652 -> b0ea50bb2
  refs/heads/trunk adf1cdf3d -> e01c6ea68


HDFS-1477. Support reconfiguring dfs.heartbeat.interval and 
dfs.namenode.heartbeat.recheck-interval without NN restart. (Contributed by 
Xiaobing Zhou)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e01c6ea6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e01c6ea6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e01c6ea6

Branch: refs/heads/trunk
Commit: e01c6ea688e62f25c4310e771a0cd85b53a5fb87
Parents: adf1cdf
Author: Arpit Agarwal 
Authored: Thu Mar 10 19:03:55 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Mar 10 19:03:55 2016 -0800

--
 .../server/blockmanagement/BlockManager.java|   3 +-
 .../server/blockmanagement/DatanodeManager.java |  44 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   1 +
 .../hadoop/hdfs/server/namenode/NameNode.java   | 103 --
 .../hdfs/server/namenode/NameNodeRpcServer.java |  28 ++--
 .../hdfs/server/namenode/NamenodeFsck.java  |   2 +-
 .../TestComputeInvalidateWork.java  |   2 +-
 .../namenode/TestNameNodeReconfigure.java   | 126 +
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 134 +++
 9 files changed, 357 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e01c6ea6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f12ea1b..6ed102c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -332,7 +332,8 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 
1000L;
 invalidateBlocks = new InvalidateBlocks(
-datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs);
+datanodeManager.getBlockInvalidateLimit(),
+startupDelayBlockDeletionInMs);
 
 // Compute the map capacity by allocating 2% of total memory
 blocksMap = new BlocksMap(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e01c6ea6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 3072fc0..53c7c16 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.net.InetAddresses;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -70,6 +71,8 @@ public class DatanodeManager {
   private final HeartbeatManager heartbeatManager;
   private final FSClusterStats fsClusterStats;
 
+  private volatile long heartbeatIntervalSeconds;
+  private volatile int heartbeatRecheckInterval;
   /**
* Stores the datanode -> block map.  
* 
@@ -113,7 +116,7 @@ public class DatanodeManager {
   /** The period to wait for datanode heartbeat.*/
   private long heartbeatExpireInterval;
   /** Ask Datanode only up to this many blocks to delete. */
-  final int blockInvalidateLimit;
+  private volatile int blockInvalidateLimit;
 
   /** The interval for judging stale DataNodes for read/write */
   private final long staleInterval;
@@ -227,10 +230,10 @@ public class DatanodeManager {
   dnsToSwitchMapping.resolve(locations);
 }
 
-final long heartbeatIntervalSeconds = conf.getLong(
+heartbeatIntervalSeconds = conf.getLong(
 DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
 

[2/2] hadoop git commit: HDFS-1477. Support reconfiguring dfs.heartbeat.interval and dfs.namenode.heartbeat.recheck-interval without NN restart. (Contributed by Xiaobing Zhou)

2016-03-10 Thread arp
HDFS-1477. Support reconfiguring dfs.heartbeat.interval and 
dfs.namenode.heartbeat.recheck-interval without NN restart. (Contributed by 
Xiaobing Zhou)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0ea50bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0ea50bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0ea50bb

Branch: refs/heads/branch-2
Commit: b0ea50bb29d4821f088e3b7bfcf54de76c83805a
Parents: 7108116
Author: Arpit Agarwal 
Authored: Thu Mar 10 19:03:55 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Mar 10 19:05:22 2016 -0800

--
 .../server/blockmanagement/BlockManager.java|   3 +-
 .../server/blockmanagement/DatanodeManager.java |  44 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   1 +
 .../hadoop/hdfs/server/namenode/NameNode.java   | 103 --
 .../hdfs/server/namenode/NameNodeRpcServer.java |  28 ++--
 .../hdfs/server/namenode/NamenodeFsck.java  |   2 +-
 .../TestComputeInvalidateWork.java  |   2 +-
 .../namenode/TestNameNodeReconfigure.java   | 126 +
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 134 +++
 9 files changed, 357 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0ea50bb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6848ee2..e5ac484 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -305,7 +305,8 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 
1000L;
 invalidateBlocks = new InvalidateBlocks(
-datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs);
+datanodeManager.getBlockInvalidateLimit(),
+startupDelayBlockDeletionInMs);
 
 // Compute the map capacity by allocating 2% of total memory
 blocksMap = new BlocksMap(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0ea50bb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 60e6610..edcf732 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.net.InetAddresses;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -67,6 +68,8 @@ public class DatanodeManager {
   private final HeartbeatManager heartbeatManager;
   private final FSClusterStats fsClusterStats;
 
+  private volatile long heartbeatIntervalSeconds;
+  private volatile int heartbeatRecheckInterval;
   /**
* Stores the datanode -> block map.  
* 
@@ -110,7 +113,7 @@ public class DatanodeManager {
   /** The period to wait for datanode heartbeat.*/
   private long heartbeatExpireInterval;
   /** Ask Datanode only up to this many blocks to delete. */
-  final int blockInvalidateLimit;
+  private volatile int blockInvalidateLimit;
 
   /** The interval for judging stale DataNodes for read/write */
   private final long staleInterval;
@@ -224,10 +227,10 @@ public class DatanodeManager {
   dnsToSwitchMapping.resolve(locations);
 }
 
-final long heartbeatIntervalSeconds = conf.getLong(
+heartbeatIntervalSeconds = conf.getLong(
 DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
 DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
-final int heartbeatRecheckInterval = conf.getInt(
+heartbeatRecheckInterval = conf.getInt(
   

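The pattern in both HDFS-1477 hunks is the same: settings that used to be
final locals or final fields become volatile fields, so an admin-triggered
reconfiguration can swap in new values while heartbeat-processing threads
keep running, with no lock on the hot path. A minimal sketch of that pattern
follows; the class is illustrative, not the actual DatanodeManager API,
though the expiry formula (two recheck periods plus ten heartbeat intervals)
mirrors HDFS's:

import java.util.concurrent.TimeUnit;

class HeartbeatSettings {
  private volatile long heartbeatIntervalSeconds;
  private volatile int heartbeatRecheckIntervalMs;
  private volatile long heartbeatExpireIntervalMs; // derived value

  HeartbeatSettings(long intervalSeconds, int recheckIntervalMs) {
    update(intervalSeconds, recheckIntervalMs);
  }

  // Called from the reconfiguration thread; reader threads see the new
  // values immediately because the fields are volatile. The writes are not
  // atomic as a group, matching best-effort reconfiguration semantics.
  final void update(long intervalSeconds, int recheckIntervalMs) {
    this.heartbeatIntervalSeconds = intervalSeconds;
    this.heartbeatRecheckIntervalMs = recheckIntervalMs;
    this.heartbeatExpireIntervalMs = 2L * recheckIntervalMs
        + 10 * TimeUnit.SECONDS.toMillis(intervalSeconds);
  }

  // A datanode missing heartbeats for this long is declared dead.
  long expireIntervalMs() {
    return heartbeatExpireIntervalMs;
  }
}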
hadoop git commit: HDFS-9925. Ozone: Add Ozone Client lib for bucket handling. Contributed by Anu Engineer.

2016-03-10 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 28f770d8b -> e4fb9bdd4


HDFS-9925. Ozone: Add Ozone Client lib for bucket handling. Contributed by Anu 
Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4fb9bdd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4fb9bdd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4fb9bdd

Branch: refs/heads/HDFS-7240
Commit: e4fb9bdd49fbf16aec9bae74e3dd8699f9ea1ea6
Parents: 28f770d
Author: Chris Nauroth 
Authored: Thu Mar 10 15:23:41 2016 -0800
Committer: Chris Nauroth 
Committed: Thu Mar 10 15:23:41 2016 -0800

--
 .../hadoop/ozone/web/client/OzoneBucket.java| 128 ++
 .../hadoop/ozone/web/client/OzoneVolume.java| 416 +++
 .../ozone/web/handlers/VolumeHandler.java   |   4 +-
 .../web/handlers/VolumeProcessTemplate.java |  26 ++
 .../hadoop/ozone/web/request/OzoneAcl.java  |   6 +
 .../hadoop/ozone/web/response/ListBuckets.java  |   2 +-
 .../hadoop/ozone/web/client/TestBuckets.java| 174 
 7 files changed, 752 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4fb9bdd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
new file mode 100644
index 000..cde8a69
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.client;
+
+import org.apache.commons.httpclient.HttpException;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.ozone.web.request.OzoneAcl;
+import org.apache.hadoop.ozone.web.response.BucketInfo;
+import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.apache.http.HttpRequest;
+import org.apache.http.HttpRequestInterceptor;
+import org.apache.http.protocol.HTTP;
+import org.apache.http.protocol.HttpContext;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * A Bucket class that represents an Ozone Bucket.
+ */
+public class OzoneBucket {
+
+  private BucketInfo bucketInfo;
+  private OzoneVolume volume;
+
+  /**
+   * Constructor for bucket.
+   *
+   * @param info   - BucketInfo
+   * @param volume - OzoneVolume Object that contains this bucket
+   */
+  public OzoneBucket(BucketInfo info, OzoneVolume volume) {
+this.bucketInfo = info;
+this.volume = volume;
+  }
+
+  /**
+   * Gets bucket Info.
+   *
+   * @return BucketInfo
+   */
+  public BucketInfo getBucketInfo() {
+return bucketInfo;
+  }
+
+  /**
+   * Sets Bucket Info.
+   *
+   * @param bucketInfo BucketInfo
+   */
+  public void setBucketInfo(BucketInfo bucketInfo) {
+this.bucketInfo = bucketInfo;
+  }
+
+  /**
+   * Returns the parent volume class.
+   *
+   * @return - OzoneVolume
+   */
+  OzoneVolume getVolume() {
+return volume;
+  }
+
+  /**
+   * Returns bucket name.
+   *
+   * @return Bucket Name
+   */
+  public String getBucketName() {
+return bucketInfo.getBucketName();
+  }
+
+  /**
+   * Returns the Acls on the bucket.
+   *
+   * @return - Acls
+   */
+  public List<OzoneAcl> getAcls() {
+return bucketInfo.getAcls();
+  }
+
+  /**
+   * Return versioning info on the bucket - Enabled or disabled.
+   *
+   * @return - Version Enum
+   */
+  public OzoneConsts.Versioning getVersioning() {
+return bucketInfo.getVersioning();
+  }
+
+  /**
+   * Gets the Storage class for the bucket.
+   *
+   * @return Storage Class Enum
+   */
+  public StorageType getStorageClass() {
+return bucketInfo.getStorageType();
+  }
+
+  private static class ContentLengthHeaderRemover implements
+  

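OzoneBucket is essentially a read-only view over BucketInfo plus a handle to
its parent volume. A short usage sketch relying only on the accessors visible
in this diff (the DescribeBucket class itself is hypothetical):

import java.util.List;

import org.apache.hadoop.ozone.web.client.OzoneBucket;
import org.apache.hadoop.ozone.web.request.OzoneAcl;

public class DescribeBucket {
  // Prints the bucket properties exposed by the accessors above.
  public static void describe(OzoneBucket bucket) {
    System.out.println("name:       " + bucket.getBucketName());
    System.out.println("versioning: " + bucket.getVersioning());
    System.out.println("storage:    " + bucket.getStorageClass());
    List<OzoneAcl> acls = bucket.getAcls();
    for (OzoneAcl acl : acls) {
      System.out.println("acl:        " + acl);
    }
  }
}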
hadoop git commit: HADOOP-12899. External distribution stitching scripts do not work correctly on Windows. Contributed by Chris Nauroth.

2016-03-10 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/trunk 79961ecea -> adf1cdf3d


HADOOP-12899. External distribution stitching scripts do not work correctly on 
Windows. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/adf1cdf3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/adf1cdf3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/adf1cdf3

Branch: refs/heads/trunk
Commit: adf1cdf3d5abed0dae76a4967129ce64f2e16c2f
Parents: 79961ec
Author: Chris Nauroth 
Authored: Thu Mar 10 14:49:08 2016 -0800
Committer: Chris Nauroth 
Committed: Thu Mar 10 14:49:08 2016 -0800

--
 hadoop-dist/pom.xml | 24 +---
 1 file changed, 13 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/adf1cdf3/hadoop-dist/pom.xml
--
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index ce1bd92..42e74cb 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -90,16 +90,17 @@
             <id>dist</id>
             <phase>prepare-package</phase>
             <goals>
-            <goal>exec</goal>
+              <goal>exec</goal>
             </goals>
             <configuration>
-            <executable>${basedir}/../dev-support/bin/dist-layout-stitching</executable>
-            <workingDirectory>${project.build.directory}</workingDirectory>
-            <requiresOnline>false</requiresOnline>
-            <arguments>
-              <argument>${project.version}</argument>
-              <argument>${project.build.directory}</argument>
-            </arguments>
+              <executable>${shell-executable}</executable>
+              <workingDirectory>${project.build.directory}</workingDirectory>
+              <requiresOnline>false</requiresOnline>
+              <arguments>
+                <argument>${basedir}/../dev-support/bin/dist-layout-stitching</argument>
+                <argument>${project.version}</argument>
+                <argument>${project.build.directory}</argument>
+              </arguments>
             </configuration>
           </execution>
           <execution>
@@ -109,12 +110,13 @@
             <goal>exec</goal>
             </goals>
             <configuration>
-            <executable>${basedir}/../dev-support/bin/dist-tar-stitching</executable>
+            <executable>${shell-executable}</executable>
             <workingDirectory>${project.build.directory}</workingDirectory>
             <requiresOnline>false</requiresOnline>
             <arguments>
-              <argument>${project.version}</argument>
-              <argument>${project.build.directory}</argument>
+              <argument>${basedir}/../dev-support/bin/dist-tar-stitching</argument>
+              <argument>${project.version}</argument>
+              <argument>${project.build.directory}</argument>
             </arguments>
             </configuration>
           </execution>



hadoop git commit: HDFS-9933. ReverseXML should be capitalized in oiv usage message (cmccabe)

2016-03-10 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 56462678f -> a1f110a14


HDFS-9933. ReverseXML should be capitalized in oiv usage message (cmccabe)

(cherry picked from commit 79961ecea888e0ee85b7a75e239bb6bb3335eb17)
(cherry picked from commit 710811652c1ac1da9c4fe5f8b0be1bcff7a756f3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1f110a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1f110a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1f110a1

Branch: refs/heads/branch-2.8
Commit: a1f110a1456160c8e442804d9d60653486032036
Parents: 5646267
Author: Colin Patrick Mccabe 
Authored: Thu Mar 10 13:43:27 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 10 13:45:24 2016 -0800

--
 .../hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f110a1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
index f2b308f..6b1b78a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
@@ -60,7 +60,7 @@ public class OfflineImageViewerPB {
   + "  * XML: This processor creates an XML document with all elements 
of\n"
   + "the fsimage enumerated, suitable for further analysis by XML\n"
   + "tools.\n"
-  + "  * reverseXML: This processor takes an XML file and creates a\n"
+  + "  * ReverseXML: This processor takes an XML file and creates a\n"
   + "binary fsimage containing the same elements.\n"
   + "  * FileDistribution: This processor analyzes the file size\n"
   + "distribution in the image.\n"



hadoop git commit: HDFS-9933. ReverseXML should be capitalized in oiv usage message (cmccabe)

2016-03-10 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c82d1e118 -> 710811652


HDFS-9933. ReverseXML should be capitalized in oiv usage message (cmccabe)

(cherry picked from commit 79961ecea888e0ee85b7a75e239bb6bb3335eb17)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71081165
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71081165
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71081165

Branch: refs/heads/branch-2
Commit: 710811652c1ac1da9c4fe5f8b0be1bcff7a756f3
Parents: c82d1e1
Author: Colin Patrick Mccabe 
Authored: Thu Mar 10 13:43:27 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 10 13:45:02 2016 -0800

--
 .../hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71081165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
index f2b308f..6b1b78a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
@@ -60,7 +60,7 @@ public class OfflineImageViewerPB {
   + "  * XML: This processor creates an XML document with all elements 
of\n"
   + "the fsimage enumerated, suitable for further analysis by XML\n"
   + "tools.\n"
-  + "  * reverseXML: This processor takes an XML file and creates a\n"
+  + "  * ReverseXML: This processor takes an XML file and creates a\n"
   + "binary fsimage containing the same elements.\n"
   + "  * FileDistribution: This processor analyzes the file size\n"
   + "distribution in the image.\n"



hadoop git commit: HDFS-9934. ReverseXML oiv processor should bail out if the XML file's layoutVersion doesn't match oiv's (cmccabe)

2016-03-10 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b8b580e6b -> c82d1e118


HDFS-9934. ReverseXML oiv processor should bail out if the XML file's 
layoutVersion doesn't match oiv's (cmccabe)

(cherry picked from commit bd49354c6d6387620b0de2219eab1714ec2d64f8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c82d1e11
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c82d1e11
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c82d1e11

Branch: refs/heads/branch-2
Commit: c82d1e1187cf4b09c534a15b0dc294751bf2edae
Parents: b8b580e
Author: Colin Patrick Mccabe 
Authored: Thu Mar 10 13:41:06 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 10 13:41:32 2016 -0800

--
 .../OfflineImageReconstructor.java  | 11 +++
 .../TestOfflineImageViewer.java | 34 
 2 files changed, 45 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c82d1e11/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index d2ebeb4..dd0d7c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -68,6 +68,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.hdfs.util.XMLUtils;
 import org.apache.hadoop.io.IOUtils;
@@ -1497,6 +1498,16 @@ class OfflineImageReconstructor {
   throw new IOException("The  section doesn't contain " +
   "the layoutVersion.");
 }
+if (layoutVersion.intValue() !=
+NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION) {
+  throw new IOException("Layout version mismatch.  This oiv tool " +
+  "handles layout version " +
+  NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + ", but the " +
+  "XML file has  " + layoutVersion + ".  Please " +
+  "either re-generate the XML file with the proper layout version, " +
+  "or manually edit the XML file to be usable with this version " +
+  "of the oiv tool.");
+}
 fileSummaryBld.setOndiskVersion(onDiskVersion);
 fileSummaryBld.setLayoutVersion(layoutVersion);
 if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c82d1e11/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index c7a6ae9..5d5fea6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -29,6 +29,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.PrintStream;
+import java.io.PrintWriter;
 import java.io.RandomAccessFile;
 import java.io.StringReader;
 import java.net.HttpURLConnection;
@@ -64,6 +65,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
@@ -499,4 +501,36 @@ public class TestOfflineImageViewer {
 Assert.assertEquals("",
   

hadoop git commit: HDFS-9934. ReverseXML oiv processor should bail out if the XML file's layoutVersion doesn't match oiv's (cmccabe)

2016-03-10 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 370a95619 -> 56462678f


HDFS-9934. ReverseXML oiv processor should bail out if the XML file's 
layoutVersion doesn't match oiv's (cmccabe)

(cherry picked from commit bd49354c6d6387620b0de2219eab1714ec2d64f8)
(cherry picked from commit c82d1e1187cf4b09c534a15b0dc294751bf2edae)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56462678
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56462678
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56462678

Branch: refs/heads/branch-2.8
Commit: 56462678f9b0ac89cf909e77362ad01c8ff73317
Parents: 370a956
Author: Colin Patrick Mccabe 
Authored: Thu Mar 10 13:41:06 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 10 13:44:33 2016 -0800

--
 .../OfflineImageReconstructor.java  | 11 +++
 .../TestOfflineImageViewer.java | 34 
 2 files changed, 45 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56462678/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index d2ebeb4..dd0d7c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -68,6 +68,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.hdfs.util.XMLUtils;
 import org.apache.hadoop.io.IOUtils;
@@ -1497,6 +1498,16 @@ class OfflineImageReconstructor {
   throw new IOException("The  section doesn't contain " +
   "the layoutVersion.");
 }
+if (layoutVersion.intValue() !=
+NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION) {
+  throw new IOException("Layout version mismatch.  This oiv tool " +
+  "handles layout version " +
+  NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + ", but the " +
+  "XML file has  " + layoutVersion + ".  Please " +
+  "either re-generate the XML file with the proper layout version, " +
+  "or manually edit the XML file to be usable with this version " +
+  "of the oiv tool.");
+}
 fileSummaryBld.setOndiskVersion(onDiskVersion);
 fileSummaryBld.setLayoutVersion(layoutVersion);
 if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56462678/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index c7a6ae9..5d5fea6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -29,6 +29,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.PrintStream;
+import java.io.PrintWriter;
 import java.io.RandomAccessFile;
 import java.io.StringReader;
 import java.net.HttpURLConnection;
@@ -64,6 +65,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
@@ -499,4 +501,36 @@ public class 

hadoop git commit: HDFS-9933. ReverseXML should be capitalized in oiv usage message (cmccabe)

2016-03-10 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk bd49354c6 -> 79961ecea


HDFS-9933. ReverseXML should be capitalized in oiv usage message (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79961ece
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79961ece
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79961ece

Branch: refs/heads/trunk
Commit: 79961ecea888e0ee85b7a75e239bb6bb3335eb17
Parents: bd49354
Author: Colin Patrick Mccabe 
Authored: Thu Mar 10 13:43:27 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 10 13:43:27 2016 -0800

--
 .../hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79961ece/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
index e184804..b514b3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
@@ -60,7 +60,7 @@ public class OfflineImageViewerPB {
   + "  * XML: This processor creates an XML document with all elements 
of\n"
   + "the fsimage enumerated, suitable for further analysis by XML\n"
   + "tools.\n"
-  + "  * reverseXML: This processor takes an XML file and creates a\n"
+  + "  * ReverseXML: This processor takes an XML file and creates a\n"
   + "binary fsimage containing the same elements.\n"
   + "  * FileDistribution: This processor analyzes the file size\n"
   + "distribution in the image.\n"



hadoop git commit: HDFS-9934. ReverseXML oiv processor should bail out if the XML file's layoutVersion doesn't match oiv's (cmccabe)

2016-03-10 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 500875dfc -> bd49354c6


HDFS-9934. ReverseXML oiv processor should bail out if the XML file's 
layoutVersion doesn't match oiv's (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd49354c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd49354c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd49354c

Branch: refs/heads/trunk
Commit: bd49354c6d6387620b0de2219eab1714ec2d64f8
Parents: 500875d
Author: Colin Patrick Mccabe 
Authored: Thu Mar 10 13:41:06 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 10 13:41:06 2016 -0800

--
 .../OfflineImageReconstructor.java  | 11 +++
 .../TestOfflineImageViewer.java | 34 
 2 files changed, 45 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd49354c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index e5d0e2c..73f4a22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -68,6 +68,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.hdfs.util.XMLUtils;
 import org.apache.hadoop.io.IOUtils;
@@ -1493,6 +1494,16 @@ class OfflineImageReconstructor {
   throw new IOException("The  section doesn't contain " +
   "the layoutVersion.");
 }
+if (layoutVersion.intValue() !=
+NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION) {
+  throw new IOException("Layout version mismatch.  This oiv tool " +
+  "handles layout version " +
+  NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + ", but the " +
+  "XML file has  " + layoutVersion + ".  Please " +
+  "either re-generate the XML file with the proper layout version, " +
+  "or manually edit the XML file to be usable with this version " +
+  "of the oiv tool.");
+}
 fileSummaryBld.setOndiskVersion(onDiskVersion);
 fileSummaryBld.setLayoutVersion(layoutVersion);
 if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd49354c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index c7a6ae9..5d5fea6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -29,6 +29,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.PrintStream;
+import java.io.PrintWriter;
 import java.io.RandomAccessFile;
 import java.io.StringReader;
 import java.net.HttpURLConnection;
@@ -64,6 +65,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
@@ -499,4 +501,36 @@ public class TestOfflineImageViewer {
 Assert.assertEquals("",
   GenericTestUtils.getFilesDiff(reverseImageXml, reverseImage2Xml));
   }
+
+  /**
+   * Tests that the ReverseXML processor rejects an XML file whose
+   * layoutVersion does not match the layout version this oiv tool handles.
+   */

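As a quick illustration of the new guard, here is a minimal standalone sketch
(the class name is hypothetical, and it assumes an XML file produced by the
oiv XML processor) that reads the first <layoutVersion> element and compares
it against the layout version this tool handles, the same comparison the
reconstructor now performs:

    import java.io.FileInputStream;
    import java.io.IOException;
    import javax.xml.stream.XMLInputFactory;
    import javax.xml.stream.XMLStreamConstants;
    import javax.xml.stream.XMLStreamReader;
    import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;

    public class LayoutVersionPreCheck {
      // Returns the value of the first <layoutVersion> element in the file.
      static int readLayoutVersion(String xmlPath) throws Exception {
        XMLStreamReader reader = XMLInputFactory.newInstance()
            .createXMLStreamReader(new FileInputStream(xmlPath));
        try {
          while (reader.hasNext()) {
            if (reader.next() == XMLStreamConstants.START_ELEMENT
                && "layoutVersion".equals(reader.getLocalName())) {
              return Integer.parseInt(reader.getElementText().trim());
            }
          }
        } finally {
          reader.close();
        }
        throw new IOException("no <layoutVersion> element in " + xmlPath);
      }

      public static void main(String[] args) throws Exception {
        int fileVersion = readLayoutVersion(args[0]);
        if (fileVersion != NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION) {
          // With this change, the ReverseXML processor fails fast here
          // instead of producing a bad binary fsimage.
          System.err.println("layoutVersion " + fileVersion + " does not match "
              + NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
        }
      }
    }
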
hadoop git commit: HDFS-9927. Document the new OIV ReverseXML processor (Wei-Chiu Chuang via cmccabe)

2016-03-10 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 92e86ff0c -> 370a95619


HDFS-9927. Document the new OIV ReverseXML processor (Wei-Chiu Chuang via 
cmccabe)

(cherry picked from commit 500875dfccdb3bb6709767962d1927ddb1cc5514)
(cherry picked from commit b8b580e6b8c737c50f687140cf3aeb9ddf08b89e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/370a9561
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/370a9561
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/370a9561

Branch: refs/heads/branch-2.8
Commit: 370a956196cc8a4bc319cd4b6075b3d13532f1d3
Parents: 92e86ff
Author: Colin Patrick Mccabe 
Authored: Thu Mar 10 13:38:00 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 10 13:39:43 2016 -0800

--
 .../hadoop-hdfs/src/site/markdown/HdfsImageViewer.md | 15 ++-
 1 file changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/370a9561/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
index 77219d3..de27fc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
@@ -38,7 +38,7 @@ The Offline Image Viewer provides several output processors:
 interactively by using HTTP REST API.
 
 2.  XML creates an XML document of the fsimage and includes all of the
-information within the fsimage, similar to the lsr processor. The
+information within the fsimage. The
 output of this processor is amenable to automated processing and
 analysis with XML tools. Due to the verbosity of the XML syntax,
 this processor will also generate the largest amount of output.
@@ -60,6 +60,11 @@ The Offline Image Viewer provides several output processors:
delimiter. The default delimiter is \t, though this may be changed via
the -delimiter argument.
 
+5. ReverseXML (experimental): This is the opposite of the XML processor;
+   it reconstructs an fsimage from an XML file. This processor makes it easy to
+   create fsimages for testing, and manually edit fsimages when there is
+   corruption.
+
 Usage
 -
 
@@ -139,9 +144,9 @@ Options
 
 | **Flag** | **Description** |
 |:---- |:---- |
-| `-i`\|`--inputFile` *input file* | Specify the input fsimage file to process. Required. |
-| `-o`\|`--outputFile` *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. (output to stdout by default)\|
-| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Currently valid options are Web (default), XML, Delimited and FileDistribution. |
+| `-i`\|`--inputFile` *input file* | Specify the input fsimage file (or XML file, if ReverseXML processor is used) to process. Required. |
+| `-o`\|`--outputFile` *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. (output to stdout by default) If the input file is an XML file, it also creates an outputFile.md5. |
+| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Currently valid options are `Web` (default), `XML`, `Delimited`, `FileDistribution` and `ReverseXML`. |
 | `-addr` *address* | Specify the address(host:port) to listen. (localhost:5978 by default). This option is used with Web processor. |
 | `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
 | `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
@@ -157,7 +162,7 @@ The Offline Image Viewer makes it easy to gather large amounts of data about the
 oiv\_legacy Command
 ---
 
-Due to the internal layout changes introduced by the ProtocolBuffer-based fsimage ([HDFS-5698](https://issues.apache.org/jira/browse/HDFS-5698)), OfflineImageViewer consumes excessive amount of memory and loses some functions such as Indented and Delimited processor. If you want to process without large amount of memory or use these processors, you can use `oiv_legacy` command (same as `oiv` in Hadoop 2.3).
+Due to the internal layout changes introduced by the ProtocolBuffer-based fsimage ([HDFS-5698](https://issues.apache.org/jira/browse/HDFS-5698)), OfflineImageViewer consumes excessive amount of memory and loses some functions such as Indented processor. If you want to process without large amount of memory or use these processors, you can use `oiv_legacy` command (same as `oiv` in Hadoop 2.3).

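The XML/ReverseXML round trip described above can also be driven
programmatically. A small sketch (paths are placeholders, and the class name
is hypothetical; OfflineImageViewerPB.run is the entry point the oiv tests
use):

    import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB;

    public class ReverseXmlRoundTrip {
      public static void main(String[] args) throws Exception {
        // 1. Dump an existing fsimage to XML.
        OfflineImageViewerPB.run(new String[] {
            "-p", "XML", "-i", "/tmp/fsimage_0000000000000000042",
            "-o", "/tmp/fsimage.xml"});
        // 2. Rebuild a binary fsimage from the (possibly hand-edited) XML.
        //    Per the -o description above, this should also write
        //    /tmp/fsimage.roundtrip.md5.
        OfflineImageViewerPB.run(new String[] {
            "-p", "ReverseXML", "-i", "/tmp/fsimage.xml",
            "-o", "/tmp/fsimage.roundtrip"});
      }
    }
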
hadoop git commit: HDFS-9927. Document the new OIV ReverseXML processor (Wei-Chiu Chuang via cmccabe)

2016-03-10 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2b16a54fb -> b8b580e6b


HDFS-9927. Document the new OIV ReverseXML processor (Wei-Chiu Chuang via 
cmccabe)

(cherry picked from commit 500875dfccdb3bb6709767962d1927ddb1cc5514)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8b580e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8b580e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8b580e6

Branch: refs/heads/branch-2
Commit: b8b580e6b8c737c50f687140cf3aeb9ddf08b89e
Parents: 2b16a54
Author: Colin Patrick Mccabe 
Authored: Thu Mar 10 13:38:00 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 10 13:39:20 2016 -0800

--
 .../hadoop-hdfs/src/site/markdown/HdfsImageViewer.md | 15 ++-
 1 file changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8b580e6/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
index 77219d3..de27fc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
@@ -38,7 +38,7 @@ The Offline Image Viewer provides several output processors:
 interactively by using HTTP REST API.
 
 2.  XML creates an XML document of the fsimage and includes all of the
-information within the fsimage, similar to the lsr processor. The
+information within the fsimage. The
 output of this processor is amenable to automated processing and
 analysis with XML tools. Due to the verbosity of the XML syntax,
 this processor will also generate the largest amount of output.
@@ -60,6 +60,11 @@ The Offline Image Viewer provides several output processors:
delimiter. The default delimiter is \t, though this may be changed via
the -delimiter argument.
 
+5. ReverseXML (experimental): This is the opposite of the XML processor;
+   it reconstructs an fsimage from an XML file. This processor makes it easy to
+   create fsimages for testing, and manually edit fsimages when there is
+   corruption.
+
 Usage
 -
 
@@ -139,9 +144,9 @@ Options
 
 | **Flag** | **Description** |
 |:---- |:---- |
-| `-i`\|`--inputFile` *input file* | Specify the input fsimage file to process. Required. |
-| `-o`\|`--outputFile` *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. (output to stdout by default)\|
-| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Currently valid options are Web (default), XML, Delimited and FileDistribution. |
+| `-i`\|`--inputFile` *input file* | Specify the input fsimage file (or XML file, if ReverseXML processor is used) to process. Required. |
+| `-o`\|`--outputFile` *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. (output to stdout by default) If the input file is an XML file, it also creates an outputFile.md5. |
+| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Currently valid options are `Web` (default), `XML`, `Delimited`, `FileDistribution` and `ReverseXML`. |
 | `-addr` *address* | Specify the address(host:port) to listen. (localhost:5978 by default). This option is used with Web processor. |
 | `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
 | `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
@@ -157,7 +162,7 @@ The Offline Image Viewer makes it easy to gather large amounts of data about the
 oiv\_legacy Command
 ---
 
-Due to the internal layout changes introduced by the ProtocolBuffer-based fsimage ([HDFS-5698](https://issues.apache.org/jira/browse/HDFS-5698)), OfflineImageViewer consumes excessive amount of memory and loses some functions such as Indented and Delimited processor. If you want to process without large amount of memory or use these processors, you can use `oiv_legacy` command (same as `oiv` in Hadoop 2.3).
+Due to the internal layout changes introduced by the ProtocolBuffer-based fsimage ([HDFS-5698](https://issues.apache.org/jira/browse/HDFS-5698)), OfflineImageViewer consumes excessive amount of memory and loses some functions such as Indented processor. If you want to process without large amount of memory or use these processors, you can use `oiv_legacy` command (same as `oiv` in Hadoop 2.3).

hadoop git commit: HDFS-9927. Document the new OIV ReverseXML processor (Wei-Chiu Chuang via cmccabe)

2016-03-10 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 017d2c127 -> 500875dfc


HDFS-9927. Document the new OIV ReverseXML processor (Wei-Chiu Chuang via 
cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/500875df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/500875df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/500875df

Branch: refs/heads/trunk
Commit: 500875dfccdb3bb6709767962d1927ddb1cc5514
Parents: 017d2c1
Author: Colin Patrick Mccabe 
Authored: Thu Mar 10 13:38:00 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 10 13:38:00 2016 -0800

--
 .../hadoop-hdfs/src/site/markdown/HdfsImageViewer.md | 15 ++-
 1 file changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/500875df/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
index 77219d3..de27fc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
@@ -38,7 +38,7 @@ The Offline Image Viewer provides several output processors:
 interactively by using HTTP REST API.
 
 2.  XML creates an XML document of the fsimage and includes all of the
-information within the fsimage, similar to the lsr processor. The
+information within the fsimage. The
 output of this processor is amenable to automated processing and
 analysis with XML tools. Due to the verbosity of the XML syntax,
 this processor will also generate the largest amount of output.
@@ -60,6 +60,11 @@ The Offline Image Viewer provides several output processors:
delimiter. The default delimiter is \t, though this may be changed via
the -delimiter argument.
 
+5. ReverseXML (experimental): This is the opposite of the XML processor;
+   it reconstructs an fsimage from an XML file. This processor makes it easy to
+   create fsimages for testing, and manually edit fsimages when there is
+   corruption.
+
 Usage
 -
 
@@ -139,9 +144,9 @@ Options
 
 | **Flag** | **Description** |
 |:---- |:---- |
-| `-i`\|`--inputFile` *input file* | Specify the input fsimage file to process. Required. |
-| `-o`\|`--outputFile` *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. (output to stdout by default)\|
-| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Currently valid options are Web (default), XML, Delimited and FileDistribution. |
+| `-i`\|`--inputFile` *input file* | Specify the input fsimage file (or XML file, if ReverseXML processor is used) to process. Required. |
+| `-o`\|`--outputFile` *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. (output to stdout by default) If the input file is an XML file, it also creates an outputFile.md5. |
+| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Currently valid options are `Web` (default), `XML`, `Delimited`, `FileDistribution` and `ReverseXML`. |
 | `-addr` *address* | Specify the address(host:port) to listen. (localhost:5978 by default). This option is used with Web processor. |
 | `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
 | `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
@@ -157,7 +162,7 @@ The Offline Image Viewer makes it easy to gather large amounts of data about the
 oiv\_legacy Command
 ---
 
-Due to the internal layout changes introduced by the ProtocolBuffer-based fsimage ([HDFS-5698](https://issues.apache.org/jira/browse/HDFS-5698)), OfflineImageViewer consumes excessive amount of memory and loses some functions such as Indented and Delimited processor. If you want to process without large amount of memory or use these processors, you can use `oiv_legacy` command (same as `oiv` in Hadoop 2.3).
+Due to the internal layout changes introduced by the ProtocolBuffer-based fsimage ([HDFS-5698](https://issues.apache.org/jira/browse/HDFS-5698)), OfflineImageViewer consumes excessive amount of memory and loses some functions such as Indented processor. If you want to process without large amount of memory or use these processors, you can use `oiv_legacy` command (same as `oiv` in Hadoop 2.3).

hadoop git commit: YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test setup, and concurrency.

2016-03-10 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8e89bb9d3 -> 92e86ff0c


YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test 
setup, and concurrency.

This commit amends commit d49cfb350454c2dfa2f3eb70f79b6d5030ce7bec with a 
missed test file.

(cherry picked from commit 017d2c127b9cbd75d3e31467172ed832f27ef826)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92e86ff0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92e86ff0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92e86ff0

Branch: refs/heads/branch-2.8
Commit: 92e86ff0cb2c8438553a318b6534fee5b9e634aa
Parents: 8e89bb9
Author: Li Lu 
Authored: Thu Mar 10 13:02:28 2016 -0800
Committer: Li Lu 
Committed: Thu Mar 10 13:06:02 2016 -0800

--
 .../TestOverrideTimelineStoreYarnClient.java| 56 
 1 file changed, 56 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e86ff0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java
new file mode 100644
index 000..c190266
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class TestOverrideTimelineStoreYarnClient {
+
+  @Test
+  public void testLifecycleAndOverride() throws Throwable {
+    YarnConfiguration conf = new YarnConfiguration();
+    try(NoRMStore store = new NoRMStore()) {
+      store.init(conf);
+      store.start();
+      Assert.assertEquals(EntityGroupFSTimelineStore.AppState.ACTIVE,
+          store.getAppState(ApplicationId.newInstance(1, 1)));
+      store.stop();
+    }
+  }
+
+  private static class NoRMStore extends EntityGroupFSTimelineStore {
+    @Override
+    protected YarnClient createAndInitYarnClient(Configuration conf) {
+      return null;
+    }
+
+    @Override
+    protected AppState getAppState(ApplicationId appId)
+        throws IOException {
+      return AppState.ACTIVE;
+    }
+  }
+}



hadoop git commit: YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test setup, and concurrency.

2016-03-10 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 92125e605 -> 2b16a54fb


YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test 
setup, and concurrency.

This commit amends commit d49cfb350454c2dfa2f3eb70f79b6d5030ce7bec with a 
missed test file.

(cherry picked from commit 017d2c127b9cbd75d3e31467172ed832f27ef826)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b16a54f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b16a54f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b16a54f

Branch: refs/heads/branch-2
Commit: 2b16a54fbef2893245f392ee058f10c2845f1dc1
Parents: 92125e6
Author: Li Lu 
Authored: Thu Mar 10 13:02:28 2016 -0800
Committer: Li Lu 
Committed: Thu Mar 10 13:04:57 2016 -0800

--
 .../TestOverrideTimelineStoreYarnClient.java| 56 
 1 file changed, 56 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b16a54f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java
new file mode 100644
index 000..c190266
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class TestOverrideTimelineStoreYarnClient {
+
+  @Test
+  public void testLifecycleAndOverride() throws Throwable {
+    YarnConfiguration conf = new YarnConfiguration();
+    try(NoRMStore store = new NoRMStore()) {
+      store.init(conf);
+      store.start();
+      Assert.assertEquals(EntityGroupFSTimelineStore.AppState.ACTIVE,
+          store.getAppState(ApplicationId.newInstance(1, 1)));
+      store.stop();
+    }
+  }
+
+  private static class NoRMStore extends EntityGroupFSTimelineStore {
+    @Override
+    protected YarnClient createAndInitYarnClient(Configuration conf) {
+      return null;
+    }
+
+    @Override
+    protected AppState getAppState(ApplicationId appId)
+        throws IOException {
+      return AppState.ACTIVE;
+    }
+  }
+}



hadoop git commit: YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test setup, and concurrency.

2016-03-10 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9a79b738c -> 017d2c127


YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test 
setup, and concurrency.

This commit amends commit d49cfb350454c2dfa2f3eb70f79b6d5030ce7bec with a 
missed test file.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/017d2c12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/017d2c12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/017d2c12

Branch: refs/heads/trunk
Commit: 017d2c127b9cbd75d3e31467172ed832f27ef826
Parents: 9a79b73
Author: Li Lu 
Authored: Thu Mar 10 13:02:28 2016 -0800
Committer: Li Lu 
Committed: Thu Mar 10 13:02:28 2016 -0800

--
 .../TestOverrideTimelineStoreYarnClient.java| 56 
 1 file changed, 56 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/017d2c12/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java
new file mode 100644
index 000..c190266
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestOverrideTimelineStoreYarnClient.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class TestOverrideTimelineStoreYarnClient {
+
+  @Test
+  public void testLifecycleAndOverride() throws Throwable {
+    YarnConfiguration conf = new YarnConfiguration();
+    try(NoRMStore store = new NoRMStore()) {
+      store.init(conf);
+      store.start();
+      Assert.assertEquals(EntityGroupFSTimelineStore.AppState.ACTIVE,
+          store.getAppState(ApplicationId.newInstance(1, 1)));
+      store.stop();
+    }
+  }
+
+  private static class NoRMStore extends EntityGroupFSTimelineStore {
+    @Override
+    protected YarnClient createAndInitYarnClient(Configuration conf) {
+      return null;
+    }
+
+    @Override
+    protected AppState getAppState(ApplicationId appId)
+        throws IOException {
+      return AppState.ACTIVE;
+    }
+  }
+}



hadoop git commit: HADOOP-12906. AuthenticatedURL should convert a 404/Not Found into an FileNotFoundException. (Steve Loughran via gtcarrera9)

2016-03-10 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 76ef097fd -> 92125e605


HADOOP-12906. AuthenticatedURL should convert a 404/Not Found into an 
FileNotFoundException. (Steve Loughran via gtcarrera9)

(cherry-picked from commit 9a79b738c582bd84727831987b845535625d75fe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92125e60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92125e60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92125e60

Branch: refs/heads/branch-2
Commit: 92125e60579a4e02e18aa11b2325328c34a3ae29
Parents: 76ef097
Author: Li Lu 
Authored: Thu Mar 10 11:38:31 2016 -0800
Committer: Li Lu 
Committed: Thu Mar 10 11:39:44 2016 -0800

--
 .../security/authentication/client/AuthenticatedURL.java  | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92125e60/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
--
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
index c50a516..f87d9d8 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
@@ -15,6 +15,7 @@ package org.apache.hadoop.security.authentication.client;
 
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
@@ -269,10 +270,15 @@ public class AuthenticatedURL {
   }
 }
   }
+} else if (respCode == HttpURLConnection.HTTP_NOT_FOUND) {
+  token.set(null);
+  throw new FileNotFoundException(conn.getURL().toString());
 } else {
   token.set(null);
-  throw new AuthenticationException("Authentication failed, status: " + conn.getResponseCode() +
-    ", message: " + conn.getResponseMessage());
+  throw new AuthenticationException("Authentication failed" +
+  ", URL: " + conn.getURL() +
+  ", status: " + conn.getResponseCode() +
+  ", message: " + conn.getResponseMessage());
 }
   }
 

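For callers, the effect of this change is that a missing resource can now be
told apart from a real authentication failure. A short sketch (the URL and
class name are illustrative):

    import java.io.FileNotFoundException;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

    public class NotFoundHandling {
      public static void main(String[] args) throws Exception {
        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
        URL url = new URL("http://timeline.example.com:8188/ws/v1/timeline/nosuch");
        try {
          HttpURLConnection conn =
              new AuthenticatedURL().openConnection(url, token);
          System.out.println("HTTP " + conn.getResponseCode());
        } catch (FileNotFoundException notFound) {
          // Raised while extracting the auth token if the server answers 404;
          // previously this surfaced as a generic AuthenticationException.
          System.err.println("No such resource: " + notFound.getMessage());
        }
      }
    }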


hadoop git commit: HADOOP-12906. AuthenticatedURL should convert a 404/Not Found into an FileNotFoundException. (Steve Loughran via gtcarrera9)

2016-03-10 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/trunk d49cfb350 -> 9a79b738c


HADOOP-12906. AuthenticatedURL should convert a 404/Not Found into an 
FileNotFoundException. (Steve Loughran via gtcarrera9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a79b738
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a79b738
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a79b738

Branch: refs/heads/trunk
Commit: 9a79b738c582bd84727831987b845535625d75fe
Parents: d49cfb3
Author: Li Lu 
Authored: Thu Mar 10 11:38:31 2016 -0800
Committer: Li Lu 
Committed: Thu Mar 10 11:38:31 2016 -0800

--
 .../security/authentication/client/AuthenticatedURL.java  | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a79b738/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
--
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
index c50a516..f87d9d8 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
@@ -15,6 +15,7 @@ package org.apache.hadoop.security.authentication.client;
 
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
@@ -269,10 +270,15 @@ public class AuthenticatedURL {
   }
 }
   }
+} else if (respCode == HttpURLConnection.HTTP_NOT_FOUND) {
+  token.set(null);
+  throw new FileNotFoundException(conn.getURL().toString());
 } else {
   token.set(null);
-  throw new AuthenticationException("Authentication failed, status: " + conn.getResponseCode() +
-    ", message: " + conn.getResponseMessage());
+  throw new AuthenticationException("Authentication failed" +
+  ", URL: " + conn.getURL() +
+  ", status: " + conn.getResponseCode() +
+  ", message: " + conn.getResponseMessage());
 }
   }
 



hadoop git commit: YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test setup, and concurrency. (Steve Loughran via gtcarrera9)

2016-03-10 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 a7164e90b -> 8e89bb9d3


YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test 
setup, and concurrency. (Steve Loughran via gtcarrera9)

(cherry-picked from commit d49cfb350454c2dfa2f3eb70f79b6d5030ce7bec)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e89bb9d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e89bb9d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e89bb9d

Branch: refs/heads/branch-2.8
Commit: 8e89bb9d380f25ca18bf0bed5e86a3e139977e36
Parents: a7164e9
Author: Li Lu 
Authored: Thu Mar 10 10:51:55 2016 -0800
Committer: Li Lu 
Committed: Thu Mar 10 10:58:51 2016 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   6 +
 .../hadoop/yarn/client/api/TimelineClient.java  |   4 +-
 .../api/impl/FileSystemTimelineWriter.java  |  51 ++---
 .../client/api/impl/TimelineClientImpl.java |  13 ++
 .../yarn/client/api/impl/TimelineWriter.java|  40 +++-
 .../timeline/webapp/TimelineWebServices.java|  12 +-
 .../yarn/server/timeline/EntityCacheItem.java   |  40 ++--
 .../timeline/EntityGroupFSTimelineStore.java| 204 ++-
 .../hadoop/yarn/server/timeline/LogInfo.java|  11 +-
 .../TestEntityGroupFSTimelineStore.java |   8 +-
 10 files changed, 279 insertions(+), 110 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e89bb9d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 52aa953..5e1c6fa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1679,6 +1679,12 @@ public class YarnConfiguration extends Configuration {
   public static final long
   TIMELINE_SERVICE_CLIENT_INTERNAL_TIMERS_TTL_SECS_DEFAULT = 7 * 60;
 
+  // This is temporary solution. The configuration will be deleted once we have
+  // the FileSystem API to check whether append operation is supported or not.
+  public static final String TIMELINE_SERVICE_ENTITYFILE_FS_SUPPORT_APPEND
+  = TIMELINE_SERVICE_PREFIX
+  + "entity-file.fs-support-append";
+
   // mark app-history related configs @Private as application history is going
   // to be integrated into the timeline service
   @Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e89bb9d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
index 258b9f5..09298b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.client.api;
 
+import java.io.Flushable;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -41,7 +41,8 @@ import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
  */
 @Public
 @Evolving
-public abstract class TimelineClient extends AbstractService {
+public abstract class TimelineClient extends AbstractService implements
+Flushable {
 
   /**
* Create a timeline client. The current UGI when the user initialize the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e89bb9d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index aa1f1f8..9e719b7 100644
--- 

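Two client-visible pieces of this change can be exercised directly: the
temporary fs-support-append switch and the Flushable surface on
TimelineClient. A minimal sketch (the class name is hypothetical, and the
timeline endpoint configuration is omitted):

    import org.apache.hadoop.yarn.client.api.TimelineClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class TimelineFlushSketch {
      public static void main(String[] args) throws Exception {
        YarnConfiguration conf = new YarnConfiguration();
        // Declare that the entity-file store's FileSystem cannot append
        // (e.g. some object stores), using the constant added above.
        conf.setBoolean(
            YarnConfiguration.TIMELINE_SERVICE_ENTITYFILE_FS_SUPPORT_APPEND,
            false);
        TimelineClient client = TimelineClient.createTimelineClient();
        client.init(conf);
        client.start();
        try {
          // Available because TimelineClient now implements Flushable.
          client.flush();
        } finally {
          client.stop();
        }
      }
    }
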
hadoop git commit: YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test setup, and concurrency. (Steve Loughran via gtcarrera9)

2016-03-10 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2e32aa547 -> 76ef097fd


YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test 
setup, and concurrency. (Steve Loughran via gtcarrera9)

(cherry-picked from commit d49cfb350454c2dfa2f3eb70f79b6d5030ce7bec)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76ef097f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76ef097f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76ef097f

Branch: refs/heads/branch-2
Commit: 76ef097fd10af7bf4b876bcfec775f8049ac2034
Parents: 2e32aa5
Author: Li Lu 
Authored: Thu Mar 10 10:51:55 2016 -0800
Committer: Li Lu 
Committed: Thu Mar 10 10:56:51 2016 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   6 +
 .../hadoop/yarn/client/api/TimelineClient.java  |   4 +-
 .../api/impl/FileSystemTimelineWriter.java  |  51 ++---
 .../client/api/impl/TimelineClientImpl.java |  13 ++
 .../yarn/client/api/impl/TimelineWriter.java|  40 +++-
 .../timeline/webapp/TimelineWebServices.java|  12 +-
 .../yarn/server/timeline/EntityCacheItem.java   |  40 ++--
 .../timeline/EntityGroupFSTimelineStore.java| 204 ++-
 .../hadoop/yarn/server/timeline/LogInfo.java|  11 +-
 .../TestEntityGroupFSTimelineStore.java |   8 +-
 10 files changed, 279 insertions(+), 110 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ef097f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 4573404..174d0c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1747,6 +1747,12 @@ public class YarnConfiguration extends Configuration {
   public static final long
   TIMELINE_SERVICE_CLIENT_INTERNAL_TIMERS_TTL_SECS_DEFAULT = 7 * 60;
 
+  // This is temporary solution. The configuration will be deleted once we have
+  // the FileSystem API to check whether append operation is supported or not.
+  public static final String TIMELINE_SERVICE_ENTITYFILE_FS_SUPPORT_APPEND
+  = TIMELINE_SERVICE_PREFIX
+  + "entity-file.fs-support-append";
+
   // mark app-history related configs @Private as application history is going
   // to be integrated into the timeline service
   @Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ef097f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
index 258b9f5..09298b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.client.api;
 
+import java.io.Flushable;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -41,7 +41,8 @@ import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
  */
 @Public
 @Evolving
-public abstract class TimelineClient extends AbstractService {
+public abstract class TimelineClient extends AbstractService implements
+Flushable {
 
   /**
* Create a timeline client. The current UGI when the user initialize the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ef097f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index aa1f1f8..9e719b7 100644
--- 

hadoop git commit: YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test setup, and concurrency. (Steve Loughran via gtcarrera9)

2016-03-10 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 318c9b68b -> d49cfb350


YARN-4696. Improving EntityGroupFSTimelineStore on exception handling, test 
setup, and concurrency. (Steve Loughran via gtcarrera9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d49cfb35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d49cfb35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d49cfb35

Branch: refs/heads/trunk
Commit: d49cfb350454c2dfa2f3eb70f79b6d5030ce7bec
Parents: 318c9b6
Author: Li Lu 
Authored: Thu Mar 10 10:51:55 2016 -0800
Committer: Li Lu 
Committed: Thu Mar 10 10:51:55 2016 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   6 +
 .../hadoop/yarn/client/api/TimelineClient.java  |   4 +-
 .../api/impl/FileSystemTimelineWriter.java  |  51 ++---
 .../client/api/impl/TimelineClientImpl.java |  13 ++
 .../yarn/client/api/impl/TimelineWriter.java|  40 +++-
 .../timeline/webapp/TimelineWebServices.java|  12 +-
 .../yarn/server/timeline/EntityCacheItem.java   |  40 ++--
 .../timeline/EntityGroupFSTimelineStore.java| 204 ++-
 .../hadoop/yarn/server/timeline/LogInfo.java|  11 +-
 .../TestEntityGroupFSTimelineStore.java |   8 +-
 10 files changed, 279 insertions(+), 110 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d49cfb35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 61d1d72..ff4b493 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1747,6 +1747,12 @@ public class YarnConfiguration extends Configuration {
   public static final long
   TIMELINE_SERVICE_CLIENT_INTERNAL_TIMERS_TTL_SECS_DEFAULT = 7 * 60;
 
+  // This is temporary solution. The configuration will be deleted once we have
+  // the FileSystem API to check whether append operation is supported or not.
+  public static final String TIMELINE_SERVICE_ENTITYFILE_FS_SUPPORT_APPEND
+  = TIMELINE_SERVICE_PREFIX
+  + "entity-file.fs-support-append";
+
   // mark app-history related configs @Private as application history is going
   // to be integrated into the timeline service
   @Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d49cfb35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
index 258b9f5..09298b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineClient.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.client.api;
 
+import java.io.Flushable;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -41,7 +41,8 @@ import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
  */
 @Public
 @Evolving
-public abstract class TimelineClient extends AbstractService {
+public abstract class TimelineClient extends AbstractService implements
+Flushable {
 
   /**
* Create a timeline client. The current UGI when the user initialize the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d49cfb35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index aa1f1f8..9e719b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java

hadoop git commit: HADOOP-11404. Clarify the "expected client Kerberos principal is null" authorization message. Contributed by Stephen Chu

2016-03-10 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 95d30a6ec -> 2e32aa547


HADOOP-11404. Clarify the "expected client Kerberos principal is null" 
authorization message. Contributed by Stephen Chu

(cherry picked from commit 318c9b68b059981796f2742b4b7ee604ccdc47e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e32aa54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e32aa54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e32aa54

Branch: refs/heads/branch-2
Commit: 2e32aa5475ebf0d6d0e001f8b7c9192dae036156
Parents: 95d30a6
Author: Harsh J 
Authored: Thu Mar 10 17:05:09 2016 +0530
Committer: Harsh J 
Committed: Thu Mar 10 17:09:56 2016 +0530

--
 .../security/authorize/ServiceAuthorizationManager.java | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e32aa54/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
index 5d29516..9da95dc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
@@ -116,11 +116,13 @@ public class ServiceAuthorizationManager {
 }
 if((clientPrincipal != null && !clientPrincipal.equals(user.getUserName())) ||
     acls.length != 2  || !acls[0].isUserAllowed(user) || acls[1].isUserAllowed(user)) {
-  AUDITLOG.warn(AUTHZ_FAILED_FOR + user + " for protocol=" + protocol
-  + ", expected client Kerberos principal is " + clientPrincipal);
-  throw new AuthorizationException("User " + user + 
-  " is not authorized for protocol " + protocol + 
-  ", expected client Kerberos principal is " + clientPrincipal);
+  String cause = clientPrincipal != null ?
+  ": this service is only accessible by " + clientPrincipal :
+  ": denied by configured ACL";
+  AUDITLOG.warn(AUTHZ_FAILED_FOR + user
+  + " for protocol=" + protocol + cause);
+  throw new AuthorizationException("User " + user +
+  " is not authorized for protocol " + protocol + cause);
 }
 if (addr != null) {
   String hostAddress = addr.getHostAddress();

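To see what clients get out of the reworded message, a tiny sketch (values
are illustrative) that reproduces the new cause selection:

    public class AuthzMessageDemo {
      static String message(String user, String protocol, String clientPrincipal) {
        String cause = clientPrincipal != null
            ? ": this service is only accessible by " + clientPrincipal
            : ": denied by configured ACL";
        return "User " + user + " is not authorized for protocol " + protocol + cause;
      }

      public static void main(String[] args) {
        // Kerberos principal mismatch: the expected principal is named.
        System.out.println(message("alice", "ClientProtocol", "nn/host@EXAMPLE.COM"));
        // Plain ACL denial: no longer reports a "null" expected principal.
        System.out.println(message("bob", "ClientProtocol", null));
      }
    }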


hadoop git commit: HADOOP-11404. Clarify the "expected client Kerberos principal is null" authorization message. Contributed by Stephen Chu

2016-03-10 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2e040d31c -> 318c9b68b


HADOOP-11404. Clarify the "expected client Kerberos principal is null" 
authorization message. Contributed by Stephen Chu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/318c9b68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/318c9b68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/318c9b68

Branch: refs/heads/trunk
Commit: 318c9b68b059981796f2742b4b7ee604ccdc47e5
Parents: 2e040d3
Author: Harsh J 
Authored: Thu Mar 10 17:05:09 2016 +0530
Committer: Harsh J 
Committed: Thu Mar 10 17:05:09 2016 +0530

--
 .../security/authorize/ServiceAuthorizationManager.java | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/318c9b68/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
index 5d29516..9da95dc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
@@ -116,11 +116,13 @@ public class ServiceAuthorizationManager {
 }
 if((clientPrincipal != null && !clientPrincipal.equals(user.getUserName())) ||
     acls.length != 2  || !acls[0].isUserAllowed(user) || acls[1].isUserAllowed(user)) {
-  AUDITLOG.warn(AUTHZ_FAILED_FOR + user + " for protocol=" + protocol
-  + ", expected client Kerberos principal is " + clientPrincipal);
-  throw new AuthorizationException("User " + user + 
-  " is not authorized for protocol " + protocol + 
-  ", expected client Kerberos principal is " + clientPrincipal);
+  String cause = clientPrincipal != null ?
+  ": this service is only accessible by " + clientPrincipal :
+  ": denied by configured ACL";
+  AUDITLOG.warn(AUTHZ_FAILED_FOR + user
+  + " for protocol=" + protocol + cause);
+  throw new AuthorizationException("User " + user +
+  " is not authorized for protocol " + protocol + cause);
 }
 if (addr != null) {
   String hostAddress = addr.getHostAddress();