hadoop git commit: HADOOP-14383. Implement FileSystem that reads from HTTP / HTTPS endpoints.

2017-05-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 424887ecb -> ff5ec3b84


HADOOP-14383. Implement FileSystem that reads from HTTP / HTTPS endpoints.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff5ec3b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff5ec3b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff5ec3b8

Branch: refs/heads/trunk
Commit: ff5ec3b841612f7f28ad8be5bbfec4168a8ac1f0
Parents: 424887e
Author: Haohui Mai 
Authored: Thu May 4 17:27:44 2017 -0700
Committer: Haohui Mai 
Committed: Mon May 8 15:28:45 2017 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |   5 +
 .../hadoop/fs/http/AbstractHttpFileSystem.java  | 153 +++
 .../apache/hadoop/fs/http/HttpFileSystem.java   |  28 
 .../apache/hadoop/fs/http/HttpsFileSystem.java  |  28 
 .../org/apache/hadoop/fs/http/package-info.java |  23 +++
 .../services/org.apache.hadoop.fs.FileSystem|   2 +
 .../hadoop/fs/http/TestHttpFileSystem.java  |  67 
 hadoop-project/pom.xml  |   6 +
 8 files changed, 312 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5ec3b8/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index f76575d..e8b5317 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -318,6 +318,11 @@
   aalto-xml
   compile
 
+    <dependency>
+      <groupId>com.squareup.okhttp3</groupId>
+      <artifactId>mockwebserver</artifactId>
+      <scope>test</scope>
+    </dependency>
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5ec3b8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
new file mode 100644
index 000..fa0b2cf
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Progressable;
+
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.net.URLConnection;
+
+abstract class AbstractHttpFileSystem extends FileSystem {
+  private static final long DEFAULT_BLOCK_SIZE = 4096;
+  private static final Path WORKING_DIR = new Path("/");
+
+  private URI uri;
+
+  @Override
+  public void initialize(URI name, Configuration conf) throws IOException {
+    super.initialize(name, conf);
+    this.uri = name;
+  }
+
+  public abstract String getScheme();
+
+  @Override
+  public URI getUri() {
+    return uri;
+  }
+
+  @Override
+  public FSDataInputStream open(Path path, int bufferSize) throws IOException {
+    URLConnection conn = path.toUri().toURL().openConnection();
+    InputStream in = conn.getInputStream();
+    return new FSDataInputStream(new HttpDataInputStream(in));
+  }
+
+  @Override
+  public FSDataOutputStream create(Path path, FsPermission fsPermission,
+                                   boolean b, int i, short i1, long l,
+                                   Progressable progressable)
+      throws IOException {
+    throw
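
For readers who want to try the new read-only http:// scheme end to end, the following is a minimal usage sketch, not part of the commit. It relies only on the public FileSystem API plus the service-loader registration added in this patch (the META-INF/services/org.apache.hadoop.fs.FileSystem entry in the file list above); the example URL is a placeholder.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URI;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HttpFsReadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The "http" scheme resolves to HttpFileSystem through the service file
    // added by this patch; "https" resolves to HttpsFileSystem the same way.
    FileSystem fs = FileSystem.get(URI.create("http://example.org/"), conf);
    Path path = new Path("http://example.org/data/sample.txt");
    try (FSDataInputStream in = fs.open(path);
         BufferedReader reader = new BufferedReader(
             new InputStreamReader(in, StandardCharsets.UTF_8))) {
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}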

hadoop git commit: HADOOP-14383. Implement FileSystem that reads from HTTP / HTTPS endpoints.

2017-05-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6ca0c134e -> b8870d815


HADOOP-14383. Implement FileSystem that reads from HTTP / HTTPS endpoints.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8870d81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8870d81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8870d81

Branch: refs/heads/branch-2
Commit: b8870d815948bbcd205ad3dee9b206440f923223
Parents: 6ca0c13
Author: Haohui Mai 
Authored: Thu May 4 17:27:44 2017 -0700
Committer: Haohui Mai 
Committed: Mon May 8 15:28:30 2017 -0700

--
 hadoop-common-project/hadoop-common/pom.xml |   5 +
 .../hadoop/fs/http/AbstractHttpFileSystem.java  | 153 +++
 .../apache/hadoop/fs/http/HttpFileSystem.java   |  28 
 .../apache/hadoop/fs/http/HttpsFileSystem.java  |  28 
 .../org/apache/hadoop/fs/http/package-info.java |  23 +++
 .../services/org.apache.hadoop.fs.FileSystem|   2 +
 .../hadoop/fs/http/TestHttpFileSystem.java  |  67 
 hadoop-project/pom.xml  |   6 +
 8 files changed, 312 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8870d81/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 8b39a7f..a2f5a14 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -306,6 +306,11 @@
   aalto-xml
   compile
 
+    <dependency>
+      <groupId>com.squareup.okhttp3</groupId>
+      <artifactId>mockwebserver</artifactId>
+      <scope>test</scope>
+    </dependency>
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8870d81/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
new file mode 100644
index 000..fa0b2cf
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Progressable;
+
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.net.URLConnection;
+
+abstract class AbstractHttpFileSystem extends FileSystem {
+  private static final long DEFAULT_BLOCK_SIZE = 4096;
+  private static final Path WORKING_DIR = new Path("/");
+
+  private URI uri;
+
+  @Override
+  public void initialize(URI name, Configuration conf) throws IOException {
+    super.initialize(name, conf);
+    this.uri = name;
+  }
+
+  public abstract String getScheme();
+
+  @Override
+  public URI getUri() {
+    return uri;
+  }
+
+  @Override
+  public FSDataInputStream open(Path path, int bufferSize) throws IOException {
+    URLConnection conn = path.toUri().toURL().openConnection();
+    InputStream in = conn.getInputStream();
+    return new FSDataInputStream(new HttpDataInputStream(in));
+  }
+
+  @Override
+  public FSDataOutputStream create(Path path, FsPermission fsPermission,
+                                   boolean b, int i, short i1, long l,
+                                   Progressable progressable)
+      throws IOException {
+
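
The concrete HttpFileSystem and HttpsFileSystem classes in the file list are only about 28 lines each and are not shown in full here. A plausible sketch, assuming they do nothing beyond pinning the scheme returned by getScheme(), looks like this (HttpsFileSystem would be the same with "https"):

package org.apache.hadoop.fs.http;

// Sketch only: assumes the subclass merely fixes the URI scheme for the
// abstract base class shown in the diff above.
public class HttpFileSystem extends AbstractHttpFileSystem {
  @Override
  public String getScheme() {
    return "http";
  }
}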

hadoop git commit: HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.

2017-01-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 04ccb1bec -> 977fb0c52


HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/977fb0c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/977fb0c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/977fb0c5

Branch: refs/heads/branch-2.7
Commit: 977fb0c52468d3065c7e887580c050ac04b403e3
Parents: 04ccb1b
Author: Haohui Mai 
Authored: Wed Jan 4 21:01:23 2017 -0800
Committer: Haohui Mai 
Committed: Wed Jan 4 21:16:20 2017 -0800

--
 .../java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java  | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/977fb0c5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index ccfed0b..e1bb4c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -545,6 +545,8 @@ public class WebHdfsFileSystem extends FileSystem
   url = new URL(conn.getHeaderField("Location"));
   redirectHost = url.getHost() + ":" + url.getPort();
 } finally {
+  // TODO: consider not calling conn.disconnect() to allow connection reuse
+  // See http://tinyurl.com/java7-http-keepalive
   conn.disconnect();
 }
   }
@@ -774,7 +776,9 @@ public class WebHdfsFileSystem extends FileSystem
 }
 throw ioe;
   } finally {
-conn.disconnect();
+// Don't call conn.disconnect() to allow connection reuse
+// See http://tinyurl.com/java7-http-keepalive
+conn.getInputStream().close();
   }
 }
 
@@ -820,6 +824,9 @@ public class WebHdfsFileSystem extends FileSystem
 try {
   validateResponse(op, conn, true);
 } finally {
+  // This is a connection to DataNode.  Let's disconnect since
+  // there is little chance that the connection will be reused
+  // any time soon.
   conn.disconnect();
 }
   }
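
The change hinges on how the JDK's HttpURLConnection handles keep-alive: a connection is only returned to the internal connection cache once its response stream has been fully read and closed, while disconnect() tears the socket down. Below is a self-contained sketch of the pattern (plain java.net, not WebHDFS code):

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class KeepAliveSketch {
  // Drain and close the response body instead of calling disconnect(), so the
  // JDK can reuse the underlying socket for the next request to the same host.
  static void fetch(URL url) throws Exception {
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (InputStream in = conn.getInputStream()) {
      byte[] buf = new byte[8192];
      while (in.read(buf) != -1) {
        // consume the body; an unread response pins the connection
      }
    }
    // Intentionally no conn.disconnect(): that would close the socket and
    // defeat HTTP keep-alive, which is what this patch avoids on the NameNode
    // path while keeping disconnect() for one-off DataNode requests.
  }
}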





hadoop git commit: HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.

2017-01-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 42d1b3898 -> e690cd501


HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e690cd50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e690cd50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e690cd50

Branch: refs/heads/branch-2.8
Commit: e690cd5011dabea730bf6cb822e7d31e8c4b719a
Parents: 42d1b38
Author: Haohui Mai 
Authored: Wed Jan 4 21:01:23 2017 -0800
Committer: Haohui Mai 
Committed: Wed Jan 4 21:15:01 2017 -0800

--
 .../java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java  | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e690cd50/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 166e3c3..1a180b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -663,6 +663,8 @@ public class WebHdfsFileSystem extends FileSystem
   url = new URL(conn.getHeaderField("Location"));
   redirectHost = url.getHost() + ":" + url.getPort();
 } finally {
+  // TODO: consider not calling conn.disconnect() to allow connection reuse
+  // See http://tinyurl.com/java7-http-keepalive
   conn.disconnect();
 }
   }
@@ -895,7 +897,9 @@ public class WebHdfsFileSystem extends FileSystem
 LOG.debug("Response decoding failure.", e);
 throw ioe;
   } finally {
-conn.disconnect();
+// Don't call conn.disconnect() to allow connection reuse
+// See http://tinyurl.com/java7-http-keepalive
+conn.getInputStream().close();
   }
 }
 
@@ -942,6 +946,9 @@ public class WebHdfsFileSystem extends FileSystem
 try {
   validateResponse(op, conn, true);
 } finally {
+  // This is a connection to DataNode.  Let's disconnect since
+  // there is little chance that the connection will be reused
+  // any time soon.
   conn.disconnect();
 }
   }





hadoop git commit: HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.

2017-01-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d4ca1c522 -> 20486730e


HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20486730
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20486730
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20486730

Branch: refs/heads/branch-2
Commit: 20486730e894492065352d6c7896c3ca0722de35
Parents: d4ca1c5
Author: Haohui Mai 
Authored: Wed Jan 4 21:01:23 2017 -0800
Committer: Haohui Mai 
Committed: Wed Jan 4 21:03:05 2017 -0800

--
 .../java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java  | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20486730/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 21f45d6..1c32657 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -667,6 +667,8 @@ public class WebHdfsFileSystem extends FileSystem
   url = new URL(conn.getHeaderField("Location"));
   redirectHost = url.getHost() + ":" + url.getPort();
 } finally {
+  // TODO: consider not calling conn.disconnect() to allow connection reuse
+  // See http://tinyurl.com/java7-http-keepalive
   conn.disconnect();
 }
   }
@@ -899,7 +901,9 @@ public class WebHdfsFileSystem extends FileSystem
 LOG.debug("Response decoding failure.", e);
 throw ioe;
   } finally {
-conn.disconnect();
+// Don't call conn.disconnect() to allow connection reuse
+// See http://tinyurl.com/java7-http-keepalive
+conn.getInputStream().close();
   }
 }
 
@@ -946,6 +950,9 @@ public class WebHdfsFileSystem extends FileSystem
 try {
   validateResponse(op, conn, true);
 } finally {
+  // This is a connection to DataNode.  Let's disconnect since
+  // there is little chance that the connection will be reused
+  // any time soon.
   conn.disconnect();
 }
   }





hadoop git commit: HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.

2017-01-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5ed63e3e9 -> a605ff36a


HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a605ff36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a605ff36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a605ff36

Branch: refs/heads/trunk
Commit: a605ff36a53a3d1283c3f6d81eb073e4a2942143
Parents: 5ed63e3
Author: Haohui Mai 
Authored: Wed Jan 4 21:01:23 2017 -0800
Committer: Haohui Mai 
Committed: Wed Jan 4 21:01:23 2017 -0800

--
 .../java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java  | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a605ff36/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index d4fa009..135eef7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -659,6 +659,8 @@ public class WebHdfsFileSystem extends FileSystem
   url = new URL(conn.getHeaderField("Location"));
   redirectHost = url.getHost() + ":" + url.getPort();
 } finally {
+  // TODO: consider not calling conn.disconnect() to allow connection reuse
+  // See http://tinyurl.com/java7-http-keepalive
   conn.disconnect();
 }
   }
@@ -891,7 +893,9 @@ public class WebHdfsFileSystem extends FileSystem
 LOG.debug("Response decoding failure.", e);
 throw ioe;
   } finally {
-conn.disconnect();
+// Don't call conn.disconnect() to allow connection reuse
+// See http://tinyurl.com/java7-http-keepalive
+conn.getInputStream().close();
   }
 }
 
@@ -938,6 +942,9 @@ public class WebHdfsFileSystem extends FileSystem
 try {
   validateResponse(op, conn, true);
 } finally {
+  // This is a connection to DataNode.  Let's disconnect since
+  // there is little chance that the connection will be reused
+  // any time soon.
   conn.disconnect();
 }
   }





hadoop git commit: HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.

2016-12-31 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 7820eeb26 -> d90f7412f


HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d90f7412
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d90f7412
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d90f7412

Branch: refs/heads/branch-2.7
Commit: d90f7412f2162550d3cc9a3ae33f330e7114bc5c
Parents: 7820eeb
Author: Haohui Mai 
Authored: Fri Dec 30 22:17:49 2016 -0800
Committer: Haohui Mai 
Committed: Sat Dec 31 01:12:28 2016 -0800

--
 .../org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java| 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d90f7412/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index ccfed0b..a599705 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -545,7 +545,9 @@ public class WebHdfsFileSystem extends FileSystem
   url = new URL(conn.getHeaderField("Location"));
   redirectHost = url.getHost() + ":" + url.getPort();
 } finally {
-  conn.disconnect();
+  // Don't call conn.disconnect() to allow connection reuse
+  // See http://tinyurl.com/java7-http-keepalive
+  conn.getInputStream().close();
 }
   }
   try {
@@ -774,7 +776,9 @@ public class WebHdfsFileSystem extends FileSystem
 }
 throw ioe;
   } finally {
-conn.disconnect();
+// Don't call conn.disconnect() to allow connection reuse
+// See http://tinyurl.com/java7-http-keepalive
+conn.getInputStream().close();
   }
 }
 
@@ -820,6 +824,9 @@ public class WebHdfsFileSystem extends FileSystem
 try {
   validateResponse(op, conn, true);
 } finally {
+  // This is a connection to DataNode.  Let's disconnect since
+  // there is little chance that the connection will be reused
+  // any time soon.
   conn.disconnect();
 }
   }





hadoop git commit: HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.

2016-12-31 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d7b80f1b0 -> 0ff9efcf3


HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ff9efcf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ff9efcf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ff9efcf

Branch: refs/heads/branch-2.8
Commit: 0ff9efcf37e7e86c6b7e187df8e676c1e93fe0e7
Parents: d7b80f1
Author: Haohui Mai 
Authored: Fri Dec 30 22:17:49 2016 -0800
Committer: Haohui Mai 
Committed: Sat Dec 31 01:07:41 2016 -0800

--
 .../org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java| 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ff9efcf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 166e3c3..4e143cc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -663,7 +663,9 @@ public class WebHdfsFileSystem extends FileSystem
   url = new URL(conn.getHeaderField("Location"));
   redirectHost = url.getHost() + ":" + url.getPort();
 } finally {
-  conn.disconnect();
+  // Don't call conn.disconnect() to allow connection reuse
+  // See http://tinyurl.com/java7-http-keepalive
+  conn.getInputStream().close();
 }
   }
   try {
@@ -895,7 +897,9 @@ public class WebHdfsFileSystem extends FileSystem
 LOG.debug("Response decoding failure.", e);
 throw ioe;
   } finally {
-conn.disconnect();
+// Don't call conn.disconnect() to allow connection reuse
+// See http://tinyurl.com/java7-http-keepalive
+conn.getInputStream().close();
   }
 }
 
@@ -942,6 +946,9 @@ public class WebHdfsFileSystem extends FileSystem
 try {
   validateResponse(op, conn, true);
 } finally {
+  // This is a connection to DataNode.  Let's disconnect since
+  // there is little chance that the connection will be reused
+  // any time soon.
   conn.disconnect();
 }
   }





hadoop git commit: HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.

2016-12-31 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8cb7aa2b5 -> 1bc9b316b


HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bc9b316
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bc9b316
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bc9b316

Branch: refs/heads/branch-2
Commit: 1bc9b316ba60b1ecc8d6fca8d78642933d2c186b
Parents: 8cb7aa2
Author: Haohui Mai 
Authored: Fri Dec 30 22:17:49 2016 -0800
Committer: Haohui Mai 
Committed: Fri Dec 30 22:59:24 2016 -0800

--
 .../org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java| 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bc9b316/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 1607be9..4cc5b67 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -667,7 +667,9 @@ public class WebHdfsFileSystem extends FileSystem
   url = new URL(conn.getHeaderField("Location"));
   redirectHost = url.getHost() + ":" + url.getPort();
 } finally {
-  conn.disconnect();
+  // Don't call conn.disconnect() to allow connection reuse
+  // See http://tinyurl.com/java7-http-keepalive
+  conn.getInputStream().close();
 }
   }
   try {
@@ -899,7 +901,9 @@ public class WebHdfsFileSystem extends FileSystem
 LOG.debug("Response decoding failure.", e);
 throw ioe;
   } finally {
-conn.disconnect();
+// Don't call conn.disconnect() to allow connection reuse
+// See http://tinyurl.com/java7-http-keepalive
+conn.getInputStream().close();
   }
 }
 
@@ -946,6 +950,9 @@ public class WebHdfsFileSystem extends FileSystem
 try {
   validateResponse(op, conn, true);
 } finally {
+  // This is a connection to DataNode.  Let's disconnect since
+  // there is little chance that the connection will be reused
+  // any time soon.
   conn.disconnect();
 }
   }





hadoop git commit: HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.

2016-12-30 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 165d01a73 -> b811a1c14


HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b811a1c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b811a1c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b811a1c1

Branch: refs/heads/trunk
Commit: b811a1c14d00ab236158ab75fad1fe41364045a4
Parents: 165d01a
Author: Haohui Mai 
Authored: Fri Dec 30 22:17:49 2016 -0800
Committer: Haohui Mai 
Committed: Fri Dec 30 22:17:49 2016 -0800

--
 .../org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java| 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b811a1c1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 26cfc01..c89913e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -659,7 +659,9 @@ public class WebHdfsFileSystem extends FileSystem
   url = new URL(conn.getHeaderField("Location"));
   redirectHost = url.getHost() + ":" + url.getPort();
 } finally {
-  conn.disconnect();
+  // Don't call conn.disconnect() to allow connection reuse
+  // See http://tinyurl.com/java7-http-keepalive
+  conn.getInputStream().close();
 }
   }
   try {
@@ -891,7 +893,9 @@ public class WebHdfsFileSystem extends FileSystem
 LOG.debug("Response decoding failure.", e);
 throw ioe;
   } finally {
-conn.disconnect();
+// Don't call conn.disconnect() to allow connection reuse
+// See http://tinyurl.com/java7-http-keepalive
+conn.getInputStream().close();
   }
 }
 
@@ -938,6 +942,9 @@ public class WebHdfsFileSystem extends FileSystem
 try {
   validateResponse(op, conn, true);
 } finally {
+  // This is a connection to DataNode.  Let's disconnect since
+  // there is little chance that the connection will be reused
+  // any time soon.
   conn.disconnect();
 }
   }





hadoop git commit: HDFS-11258. File mtime change could not save to editlog. Contributed by Jimmy Xiang.

2016-12-21 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cc42dbb70 -> de926f93c


HDFS-11258. File mtime change could not save to editlog. Contributed by Jimmy Xiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de926f93
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de926f93
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de926f93

Branch: refs/heads/branch-2
Commit: de926f93c23b93f1ac440ad1d9edc482eb5741d9
Parents: cc42dbb
Author: Haohui Mai 
Authored: Wed Dec 21 13:04:03 2016 -0800
Committer: Haohui Mai 
Committed: Wed Dec 21 14:22:09 2016 -0800

--
 .../hdfs/server/namenode/FSDirAttrOp.java   | 16 ++--
 .../hdfs/server/namenode/TestFSDirAttrOp.java   | 80 
 2 files changed, 86 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de926f93/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 6c2506b..c9663dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -488,17 +488,13 @@ public class FSDirAttrOp {
   inode = inode.setModificationTime(mtime, latest);
   status = true;
 }
-if (atime != -1) {
-  long inodeTime = inode.getAccessTime();
 
-  // if the last access time update was within the last precision interval, then
-  // no need to store access time
-  if (atime <= inodeTime + fsd.getAccessTimePrecision() && !force) {
-status =  false;
-  } else {
-inode.setAccessTime(atime, latest);
-status = true;
-  }
+// if the last access time update was within the last precision interval,
+// then no need to store access time
+if (atime != -1 && (status || force
+|| atime > inode.getAccessTime() + fsd.getAccessTimePrecision())) {
+  inode.setAccessTime(atime, latest);
+  status = true;
 }
 return status;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de926f93/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
new file mode 100644
index 000..c75e8ec
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test {@link FSDirAttrOp}.
+ */
+public class TestFSDirAttrOp {
+  public static final Log LOG = LogFactory.getLog(TestFSDirAttrOp.class);
+
+  private boolean unprotectedSetTimes(long atime, long atime0, long precision,
+  long mtime, boolean force) throws QuotaExceededException, UnresolvedLinkException {
+FSDirectory fsd = Mockito.mock(FSDirectory.class);
+

hadoop git commit: HDFS-11258. File mtime change could not save to editlog. Contributed by Jimmy Xiang.

2016-12-21 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 ae053df2d -> 72a8c8dc6


HDFS-11258. File mtime change could not save to editlog. Contributed by Jimmy Xiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72a8c8dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72a8c8dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72a8c8dc

Branch: refs/heads/branch-2.8
Commit: 72a8c8dc6f5f21afa1b99b90037694c1aa29a9e0
Parents: ae053df
Author: Haohui Mai 
Authored: Wed Dec 21 13:04:03 2016 -0800
Committer: Haohui Mai 
Committed: Wed Dec 21 14:22:27 2016 -0800

--
 .../hdfs/server/namenode/FSDirAttrOp.java   | 16 ++--
 .../hdfs/server/namenode/TestFSDirAttrOp.java   | 80 
 2 files changed, 86 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72a8c8dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 6c2506b..c9663dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -488,17 +488,13 @@ public class FSDirAttrOp {
   inode = inode.setModificationTime(mtime, latest);
   status = true;
 }
-if (atime != -1) {
-  long inodeTime = inode.getAccessTime();
 
-  // if the last access time update was within the last precision interval, then
-  // no need to store access time
-  if (atime <= inodeTime + fsd.getAccessTimePrecision() && !force) {
-status =  false;
-  } else {
-inode.setAccessTime(atime, latest);
-status = true;
-  }
+// if the last access time update was within the last precision interval,
+// then no need to store access time
+if (atime != -1 && (status || force
+|| atime > inode.getAccessTime() + fsd.getAccessTimePrecision())) {
+  inode.setAccessTime(atime, latest);
+  status = true;
 }
 return status;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72a8c8dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
new file mode 100644
index 000..c75e8ec
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test {@link FSDirAttrOp}.
+ */
+public class TestFSDirAttrOp {
+  public static final Log LOG = LogFactory.getLog(TestFSDirAttrOp.class);
+
+  private boolean unprotectedSetTimes(long atime, long atime0, long precision,
+  long mtime, boolean force) throws QuotaExceededException, UnresolvedLinkException {
+FSDirectory fsd = Mockito.mock(FSDirectory.class);
+

hadoop git commit: HDFS-11258. File mtime change could not save to editlog. Contributed by Jimmy Xiang.

2016-12-21 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-alpha1 9d11d6663 -> 8937a4a85


HDFS-11258. File mtime change could not save to editlog. Contributed by Jimmy Xiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8937a4a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8937a4a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8937a4a8

Branch: refs/heads/branch-3.0.0-alpha1
Commit: 8937a4a85b2c64cb6dda3592b5cbf6eb5a5e2b3c
Parents: 9d11d66
Author: Haohui Mai 
Authored: Wed Dec 21 13:04:03 2016 -0800
Committer: Haohui Mai 
Committed: Wed Dec 21 14:21:39 2016 -0800

--
 .../hdfs/server/namenode/FSDirAttrOp.java   | 16 ++--
 .../hdfs/server/namenode/TestFSDirAttrOp.java   | 80 
 2 files changed, 86 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8937a4a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index e19341c..c56d574 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -517,17 +517,13 @@ public class FSDirAttrOp {
   inode = inode.setModificationTime(mtime, latest);
   status = true;
 }
-if (atime != -1) {
-  long inodeTime = inode.getAccessTime();
 
-  // if the last access time update was within the last precision interval, then
-  // no need to store access time
-  if (atime <= inodeTime + fsd.getAccessTimePrecision() && !force) {
-status =  false;
-  } else {
-inode.setAccessTime(atime, latest);
-status = true;
-  }
+// if the last access time update was within the last precision interval,
+// then no need to store access time
+if (atime != -1 && (status || force
+|| atime > inode.getAccessTime() + fsd.getAccessTimePrecision())) {
+  inode.setAccessTime(atime, latest);
+  status = true;
 }
 return status;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8937a4a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
new file mode 100644
index 000..c75e8ec
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test {@link FSDirAttrOp}.
+ */
+public class TestFSDirAttrOp {
+  public static final Log LOG = LogFactory.getLog(TestFSDirAttrOp.class);
+
+  private boolean unprotectedSetTimes(long atime, long atime0, long precision,
+  long mtime, boolean force) throws QuotaExceededException, UnresolvedLinkException {
+FSDirectory fsd = 

hadoop git commit: HDFS-11258. File mtime change could not save to editlog. Contributed by Jimmy Xiang.

2016-12-21 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8b042bc1e -> f6e80acd6


HDFS-11258. File mtime change could not save to editlog. Contributed by Jimmy Xiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6e80acd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6e80acd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6e80acd

Branch: refs/heads/trunk
Commit: f6e80acd681548b14fe3f0f3d2b3aaf800d10310
Parents: 8b042bc
Author: Haohui Mai 
Authored: Wed Dec 21 13:04:03 2016 -0800
Committer: Haohui Mai 
Committed: Wed Dec 21 13:04:03 2016 -0800

--
 .../hdfs/server/namenode/FSDirAttrOp.java   | 16 ++---
 .../hdfs/server/namenode/TestFSDirAttrOp.java   | 76 
 2 files changed, 82 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6e80acd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index a3e7f9f..4d26885 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -489,17 +489,13 @@ public class FSDirAttrOp {
   inode = inode.setModificationTime(mtime, latest);
   status = true;
 }
-if (atime != -1) {
-  long inodeTime = inode.getAccessTime();
 
-  // if the last access time update was within the last precision interval, then
-  // no need to store access time
-  if (atime <= inodeTime + fsd.getAccessTimePrecision() && !force) {
-status =  false;
-  } else {
-inode.setAccessTime(atime, latest);
-status = true;
-  }
+// if the last access time update was within the last precision interval,
+// then no need to store access time
+if (atime != -1 && (status || force
+|| atime > inode.getAccessTime() + fsd.getAccessTimePrecision())) {
+  inode.setAccessTime(atime, latest);
+  status = true;
 }
 return status;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6e80acd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
new file mode 100644
index 000..8cd68a1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test {@link FSDirAttrOp}.
+ */
+public class TestFSDirAttrOp {
+  public static final Log LOG = LogFactory.getLog(TestFSDirAttrOp.class);
+
+  private boolean unprotectedSetTimes(long atime, long atime0, long precision,
+  long mtime, boolean force) throws QuotaExceededException {
+FSDirectory fsd = Mockito.mock(FSDirectory.class);
+INodesInPath iip = Mockito.mock(INodesInPath.class);
+INode inode = Mockito.mock(INode.class);
+
+when(fsd.getAccessTimePrecision()).thenReturn(precision);
+

hadoop git commit: HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS classes at runtime. Contributed by Sean Busbey. (cherry picked from commit 3fe61e0bb0d025a6acbb754027f73f3084

2016-03-26 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 02e81caa2 -> 17dbf82f5


HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS classes at runtime. Contributed by Sean Busbey.
(cherry picked from commit 3fe61e0bb0d025a6acbb754027f73f3084b2f4d1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17dbf82f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17dbf82f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17dbf82f

Branch: refs/heads/branch-2.7
Commit: 17dbf82f54a14926b7d0dc5d8eb1b35f69b70d3c
Parents: 02e81ca
Author: Aaron T. Myers 
Authored: Thu Apr 9 09:40:08 2015 -0700
Committer: Haohui Mai 
Committed: Sat Mar 26 17:54:09 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  8 +++--
 .../apache/hadoop/hdfs/TestDFSConfigKeys.java   | 37 
 3 files changed, 46 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17dbf82f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f01e697..838dd5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -167,6 +167,9 @@ Release 2.7.2 - 2016-01-25
 
 HDFS-9574. Reduce client failures during datanode restart (kihwal)
 
+HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
+classes at runtime. (Sean Busbey via atm)
+
   OPTIMIZATIONS
 
 HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17dbf82f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 57d9d59..e73b9da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
-import org.apache.hadoop.hdfs.web.AuthFilter;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -234,7 +233,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY = "dfs.namenode.replication.max-streams-hard-limit";
   public static final int     DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = "dfs.web.authentication.filter";
-  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = AuthFilter.class.getName();
+  /* Phrased as below to avoid javac inlining as a constant, to match the behavior when
+     this was AuthFilter.class.getName(). Note that if you change the import for AuthFilter, you
+     need to update the literal here as well as TestDFSConfigKeys.
+   */
+  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT =
+      "org.apache.hadoop.hdfs.web.AuthFilter".toString();
   public static final String  DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
   public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
   public static final String  DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17dbf82f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
new file mode 100644
index 000..c7df891
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this 
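
The .toString() in the new default is deliberate: javac treats a static final String initialized from a compile-time constant expression as a constant and copies its value into every referencing class, whereas the old AuthFilter.class.getName() was not a constant expression and was read at run time. Appending .toString() keeps the field non-constant so the runtime and recompilation behavior match. A small illustrative sketch with hypothetical class names, not Hadoop code:

class Keys {
  // Compile-time constant: javac inlines the value into every class that
  // references it, so stale copies survive until those classes are recompiled.
  static final String INLINED = "org.example.AuthFilter";

  // Not a compile-time constant (a method call is involved): referencing
  // classes load the value from Keys at run time, matching the behavior of
  // the old AuthFilter.class.getName() initializer.
  static final String NOT_INLINED = "org.example.AuthFilter".toString();
}

class Consumer {
  String a() { return Keys.INLINED; }      // compiled as ldc "org.example.AuthFilter"
  String b() { return Keys.NOT_INLINED; }  // compiled as getstatic Keys.NOT_INLINED
}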

hadoop git commit: HADOOP-12819. Migrate TestSaslRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-03-20 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 57fed702f -> 88e7466c4


HADOOP-12819. Migrate TestSaslRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88e7466c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88e7466c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88e7466c

Branch: refs/heads/branch-2.8
Commit: 88e7466c4b55c57cef5a4545d47e1cba68a4ed06
Parents: 57fed70
Author: Haohui Mai 
Authored: Sun Mar 20 17:40:59 2016 -0700
Committer: Haohui Mai 
Committed: Sun Mar 20 17:43:10 2016 -0700

--
 .../java/org/apache/hadoop/ipc/TestRpcBase.java | 192 -
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 395 +--
 .../hadoop/security/TestDoAsEffectiveUser.java  |   6 +-
 .../hadoop-common/src/test/proto/test.proto |   9 +
 .../src/test/proto/test_rpc_service.proto   |   4 +
 ...apache.hadoop.security.token.TokenIdentifier |   2 +-
 6 files changed, 305 insertions(+), 303 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88e7466c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
index 03fd31e..bc604a4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
@@ -29,6 +29,22 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
 import org.junit.Assert;
 
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.protobuf.TestProtos;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.junit.Assert;
+
+import java.io.DataInput;
+import java.io.DataOutput;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
@@ -37,6 +53,8 @@ import java.net.InetSocketAddress;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 
@@ -149,6 +167,89 @@ public class TestRpcBase {
 return count;
   }
 
+  public static class TestTokenIdentifier extends TokenIdentifier {
+private Text tokenid;
+private Text realUser;
+final static Text KIND_NAME = new Text("test.token");
+
+public TestTokenIdentifier() {
+  this(new Text(), new Text());
+}
+public TestTokenIdentifier(Text tokenid) {
+  this(tokenid, new Text());
+}
+public TestTokenIdentifier(Text tokenid, Text realUser) {
+  this.tokenid = tokenid == null ? new Text() : tokenid;
+  this.realUser = realUser == null ? new Text() : realUser;
+}
+@Override
+public Text getKind() {
+  return KIND_NAME;
+}
+@Override
+public UserGroupInformation getUser() {
+  if (realUser.toString().isEmpty()) {
+return UserGroupInformation.createRemoteUser(tokenid.toString());
+  } else {
+UserGroupInformation realUgi = UserGroupInformation
+.createRemoteUser(realUser.toString());
+return UserGroupInformation
+.createProxyUser(tokenid.toString(), realUgi);
+  }
+}
+
+@Override
+public void readFields(DataInput in) throws IOException {
+  tokenid.readFields(in);
+  realUser.readFields(in);
+}
+@Override
+public void write(DataOutput out) throws IOException {
+  tokenid.write(out);
+  realUser.write(out);
+}
+  }
+
+  public static class TestTokenSecretManager extends
+  SecretManager<TestTokenIdentifier> {
+@Override
+public byte[] createPassword(TestTokenIdentifier id) {
+  return id.getBytes();
+}
+
+@Override
+public byte[] retrievePassword(TestTokenIdentifier id)
+throws InvalidToken {
+  return id.getBytes();
+}
+
+@Override
+public TestTokenIdentifier createIdentifier() {

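For readers following the new TestRpcBase helpers: the identifier/secret-manager pair above is the standard Hadoop token recipe, where the password is derived from the serialized identifier and the token is then attached to a UserGroupInformation for SASL TOKEN authentication. A minimal usage sketch, assuming the classes from this diff; the service string is illustrative:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenSecretManager;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class TestTokenUsageSketch {
  public static void main(String[] args) throws Exception {
    TestTokenSecretManager sm = new TestTokenSecretManager();
    UserGroupInformation current = UserGroupInformation.getCurrentUser();

    // The identifier carries the "token user"; the secret manager derives the password.
    TestTokenIdentifier tokenId =
        new TestTokenIdentifier(new Text(current.getUserName()));
    Token<TestTokenIdentifier> token =
        new Token<TestTokenIdentifier>(tokenId, sm);

    // "host:port" of the RPC server under test -- illustrative value.
    token.setService(new Text("localhost:0"));

    // Attaching the token lets SASL negotiate TOKEN auth for this UGI.
    current.addToken(token);
  }
}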
hadoop git commit: HADOOP-12819. Migrate TestSaslRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-03-20 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 623678215 -> 478a25b92


HADOOP-12819. Migrate TestSaslRPC and related codes to rebase on 
ProtobufRpcEngine. Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/478a25b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/478a25b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/478a25b9

Branch: refs/heads/branch-2
Commit: 478a25b92938cae5566b50818ff0f33de49934df
Parents: 6236782
Author: Haohui Mai 
Authored: Sun Mar 20 17:40:59 2016 -0700
Committer: Haohui Mai 
Committed: Sun Mar 20 17:41:20 2016 -0700

--
 .../java/org/apache/hadoop/ipc/TestRpcBase.java | 192 -
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 395 +--
 .../hadoop/security/TestDoAsEffectiveUser.java  |   6 +-
 .../hadoop-common/src/test/proto/test.proto |   9 +
 .../src/test/proto/test_rpc_service.proto   |   4 +
 ...apache.hadoop.security.token.TokenIdentifier |   2 +-
 6 files changed, 305 insertions(+), 303 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/478a25b9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
index 03fd31e..bc604a4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
@@ -29,6 +29,22 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
 import org.junit.Assert;
 
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.protobuf.TestProtos;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.junit.Assert;
+
+import java.io.DataInput;
+import java.io.DataOutput;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
@@ -37,6 +53,8 @@ import java.net.InetSocketAddress;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 
@@ -149,6 +167,89 @@ public class TestRpcBase {
 return count;
   }
 
+  public static class TestTokenIdentifier extends TokenIdentifier {
+private Text tokenid;
+private Text realUser;
+final static Text KIND_NAME = new Text("test.token");
+
+public TestTokenIdentifier() {
+  this(new Text(), new Text());
+}
+public TestTokenIdentifier(Text tokenid) {
+  this(tokenid, new Text());
+}
+public TestTokenIdentifier(Text tokenid, Text realUser) {
+  this.tokenid = tokenid == null ? new Text() : tokenid;
+  this.realUser = realUser == null ? new Text() : realUser;
+}
+@Override
+public Text getKind() {
+  return KIND_NAME;
+}
+@Override
+public UserGroupInformation getUser() {
+  if (realUser.toString().isEmpty()) {
+return UserGroupInformation.createRemoteUser(tokenid.toString());
+  } else {
+UserGroupInformation realUgi = UserGroupInformation
+.createRemoteUser(realUser.toString());
+return UserGroupInformation
+.createProxyUser(tokenid.toString(), realUgi);
+  }
+}
+
+@Override
+public void readFields(DataInput in) throws IOException {
+  tokenid.readFields(in);
+  realUser.readFields(in);
+}
+@Override
+public void write(DataOutput out) throws IOException {
+  tokenid.write(out);
+  realUser.write(out);
+}
+  }
+
+  public static class TestTokenSecretManager extends
+  SecretManager<TestTokenIdentifier> {
+@Override
+public byte[] createPassword(TestTokenIdentifier id) {
+  return id.getBytes();
+}
+
+@Override
+public byte[] retrievePassword(TestTokenIdentifier id)
+throws InvalidToken {
+  return id.getBytes();
+}
+
+@Override
+public TestTokenIdentifier createIdentifier() {

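The migrated tests talk to the server through the protobuf TestRpcService stub rather than the old Writable-based TestProtocol. A condensed sketch of that client-side pattern under the same assumptions as the diff (the protocol version constant here is a placeholder):

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
import org.apache.hadoop.ipc.protobuf.TestProtos;

public class ProtobufClientSketch {
  static void pingOnce(Configuration conf, InetSocketAddress addr) throws Exception {
    // Tell the RPC layer to use ProtobufRpcEngine for this protocol.
    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);

    TestRpcService proxy = RPC.getProxy(TestRpcService.class, 0L, addr, conf);
    try {
      // Protobuf stubs take (RpcController, request) and return a response proto.
      proxy.ping(null, TestProtos.EmptyRequestProto.newBuilder().build());
    } finally {
      RPC.stopProxy(proxy);
    }
  }
}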
hadoop git commit: HADOOP-12819. Migrate TestSaslRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-03-20 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8a3f0cb25 -> ed1e23fe8


HADOOP-12819. Migrate TestSaslRPC and related codes to rebase on 
ProtobufRpcEngine. Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed1e23fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed1e23fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed1e23fe

Branch: refs/heads/trunk
Commit: ed1e23fe8f2d608e68c6d41e81f52d5b7773d96d
Parents: 8a3f0cb
Author: Haohui Mai 
Authored: Sun Mar 20 17:40:59 2016 -0700
Committer: Haohui Mai 
Committed: Sun Mar 20 17:40:59 2016 -0700

--
 .../java/org/apache/hadoop/ipc/TestRpcBase.java | 192 -
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java | 395 +--
 .../hadoop/security/TestDoAsEffectiveUser.java  |   6 +-
 .../hadoop-common/src/test/proto/test.proto |   9 +
 .../src/test/proto/test_rpc_service.proto   |   4 +
 ...apache.hadoop.security.token.TokenIdentifier |   2 +-
 6 files changed, 305 insertions(+), 303 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1e23fe/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
index 03fd31e..bc604a4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
@@ -29,6 +29,22 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
 import org.junit.Assert;
 
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.protobuf.TestProtos;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.junit.Assert;
+
+import java.io.DataInput;
+import java.io.DataOutput;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
@@ -37,6 +53,8 @@ import java.net.InetSocketAddress;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 
@@ -149,6 +167,89 @@ public class TestRpcBase {
 return count;
   }
 
+  public static class TestTokenIdentifier extends TokenIdentifier {
+private Text tokenid;
+private Text realUser;
+final static Text KIND_NAME = new Text("test.token");
+
+public TestTokenIdentifier() {
+  this(new Text(), new Text());
+}
+public TestTokenIdentifier(Text tokenid) {
+  this(tokenid, new Text());
+}
+public TestTokenIdentifier(Text tokenid, Text realUser) {
+  this.tokenid = tokenid == null ? new Text() : tokenid;
+  this.realUser = realUser == null ? new Text() : realUser;
+}
+@Override
+public Text getKind() {
+  return KIND_NAME;
+}
+@Override
+public UserGroupInformation getUser() {
+  if (realUser.toString().isEmpty()) {
+return UserGroupInformation.createRemoteUser(tokenid.toString());
+  } else {
+UserGroupInformation realUgi = UserGroupInformation
+.createRemoteUser(realUser.toString());
+return UserGroupInformation
+.createProxyUser(tokenid.toString(), realUgi);
+  }
+}
+
+@Override
+public void readFields(DataInput in) throws IOException {
+  tokenid.readFields(in);
+  realUser.readFields(in);
+}
+@Override
+public void write(DataOutput out) throws IOException {
+  tokenid.write(out);
+  realUser.write(out);
+}
+  }
+
+  public static class TestTokenSecretManager extends
+  SecretManager<TestTokenIdentifier> {
+@Override
+public byte[] createPassword(TestTokenIdentifier id) {
+  return id.getBytes();
+}
+
+@Override
+public byte[] retrievePassword(TestTokenIdentifier id)
+throws InvalidToken {
+  return id.getBytes();
+}
+
+@Override
+public TestTokenIdentifier createIdentifier() {

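Since TokenIdentifier is a Writable, the write/readFields pair above has to round-trip byte-for-byte; the token kind registered through the services file touched by this commit is resolved from exactly these bytes. A quick self-contained check using Hadoop's in-memory buffers (user names are illustrative):

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;

public class TokenRoundTripSketch {
  public static void main(String[] args) throws Exception {
    TestTokenIdentifier original =
        new TestTokenIdentifier(new Text("alice"), new Text("proxy-admin"));

    // Serialize with write(), then read back with readFields().
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    TestTokenIdentifier copy = new TestTokenIdentifier();
    copy.readFields(in);

    // getUser() should rebuild the same proxy-user relationship from the copy.
    System.out.println(copy.getUser());
  }
}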
hadoop git commit: HDFS-7166. SbNN Web UI shows #Under replicated blocks and #pending deletion blocks. Contributed by Wei-Chiu Chuang.

2016-03-20 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 878e1cfc7 -> 57fed702f


HDFS-7166. SbNN Web UI shows #Under replicated blocks and #pending deletion 
blocks. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57fed702
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57fed702
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57fed702

Branch: refs/heads/branch-2.8
Commit: 57fed702f17a0ed4462011c0d14c7c605d01bcc3
Parents: 878e1cf
Author: Haohui Mai 
Authored: Sun Mar 20 17:21:25 2016 -0700
Committer: Haohui Mai 
Committed: Sun Mar 20 17:22:09 2016 -0700

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57fed702/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 7ad38d7..3d9ca42 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -106,7 +106,8 @@
 
 {/if}
 
-{@if cond="{NumberOfMissingBlocks} > 0"}
+{@eq key=nnstat.State value="active"}
+  {@if cond="{NumberOfMissingBlocks} > 0"}
 
   
 
@@ -119,7 +120,8 @@
   
   Please check the logs or run fsck in order to identify the missing 
blocks.{@if cond="{NumberOfSnapshottableDirs} > 0"} Please run fsck with 
-includeSnapshots in order to see detailed reports about snapshots.{/if} See 
the Hadoop FAQ for common causes and potential solutions.
 
-{/if}
+  {/if}
+{/eq}
 {/nn}
 
 Overview {#nnstat}'{HostAndPort}' 
({State}){/nnstat}
@@ -173,10 +175,13 @@
   Dead Nodes{NumDeadDataNodes} 
(Decommissioned: {NumDecomDeadDataNodes})
   Decommissioning 
Nodes{NumDecommissioningDataNodes}
   Total Datanode Volume 
Failures{VolumeFailuresTotal} 
({EstimatedCapacityLostTotal|fmt_bytes})
-  Number of Under-Replicated 
Blocks{UnderReplicatedBlocks}
-  Number of Blocks Pending 
Deletion{PendingDeletionBlocks}
+  {@eq key=nnstat.State value="active"}
+Number of Under-Replicated 
Blocks{UnderReplicatedBlocks}
+Number of Blocks Pending 
Deletion{PendingDeletionBlocks}
+  {/eq}
   Block Deletion Start 
Time{BlockDeletionStartTime|date_tostring}
 {/fs}
+
 
 
 NameNode Journal Status



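The template now keys the block-health rows on nnstat.State, which the UI fetches from the NameNode JMX servlet; the same gating can be reproduced outside the browser. A rough sketch; the hostname, the 2.x default port 50070, and the naive string match are assumptions, not part of this patch:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class NameNodeStateProbe {
  public static void main(String[] args) throws Exception {
    // NameNodeStatus bean exposes the HA state ("active" / "standby").
    URL url = new URL("http://namenode.example.com:50070/jmx"
        + "?qry=Hadoop:service=NameNode,name=NameNodeStatus");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader r = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      StringBuilder body = new StringBuilder();
      for (String line; (line = r.readLine()) != null; ) {
        body.append(line);
      }
      // Crude check; a real client would parse the JSON response instead.
      System.out.println(body.indexOf("\"active\"") >= 0
          ? "active NN: block metrics are meaningful"
          : "standby NN: block metrics are not tracked here");
    } finally {
      conn.disconnect();
    }
  }
}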
hadoop git commit: HDFS-7166. SbNN Web UI shows #Under replicated blocks and #pending deletion blocks. Contributed by Wei-Chiu Chuang.

2016-03-20 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 95b59c17a -> 623678215


HDFS-7166. SbNN Web UI shows #Under replicated blocks and #pending deletion 
blocks. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62367821
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62367821
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62367821

Branch: refs/heads/branch-2
Commit: 6236782151a8fdd507ba25a9303376015f824233
Parents: 95b59c1
Author: Haohui Mai 
Authored: Sun Mar 20 17:21:25 2016 -0700
Committer: Haohui Mai 
Committed: Sun Mar 20 17:21:53 2016 -0700

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 13 +
 1 file changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62367821/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 7ad38d7..3d9ca42 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -106,7 +106,8 @@
 
 {/if}
 
-{@if cond="{NumberOfMissingBlocks} > 0"}
+{@eq key=nnstat.State value="active"}
+  {@if cond="{NumberOfMissingBlocks} > 0"}
 
   
 
@@ -119,7 +120,8 @@
   
   Please check the logs or run fsck in order to identify the missing 
blocks.{@if cond="{NumberOfSnapshottableDirs} > 0"} Please run fsck with 
-includeSnapshots in order to see detailed reports about snapshots.{/if} See 
the Hadoop FAQ for common causes and potential solutions.
 
-{/if}
+  {/if}
+{/eq}
 {/nn}
 
 Overview {#nnstat}'{HostAndPort}' 
({State}){/nnstat}
@@ -173,10 +175,13 @@
   Dead Nodes{NumDeadDataNodes} 
(Decommissioned: {NumDecomDeadDataNodes})
   Decommissioning 
Nodes{NumDecommissioningDataNodes}
   Total Datanode Volume 
Failures{VolumeFailuresTotal} 
({EstimatedCapacityLostTotal|fmt_bytes})
-  Number of Under-Replicated 
Blocks{UnderReplicatedBlocks}
-  Number of Blocks Pending 
Deletion{PendingDeletionBlocks}
+  {@eq key=nnstat.State value="active"}
+Number of Under-Replicated 
Blocks{UnderReplicatedBlocks}
+Number of Blocks Pending 
Deletion{PendingDeletionBlocks}
+  {/eq}
   Block Deletion Start 
Time{BlockDeletionStartTime|date_tostring}
 {/fs}
+
 
 
 NameNode Journal Status



[2/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. 
Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69b195d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69b195d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69b195d6

Branch: refs/heads/branch-2.8
Commit: 69b195d619fdc4b00c912e61879e689dd33d89e7
Parents: 7ddff4b
Author: Haohui Mai <whe...@apache.org>
Authored: Mon Feb 29 11:41:00 2016 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Mon Feb 29 14:15:25 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |3 +
 .../org/apache/hadoop/ipc/RPCCallBenchmark.java |4 +-
 .../hadoop/ipc/TestMultipleProtocolServer.java  |   14 +-
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java  |  137 +--
 .../java/org/apache/hadoop/ipc/TestRPC.java | 1071 --
 .../hadoop/ipc/TestRPCServerShutdown.java   |  106 ++
 .../java/org/apache/hadoop/ipc/TestRpcBase.java |  295 +
 .../hadoop-common/src/test/proto/test.proto |   33 +
 .../src/test/proto/test_rpc_service.proto   |7 +
 .../server/nodemanager/TestNMAuditLogger.java   |   40 +-
 .../resourcemanager/TestRMAuditLogger.java  |   44 +-
 11 files changed, 1030 insertions(+), 724 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b195d6/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 03a844f..8ef0723 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1055,6 +1055,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12846. Credential Provider Recursive Dependencies.
 (Larry McCay via cnauroth)
 
+HADOOP-12813. Migrate TestRPC and related codes to rebase on
+ProtobufRpcEngine. (Kai Zheng via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b195d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
index 6400e87..eb7b949 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
@@ -34,8 +34,6 @@ import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.RPC.Server;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
@@ -54,7 +52,7 @@ import com.google.protobuf.BlockingService;
  * Benchmark for protobuf RPC.
  * Run with --help option for usage.
  */
-public class RPCCallBenchmark implements Tool {
+public class RPCCallBenchmark extends TestRpcBase implements Tool {
   private Configuration conf;
   private AtomicLong callCount = new AtomicLong(0);
   private static ThreadMXBean threadBean =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b195d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
index 29a293f..8b419e3 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
@@ -23,8 +23,6 @@ import java.net.InetSocketAddress;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import 
org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
 import org.apache.hadoop.net.NetUtils;
 import org.junit.Before;
@@ -32,8 +30,7 @@ import org.junit.After;
 import org.junit.Test;

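With RPCCallBenchmark and the multi-protocol tests rebased on TestRpcBase, every call goes through the generated blocking stubs, so failures surface as ServiceException wrapping the server's RemoteException. A small sketch of that calling convention, assuming a TestRpcService proxy obtained as in the other hunks of this change (the echo message field follows the existing test.proto):

import com.google.protobuf.ServiceException;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
import org.apache.hadoop.ipc.protobuf.TestProtos;

public class EchoCallSketch {
  static String echo(TestRpcService proxy, String message) throws Exception {
    TestProtos.EchoRequestProto request =
        TestProtos.EchoRequestProto.newBuilder().setMessage(message).build();
    try {
      // Blocking protobuf stubs take (controller, request); the controller may be null.
      return proxy.echo(null, request).getMessage();
    } catch (ServiceException e) {
      // Server-side failures arrive wrapped; unwrap to inspect the original cause.
      if (e.getCause() instanceof RemoteException) {
        throw (RemoteException) e.getCause();
      }
      throw e;
    }
  }
}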
[1/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7ddff4b37 -> 69b195d61


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b195d6/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto 
b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
index 722af89..abb3883 100644
--- a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
+++ b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
@@ -32,6 +32,13 @@ service TestProtobufRpcProto {
   rpc echo(EchoRequestProto) returns (EchoResponseProto);
   rpc error(EmptyRequestProto) returns (EmptyResponseProto);
   rpc error2(EmptyRequestProto) returns (EmptyResponseProto);
+  rpc slowPing(SlowPingRequestProto) returns (EmptyResponseProto);
+  rpc echo2(EchoRequestProto2) returns (EchoResponseProto2);
+  rpc add(AddRequestProto) returns (AddResponseProto);
+  rpc add2(AddRequestProto2) returns (AddResponseProto);
+  rpc testServerGet(EmptyRequestProto) returns (EmptyResponseProto);
+  rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto);
+  rpc sleep(SleepRequestProto) returns (EmptyResponseProto);
 }
 
 service TestProtobufRpc2Proto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b195d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
index 39e6dc5..44ed883 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
@@ -24,15 +24,25 @@ import static org.mockito.Mockito.when;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ClientId;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.TestRPC.TestImpl;
+import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
+import org.apache.hadoop.ipc.TestRpcBase;
+import org.apache.hadoop.ipc.protobuf.TestProtos;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.Keys;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -188,12 +198,19 @@ public class TestNMAuditLogger {
* A special extension of {@link TestImpl} RPC server with 
* {@link TestImpl#ping()} testing the audit logs.
*/
-  private class MyTestRPCServer extends TestImpl {
+  private class MyTestRPCServer extends TestRpcBase.PBServerImpl {
 @Override
-public void ping() {
+public TestProtos.EmptyResponseProto ping(
+RpcController unused, TestProtos.EmptyRequestProto request)
+throws ServiceException {
+  // Ensure clientId is received
+  byte[] clientId = Server.getClientId();
+  Assert.assertNotNull(clientId);
+  Assert.assertEquals(ClientId.BYTE_LENGTH, clientId.length);
   // test with ip set
   testSuccessLogFormat(true);
   testFailureLogFormat(true);
+  return TestProtos.EmptyResponseProto.newBuilder().build();
 }
   }
 
@@ -203,9 +220,17 @@ public class TestNMAuditLogger {
   @Test  
   public void testNMAuditLoggerWithIP() throws Exception {
 Configuration conf = new Configuration();
+RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
+
+// Create server side implementation
+MyTestRPCServer serverImpl = new MyTestRPCServer();
+BlockingService service = TestRpcServiceProtos.TestProtobufRpcProto
+.newReflectiveBlockingService(serverImpl);
+
 // start the IPC server
-Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-.setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
+Server server = new 

[1/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bd0f5085e -> c5db4ab0b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5db4ab0/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto 
b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
index 722af89..abb3883 100644
--- a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
+++ b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
@@ -32,6 +32,13 @@ service TestProtobufRpcProto {
   rpc echo(EchoRequestProto) returns (EchoResponseProto);
   rpc error(EmptyRequestProto) returns (EmptyResponseProto);
   rpc error2(EmptyRequestProto) returns (EmptyResponseProto);
+  rpc slowPing(SlowPingRequestProto) returns (EmptyResponseProto);
+  rpc echo2(EchoRequestProto2) returns (EchoResponseProto2);
+  rpc add(AddRequestProto) returns (AddResponseProto);
+  rpc add2(AddRequestProto2) returns (AddResponseProto);
+  rpc testServerGet(EmptyRequestProto) returns (EmptyResponseProto);
+  rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto);
+  rpc sleep(SleepRequestProto) returns (EmptyResponseProto);
 }
 
 service TestProtobufRpc2Proto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5db4ab0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
index 39e6dc5..44ed883 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
@@ -24,15 +24,25 @@ import static org.mockito.Mockito.when;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ClientId;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.TestRPC.TestImpl;
+import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
+import org.apache.hadoop.ipc.TestRpcBase;
+import org.apache.hadoop.ipc.protobuf.TestProtos;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.Keys;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -188,12 +198,19 @@ public class TestNMAuditLogger {
* A special extension of {@link TestImpl} RPC server with 
* {@link TestImpl#ping()} testing the audit logs.
*/
-  private class MyTestRPCServer extends TestImpl {
+  private class MyTestRPCServer extends TestRpcBase.PBServerImpl {
 @Override
-public void ping() {
+public TestProtos.EmptyResponseProto ping(
+RpcController unused, TestProtos.EmptyRequestProto request)
+throws ServiceException {
+  // Ensure clientId is received
+  byte[] clientId = Server.getClientId();
+  Assert.assertNotNull(clientId);
+  Assert.assertEquals(ClientId.BYTE_LENGTH, clientId.length);
   // test with ip set
   testSuccessLogFormat(true);
   testFailureLogFormat(true);
+  return TestProtos.EmptyResponseProto.newBuilder().build();
 }
   }
 
@@ -203,9 +220,17 @@ public class TestNMAuditLogger {
   @Test  
   public void testNMAuditLoggerWithIP() throws Exception {
 Configuration conf = new Configuration();
+RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
+
+// Create server side implementation
+MyTestRPCServer serverImpl = new MyTestRPCServer();
+BlockingService service = TestRpcServiceProtos.TestProtobufRpcProto
+.newReflectiveBlockingService(serverImpl);
+
 // start the IPC server
-Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-.setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
+Server server = new 

[2/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. 
Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5db4ab0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5db4ab0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5db4ab0

Branch: refs/heads/branch-2
Commit: c5db4ab0b44e7feeb1afe2d9553665d2af3c9a34
Parents: bd0f508
Author: Haohui Mai <whe...@apache.org>
Authored: Mon Feb 29 11:41:00 2016 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Mon Feb 29 14:10:18 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |3 +
 .../org/apache/hadoop/ipc/RPCCallBenchmark.java |4 +-
 .../hadoop/ipc/TestMultipleProtocolServer.java  |   14 +-
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java  |  137 +--
 .../java/org/apache/hadoop/ipc/TestRPC.java | 1065 --
 .../hadoop/ipc/TestRPCServerShutdown.java   |  106 ++
 .../java/org/apache/hadoop/ipc/TestRpcBase.java |  295 +
 .../hadoop-common/src/test/proto/test.proto |   33 +
 .../src/test/proto/test_rpc_service.proto   |7 +
 .../server/nodemanager/TestNMAuditLogger.java   |   40 +-
 .../resourcemanager/TestRMAuditLogger.java  |   44 +-
 11 files changed, 1021 insertions(+), 727 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5db4ab0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 773edd0..ac31a0c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1127,6 +1127,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12846. Credential Provider Recursive Dependencies.
 (Larry McCay via cnauroth)
 
+HADOOP-12813. Migrate TestRPC and related codes to rebase on
+ProtobufRpcEngine. (Kai Zheng via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5db4ab0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
index 6400e87..eb7b949 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
@@ -34,8 +34,6 @@ import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.RPC.Server;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
@@ -54,7 +52,7 @@ import com.google.protobuf.BlockingService;
  * Benchmark for protobuf RPC.
  * Run with --help option for usage.
  */
-public class RPCCallBenchmark implements Tool {
+public class RPCCallBenchmark extends TestRpcBase implements Tool {
   private Configuration conf;
   private AtomicLong callCount = new AtomicLong(0);
   private static ThreadMXBean threadBean =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5db4ab0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
index 29a293f..8b419e3 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
@@ -23,8 +23,6 @@ import java.net.InetSocketAddress;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import 
org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
 import org.apache.hadoop.net.NetUtils;
 import org.junit.Before;
@@ -32,8 +30,7 @@ import org.junit.After;
 import org.junit.Test;

[1/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1cb2f9345 -> 0fa54d45b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fa54d45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
index 39e6dc5..44ed883 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNMAuditLogger.java
@@ -24,15 +24,25 @@ import static org.mockito.Mockito.when;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ClientId;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.TestRPC.TestImpl;
+import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
+import org.apache.hadoop.ipc.TestRpcBase;
+import org.apache.hadoop.ipc.protobuf.TestProtos;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.Keys;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -188,12 +198,19 @@ public class TestNMAuditLogger {
* A special extension of {@link TestImpl} RPC server with 
* {@link TestImpl#ping()} testing the audit logs.
*/
-  private class MyTestRPCServer extends TestImpl {
+  private class MyTestRPCServer extends TestRpcBase.PBServerImpl {
 @Override
-public void ping() {
+public TestProtos.EmptyResponseProto ping(
+RpcController unused, TestProtos.EmptyRequestProto request)
+throws ServiceException {
+  // Ensure clientId is received
+  byte[] clientId = Server.getClientId();
+  Assert.assertNotNull(clientId);
+  Assert.assertEquals(ClientId.BYTE_LENGTH, clientId.length);
   // test with ip set
   testSuccessLogFormat(true);
   testFailureLogFormat(true);
+  return TestProtos.EmptyResponseProto.newBuilder().build();
 }
   }
 
@@ -203,9 +220,17 @@ public class TestNMAuditLogger {
   @Test  
   public void testNMAuditLoggerWithIP() throws Exception {
 Configuration conf = new Configuration();
+RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
+
+// Create server side implementation
+MyTestRPCServer serverImpl = new MyTestRPCServer();
+BlockingService service = TestRpcServiceProtos.TestProtobufRpcProto
+.newReflectiveBlockingService(serverImpl);
+
 // start the IPC server
-Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-.setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
+Server server = new RPC.Builder(conf)
+.setProtocol(TestRpcBase.TestRpcService.class)
+.setInstance(service).setBindAddress("0.0.0.0")
 .setPort(0).setNumHandlers(5).setVerbose(true).build();
 
 server.start();
@@ -213,11 +238,14 @@ public class TestNMAuditLogger {
 InetSocketAddress addr = NetUtils.getConnectAddress(server);
 
 // Make a client connection and test the audit log
-TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
+TestRpcService proxy = RPC.getProxy(TestRpcService.class,
TestProtocol.versionID, addr, conf);
 // Start the testcase
-proxy.ping();
+TestProtos.EmptyRequestProto pingRequest =
+TestProtos.EmptyRequestProto.newBuilder().build();
+proxy.ping(null, pingRequest);
 
 server.stop();
+RPC.stopProxy(proxy);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fa54d45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java
--
diff --git 

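The TestNMAuditLogger changes in this commit follow the usual protobuf server recipe: wrap the implementation in a reflective BlockingService, hand it to RPC.Builder, then resolve the bound address for clients. A condensed, self-contained version of that recipe (handler count and bind address mirror the diff; running PBServerImpl standalone is illustrative):

import com.google.protobuf.BlockingService;

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.TestRpcBase;
import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
import org.apache.hadoop.net.NetUtils;

public class ProtobufServerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);

    // Wrap the server-side implementation in a reflective blocking service.
    BlockingService service = TestRpcServiceProtos.TestProtobufRpcProto
        .newReflectiveBlockingService(new TestRpcBase.PBServerImpl());

    Server server = new RPC.Builder(conf)
        .setProtocol(TestRpcService.class)
        .setInstance(service)
        .setBindAddress("0.0.0.0")
        .setPort(0)               // ephemeral port, as the tests do
        .setNumHandlers(5)
        .setVerbose(true)
        .build();
    server.start();

    // Clients connect to whatever port the server actually bound.
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    System.out.println("listening on " + addr);

    server.stop();
  }
}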
[2/2] hadoop git commit: HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. Contributed by Kai Zheng.

2016-02-29 Thread wheat9
HADOOP-12813. Migrate TestRPC and related codes to rebase on ProtobufRpcEngine. 
Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fa54d45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fa54d45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fa54d45

Branch: refs/heads/trunk
Commit: 0fa54d45b1cf8a29f089f64d24f35bd221b4803f
Parents: 1cb2f93
Author: Haohui Mai <whe...@apache.org>
Authored: Mon Feb 29 11:41:00 2016 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Mon Feb 29 11:41:00 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |3 +
 .../org/apache/hadoop/ipc/RPCCallBenchmark.java |4 +-
 .../hadoop/ipc/TestMultipleProtocolServer.java  |   14 +-
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java  |  137 +--
 .../java/org/apache/hadoop/ipc/TestRPC.java | 1013 --
 .../hadoop/ipc/TestRPCServerShutdown.java   |  106 ++
 .../java/org/apache/hadoop/ipc/TestRpcBase.java |  295 +
 .../hadoop-common/src/test/proto/test.proto |   33 +
 .../src/test/proto/test_rpc_service.proto   |7 +
 .../server/nodemanager/TestNMAuditLogger.java   |   40 +-
 .../resourcemanager/TestRMAuditLogger.java  |   44 +-
 11 files changed, 987 insertions(+), 709 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fa54d45/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index eb33464..f444b71 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1763,6 +1763,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12846. Credential Provider Recursive Dependencies.
 (Larry McCay via cnauroth)
 
+HADOOP-12813. Migrate TestRPC and related codes to rebase on
+ProtobufRpcEngine. (Kai Zheng via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fa54d45/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
index 6400e87..eb7b949 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
@@ -34,8 +34,6 @@ import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.RPC.Server;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import org.apache.hadoop.ipc.TestRPC.TestProtocol;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
@@ -54,7 +52,7 @@ import com.google.protobuf.BlockingService;
  * Benchmark for protobuf RPC.
  * Run with --help option for usage.
  */
-public class RPCCallBenchmark implements Tool {
+public class RPCCallBenchmark extends TestRpcBase implements Tool {
   private Configuration conf;
   private AtomicLong callCount = new AtomicLong(0);
   private static ThreadMXBean threadBean =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fa54d45/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
index 29a293f..8b419e3 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
@@ -23,8 +23,6 @@ import java.net.InetSocketAddress;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.TestProtoBufRpc.PBServerImpl;
-import org.apache.hadoop.ipc.TestProtoBufRpc.TestRpcService;
 import 
org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
 import org.apache.hadoop.net.NetUtils;
 import org.junit.Before;
@@ -32,8 +30,7 @@ import org.junit.After;
 import org.junit.Test;

hadoop git commit: HDFS-9815. Move o.a.h.fs.Hdfs to hadoop-hdfs-client. Contributed by Vinayakumar B.

2016-02-17 Thread wheat9
* Returns a new {@link WebHdfsFileSystem}, with the given configuration.
+   *
+   * @param conf configuration
+   * @return new WebHdfsFileSystem
+   */
+  private static WebHdfsFileSystem createWebHdfsFileSystem(Configuration conf) 
{
+WebHdfsFileSystem fs = new WebHdfsFileSystem();
+fs.setConf(conf);
+return fs;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b318429a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
new file mode 100644
index 000..53b2a5a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
@@ -0,0 +1,26 @@
+
+
+
+
+
+
+Implementations of {@link org.apache.hadoop.fs.AbstractFileSystem} for hdfs
+over rpc and hdfs over web.
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b318429a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ca06efd..dc5bdc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1766,6 +1766,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9456. BlockPlacementPolicyWithNodeGroup should override 
 verifyBlockPlacement(). (Xiaobing Zhou via junping_du)
 
+HDFS-9815. Move o.a.h.fs.Hdfs to hadoop-hdfs-client.
+(Vinayakumar B via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b318429a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
deleted file mode 100644
index 6dd8884..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ /dev/null
@@ -1,535 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Options.ChecksumOpt;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSInputStream;
-import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;

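The relocated o.a.h.fs.Hdfs class is the AbstractFileSystem binding that FileContext uses for hdfs:// URIs, which is why moving it into hadoop-hdfs-client matters for client-only classpaths. A short sketch of the API path that exercises it; the NameNode URI is a placeholder:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class FileContextListingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // FileContext resolves hdfs:// through the AbstractFileSystem (o.a.h.fs.Hdfs) binding.
    FileContext fc = FileContext.getFileContext(
        URI.create("hdfs://namenode.example.com:8020"), conf);

    RemoteIterator<FileStatus> it = fc.listStatus(new Path("/"));
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
  }
}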
hadoop git commit: HDFS-9815. Move o.a.h.fs.Hdfs to hadoop-hdfs-client. Contributed by Vinayakumar B.

2016-02-17 Thread wheat9
* Returns a new {@link WebHdfsFileSystem}, with the given configuration.
+   *
+   * @param conf configuration
+   * @return new WebHdfsFileSystem
+   */
+  private static WebHdfsFileSystem createWebHdfsFileSystem(Configuration conf) 
{
+WebHdfsFileSystem fs = new WebHdfsFileSystem();
+fs.setConf(conf);
+return fs;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7d1507e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
new file mode 100644
index 000..53b2a5a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
@@ -0,0 +1,26 @@
+
+
+
+
+
+
+Implementations of {@link org.apache.hadoop.fs.AbstractFileSystem} for hdfs
+over rpc and hdfs over web.
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7d1507e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3c605ac..9e3315a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1850,6 +1850,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9456. BlockPlacementPolicyWithNodeGroup should override 
 verifyBlockPlacement(). (Xiaobing Zhou via junping_du)
 
+HDFS-9815. Move o.a.h.fs.Hdfs to hadoop-hdfs-client.
+(Vinayakumar B via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7d1507e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
deleted file mode 100644
index 6dd8884..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ /dev/null
@@ -1,535 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Options.ChecksumOpt;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSInputStream;
-import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-import 

hadoop git commit: HDFS-9815. Move o.a.h.fs.Hdfs to hadoop-hdfs-client. Contributed by Vinayakumar B.

2016-02-17 Thread wheat9
* Returns a new {@link WebHdfsFileSystem}, with the given configuration.
+   *
+   * @param conf configuration
+   * @return new WebHdfsFileSystem
+   */
+  private static WebHdfsFileSystem createWebHdfsFileSystem(Configuration conf) 
{
+WebHdfsFileSystem fs = new WebHdfsFileSystem();
+fs.setConf(conf);
+return fs;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77f7ca3e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
new file mode 100644
index 000..53b2a5a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/package.html
@@ -0,0 +1,26 @@
+
+
+
+
+
+
+Implementations of {@link org.apache.hadoop.fs.AbstractFileSystem} for hdfs
+over rpc and hdfs over web.
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77f7ca3e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 79369da..5ca7f8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2774,6 +2774,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9456. BlockPlacementPolicyWithNodeGroup should override 
 verifyBlockPlacement(). (Xiaobing Zhou via junping_du)
 
+HDFS-9815. Move o.a.h.fs.Hdfs to hadoop-hdfs-client.
+(Vinayakumar B via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77f7ca3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
deleted file mode 100644
index 6dd8884..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ /dev/null
@@ -1,535 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Options.ChecksumOpt;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSInputStream;
-import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.security.token.Token;
-import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-import org.apa

hadoop git commit: HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse ObjectMapper. Contributed by Akira Ajisaka.

2016-02-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 609021187 -> fdb89ac0a


HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse 
ObjectMapper. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdb89ac0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdb89ac0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdb89ac0

Branch: refs/heads/branch-2.8
Commit: fdb89ac0aef681bf0110d49a18da65f4b4da1345
Parents: 6090211
Author: Haohui Mai <hao...@uber.com>
Authored: Thu Feb 4 11:28:45 2016 -0800
Committer: Haohui Mai <hao...@uber.com>
Committed: Thu Feb 4 13:41:14 2016 -0800

--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  6 +++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 23 ++--
 3 files changed, 19 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdb89ac0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index cc22040..0fbad63 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -87,6 +87,7 @@ import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelect
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -128,6 +129,8 @@ public class WebHdfsFileSystem extends FileSystem
   private InetSocketAddress nnAddrs[];
   private int currentNNAddrIndex;
   private boolean disallowFallbackToInsecureCluster;
+  private static final ObjectReader READER =
+  new ObjectMapper().reader(Map.class);
 
   /**
* Return the protocol scheme for the FileSystem.
@@ -368,8 +371,7 @@ public class WebHdfsFileSystem extends FileSystem
   + "\" (parsed=\"" + parsed + "\")");
 }
   }
-  ObjectMapper mapper = new ObjectMapper();
-  return mapper.reader(Map.class).readValue(in);
+  return READER.readValue(in);
 } finally {
   in.close();
 }
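
The essence of the change above is that the ObjectReader is built once and shared, instead of constructing a new ObjectMapper for every listing response. A minimal sketch of the same pattern, using the same org.codehaus Jackson calls that appear in the diff; the sample JSON string is only illustrative:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;

import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;

public class SharedReaderExample {
  // Built once and reused across calls; per-request ObjectMapper construction
  // is exactly the overhead HDFS-9724 removes.
  private static final ObjectReader READER = new ObjectMapper().reader(Map.class);

  static Map<?, ?> parse(InputStream in) throws IOException {
    try {
      return READER.readValue(in);
    } finally {
      in.close();
    }
  }

  public static void main(String[] args) throws Exception {
    byte[] json = "{\"FileStatuses\":{\"FileStatus\":[]}}".getBytes("UTF-8");
    System.out.println(parse(new ByteArrayInputStream(json)));
  }
}

The reader is safe to share because it is never reconfigured after construction, which matches the reasoning in the JsonUtil comment later in this patch.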

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdb89ac0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 27de25f..2c40478 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1794,6 +1794,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9730. Storage ID update does not happen when there is a layout change
 (Tsz Wo Nicholas Sze via kihwal)
 
+HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse
+ObjectMapper. (Akira AJISAKA via wheat9)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdb89ac0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index b3cae6b..d7b5c31 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -38,6 +38,12 @@ import java.util.*;
 public class JsonUtil {
   private static final Object[] EMPTY_OBJECT_ARRAY = {};
 
+  // Reuse ObjectMapper instance for improving performance.
+  // ObjectMapper is thread safe as long as we always configure instance
+  // before use. We don't have a re-entrant call pattern in WebHDFS,
+  // so we just need to worry about thread-safety.
+  private static final ObjectMapper MAPPER = new ObjectMapper();
+
   /** Convert a token object to a Json string. */
   public static String toJsonString(final Token<? extends TokenIdentifier> token
   ) throws IOException {
@@ -72,9 +78,8 @@ public class JsonUtil {
   public static String toJsonString(final String key, final Object value) {
 final 

hadoop git commit: HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse ObjectMapper. Contributed by Akira Ajisaka.

2016-02-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 496f33de0 -> 1bcfab8e7


HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse 
ObjectMapper. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bcfab8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bcfab8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bcfab8e

Branch: refs/heads/trunk
Commit: 1bcfab8e7fd8562f1829ac484d2f6c91f7afe3d6
Parents: 496f33d
Author: Haohui Mai <hao...@uber.com>
Authored: Thu Feb 4 11:28:45 2016 -0800
Committer: Haohui Mai <hao...@uber.com>
Committed: Thu Feb 4 11:34:26 2016 -0800

--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  6 +++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 23 ++--
 3 files changed, 19 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bcfab8e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index d806d55..82cf655 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -87,6 +87,7 @@ import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelect
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -128,6 +129,8 @@ public class WebHdfsFileSystem extends FileSystem
   private InetSocketAddress nnAddrs[];
   private int currentNNAddrIndex;
   private boolean disallowFallbackToInsecureCluster;
+  private static final ObjectReader READER =
+  new ObjectMapper().reader(Map.class);
 
   /**
* Return the protocol scheme for the FileSystem.
@@ -361,8 +364,7 @@ public class WebHdfsFileSystem extends FileSystem
   + "\" (parsed=\"" + parsed + "\")");
 }
   }
-  ObjectMapper mapper = new ObjectMapper();
-  return mapper.reader(Map.class).readValue(in);
+  return READER.readValue(in);
 } finally {
   in.close();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bcfab8e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 846eb2b..dc27f91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2775,6 +2775,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9730. Storage ID update does not happen when there is a layout change
 (Tsz Wo Nicholas Sze via kihwal)
 
+HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse
+ObjectMapper. (Akira AJISAKA via wheat9)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bcfab8e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 1f5eaf6..342f719 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -38,6 +38,12 @@ import java.util.*;
 public class JsonUtil {
   private static final Object[] EMPTY_OBJECT_ARRAY = {};
 
+  // Reuse ObjectMapper instance for improving performance.
+  // ObjectMapper is thread safe as long as we always configure instance
+  // before use. We don't have a re-entrant call pattern in WebHDFS,
+  // so we just need to worry about thread-safety.
+  private static final ObjectMapper MAPPER = new ObjectMapper();
+
   /** Convert a token object to a Json string. */
   public static String toJsonString(final Token<? extends TokenIdentifier> token
   ) throws IOException {
@@ -72,9 +78,8 @@ public class JsonUtil {
   public static String toJsonString(final String key, final Object value) {
 final Map<String, 

hadoop git commit: HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse ObjectMapper. Contributed by Akira Ajisaka.

2016-02-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e26c58fb4 -> e62ca1485


HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse 
ObjectMapper. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e62ca148
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e62ca148
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e62ca148

Branch: refs/heads/branch-2
Commit: e62ca14856a76a1ca717fce5dfd703c5ec9e7e15
Parents: e26c58f
Author: Haohui Mai <hao...@uber.com>
Authored: Thu Feb 4 11:28:45 2016 -0800
Committer: Haohui Mai <hao...@uber.com>
Committed: Thu Feb 4 11:35:34 2016 -0800

--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  6 +++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 23 ++--
 3 files changed, 19 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e62ca148/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index cc22040..0fbad63 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -87,6 +87,7 @@ import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelect
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -128,6 +129,8 @@ public class WebHdfsFileSystem extends FileSystem
   private InetSocketAddress nnAddrs[];
   private int currentNNAddrIndex;
   private boolean disallowFallbackToInsecureCluster;
+  private static final ObjectReader READER =
+  new ObjectMapper().reader(Map.class);
 
   /**
* Return the protocol scheme for the FileSystem.
@@ -368,8 +371,7 @@ public class WebHdfsFileSystem extends FileSystem
   + "\" (parsed=\"" + parsed + "\")");
 }
   }
-  ObjectMapper mapper = new ObjectMapper();
-  return mapper.reader(Map.class).readValue(in);
+  return READER.readValue(in);
 } finally {
   in.close();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e62ca148/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6355944..f1bbf08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1870,6 +1870,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9730. Storage ID update does not happen when there is a layout change
 (Tsz Wo Nicholas Sze via kihwal)
 
+HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse
+ObjectMapper. (Akira AJISAKA via wheat9)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e62ca148/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index b3cae6b..d7b5c31 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -38,6 +38,12 @@ import java.util.*;
 public class JsonUtil {
   private static final Object[] EMPTY_OBJECT_ARRAY = {};
 
+  // Reuse ObjectMapper instance for improving performance.
+  // ObjectMapper is thread safe as long as we always configure instance
+  // before use. We don't have a re-entrant call pattern in WebHDFS,
+  // so we just need to worry about thread-safety.
+  private static final ObjectMapper MAPPER = new ObjectMapper();
+
   /** Convert a token object to a Json string. */
   public static String toJsonString(final Token<? extends TokenIdentifier> token
   ) throws IOException {
@@ -72,9 +78,8 @@ public class JsonUtil {
   public static String toJsonString(final String key, final Object value) {
 final 

hadoop git commit: HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse ObjectMapper. Contributed by Akira Ajisaka.

2016-02-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 c10bf788e -> cb3aa41f5


HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse 
ObjectMapper. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb3aa41f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb3aa41f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb3aa41f

Branch: refs/heads/branch-2.7
Commit: cb3aa41f53029d2b543b578f5506db4ccd26d414
Parents: c10bf78
Author: Haohui Mai <hao...@uber.com>
Authored: Thu Feb 4 11:28:45 2016 -0800
Committer: Haohui Mai <hao...@uber.com>
Committed: Thu Feb 4 11:36:12 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 23 ++--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  6 +++--
 3 files changed, 19 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3aa41f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 17561f0..1d8d7a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -89,6 +89,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9730. Storage ID update does not happen when there is a layout change
 (Tsz Wo Nicholas Sze via kihwal)
 
+HDFS-9724. Degraded performance in WebHDFS listing as it does not reuse
+ObjectMapper. (Akira AJISAKA via wheat9)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3aa41f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index bd0a5e6..12a4ed4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -50,6 +50,12 @@ public class JsonUtil {
   private static final Object[] EMPTY_OBJECT_ARRAY = {};
   private static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
 
+  // Reuse ObjectMapper instance for improving performance.
+  // ObjectMapper is thread safe as long as we always configure instance
+  // before use. We don't have a re-entrant call pattern in WebHDFS,
+  // so we just need to worry about thread-safety.
+  private static final ObjectMapper MAPPER = new ObjectMapper();
+
   /** Convert a token object to a Json string. */
   public static String toJsonString(final Token<? extends TokenIdentifier> token
   ) throws IOException {
@@ -120,9 +126,8 @@ public class JsonUtil {
   public static String toJsonString(final String key, final Object value) {
 final Map<String, Object> m = new TreeMap<String, Object>();
 m.put(key, value);
-ObjectMapper mapper = new ObjectMapper();
 try {
-  return mapper.writeValueAsString(m);
+  return MAPPER.writeValueAsString(m);
 } catch (IOException ignored) {
 }
 return null;
@@ -185,10 +190,9 @@ public class JsonUtil {
 m.put("fileId", status.getFileId());
 m.put("childrenNum", status.getChildrenNum());
 m.put("storagePolicy", status.getStoragePolicy());
-ObjectMapper mapper = new ObjectMapper();
 try {
   return includeType ?
-  toJsonString(FileStatus.class, m) : mapper.writeValueAsString(m);
+  toJsonString(FileStatus.class, m) : MAPPER.writeValueAsString(m);
 } catch (IOException ignored) {
 }
 return null;
@@ -644,9 +648,8 @@ public class JsonUtil {
 new TreeMap<String, Map<String, Object>>();
 finalMap.put(AclStatus.class.getSimpleName(), m);
 
-ObjectMapper mapper = new ObjectMapper();
 try {
-  return mapper.writeValueAsString(finalMap);
+  return MAPPER.writeValueAsString(finalMap);
 } catch (IOException ignored) {
 }
 return null;
@@ -713,8 +716,7 @@ public class JsonUtil {
   final XAttrCodec encoding) throws IOException {
 final Map<String, Object> finalMap = new TreeMap<String, Object>();
 finalMap.put("XAttrs", toJsonArray(xAttrs, encoding));
-ObjectMapper mapper = new ObjectMapper();
-return mapper.writeValueAsString(finalMap);
+return MAPPER.writeValueAsString(finalMap);
   }
   
   public static String toJsonString(final List xAttrs)
@@ -723,11 +725,10 @@ public class Js

[2/2] hadoop git commit: HDFS-9047. Retire libwebhdfs. Contributed by Haohui Mai.

2016-01-06 Thread wheat9
HDFS-9047. Retire libwebhdfs. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71006d5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71006d5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71006d5c

Branch: refs/heads/branch-2
Commit: 71006d5c2276cc7429c9444cd127bbad0b3a8063
Parents: 818cd0e
Author: Haohui Mai 
Authored: Wed Jan 6 16:12:24 2016 -0800
Committer: Haohui Mai 
Committed: Wed Jan 6 16:13:43 2016 -0800

--
 .../hadoop-hdfs-native-client/pom.xml   |5 +-
 .../src/CMakeLists.txt  |5 -
 .../src/contrib/libwebhdfs/CMakeLists.txt   |   88 -
 .../libwebhdfs/resources/FindJansson.cmake  |   43 -
 .../contrib/libwebhdfs/src/hdfs_http_client.c   |  490 --
 .../contrib/libwebhdfs/src/hdfs_http_client.h   |  294 
 .../contrib/libwebhdfs/src/hdfs_http_query.c|  402 -
 .../contrib/libwebhdfs/src/hdfs_http_query.h|  240 ---
 .../contrib/libwebhdfs/src/hdfs_json_parser.c   |  654 
 .../contrib/libwebhdfs/src/hdfs_json_parser.h   |  178 --
 .../src/contrib/libwebhdfs/src/hdfs_web.c   | 1538 --
 .../libwebhdfs/src/test_libwebhdfs_ops.c|  552 ---
 .../libwebhdfs/src/test_libwebhdfs_read.c   |   78 -
 .../libwebhdfs/src/test_libwebhdfs_threaded.c   |  247 ---
 .../libwebhdfs/src/test_libwebhdfs_write.c  |  111 --
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |2 +
 16 files changed, 4 insertions(+), 4923 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71006d5c/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index 6e8033f..b578667 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -32,7 +32,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
   
 false
-false
   
 
   
@@ -140,7 +139,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
 
-  
+  
 
 
@@ -199,7 +198,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 
 
-  
+  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71006d5c/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index 0a6f383..d7bfb76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -91,11 +91,6 @@ endfunction()
 add_subdirectory(main/native/libhdfs)
 add_subdirectory(main/native/libhdfs-tests)
 
-
-if(REQUIRE_LIBWEBHDFS)
-add_subdirectory(contrib/libwebhdfs)
-endif()
-
 # Find Linux FUSE
 if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
 find_package(PkgConfig REQUIRED)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71006d5c/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
deleted file mode 100644
index cc2b42d..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-find_package(CURL REQUIRED)
-

[1/2] hadoop git commit: HDFS-9047. Retire libwebhdfs. Contributed by Haohui Mai.

2016-01-06 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 818cd0e90 -> 71006d5c2


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71006d5c/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
deleted file mode 100644
index c5f2f9c..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef _HDFS_JSON_PARSER_H_
-#define _HDFS_JSON_PARSER_H_
-
-/**
- * Parse the response for MKDIR request. The response uses TRUE/FALSE 
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseMKDIR(const char *response);
-
-/**
- * Parse the response for RENAME request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseRENAME(const char *response);
-
-/**
- * Parse the response for DELETE request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseDELETE(const char *response);
-
-/**
- * Parse the response for SETREPLICATION request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseSETREPLICATION(const char *response);
-
-/**
- * Parse the response for OPEN (read) request. A successful operation 
- * will return "200 OK".
- *
- * @param response  The response information for parsing
- * @return  0 for success , -1 for out of range, other values for error
- */
-int parseOPEN(const char *header, const char *content);
-
-/**
- * Parse the response for WRITE (from NameNode) request. 
- * A successful operation should return "307 TEMPORARY_REDIRECT" in its header.
- *
- * @param header    The header of the http response
- * @param content   If failing, the exception message 
- *  sent from NameNode is stored in content
- * @return  0 for success
- */
-int parseNnWRITE(const char *header, const char *content);
-
-/**
- * Parse the response for WRITE (from DataNode) request. 
- * A successful operation should return "201 Created" in its header.
- * 
- * @param header    The header of the http response
- * @param content   If failing, the exception message
- *  sent from DataNode is stored in content
- * @return  0 for success
- */
-int parseDnWRITE(const char *header, const char *content);
-
-/**
- * Parse the response for APPEND (sent from NameNode) request.
- * A successful operation should return "307 TEMPORARY_REDIRECT" in its header.
- *
- * @param header    The header of the http response
- * @param content   If failing, the exception message
- *  sent from NameNode is stored in content
- * @return  0 for success
- */
-int parseNnAPPEND(const char *header, const char *content);
-
-/**
- * Parse the response for APPEND (from DataNode) request.
- * A successful operation should return "200 OK" in its header.
- *
- * @param header    The header of the http response
- * @param content   If failing, the exception message
- *  sent from DataNode is stored in content
- * @return  0 for success
- */
-int parseDnAPPEND(const char *header, const char *content);
-
-/**
- * Parse the response (from NameNode) to get the location information 
- * of the DataNode that should be contacted for the following write operation.
- *
- * @param content   Content of the http header
- * @param dn        To store the location of the DataNode for writing
- * @return  0 for success
- */
-int parseDnLoc(char *content, char **dn) __attribute__ ((warn_unused_result));
-
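
Although libwebhdfs is being retired here, the header comments above are a compact description of the WebHDFS write handshake its parsers handled: the NameNode answers a create request with a 307 TEMPORARY_REDIRECT whose Location names a DataNode, and the DataNode answers the actual upload with 201 Created. A rough, hypothetical Java sketch of that two-step exchange — host, port (50070), user.name and path are placeholder assumptions, and all error handling is omitted:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class TwoStepWebHdfsCreate {
  public static void main(String[] args) throws Exception {
    // Step 1: ask the NameNode where to write; a 307 redirect is expected.
    URL nn = new URL("http://namenode.example.com:50070/webhdfs/v1/tmp/demo.txt"
        + "?op=CREATE&user.name=hadoop&overwrite=true");
    HttpURLConnection nnConn = (HttpURLConnection) nn.openConnection();
    nnConn.setRequestMethod("PUT");
    nnConn.setInstanceFollowRedirects(false); // read the Location header ourselves
    int nnStatus = nnConn.getResponseCode();  // expected: 307
    String dnLocation = nnConn.getHeaderField("Location");
    nnConn.disconnect();

    // Step 2: send the bytes to the DataNode named in the redirect;
    // a 201 Created response signals success.
    HttpURLConnection dnConn = (HttpURLConnection) new URL(dnLocation).openConnection();
    dnConn.setRequestMethod("PUT");
    dnConn.setDoOutput(true);
    try (OutputStream out = dnConn.getOutputStream()) {
      out.write("hello, webhdfs".getBytes("UTF-8"));
    }
    System.out.println(nnStatus + " then " + dnConn.getResponseCode());
    dnConn.disconnect();
  }
}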

[2/2] hadoop git commit: HDFS-9047. Retire libwebhdfs. Contributed by Haohui Mai.

2016-01-06 Thread wheat9
HDFS-9047. Retire libwebhdfs. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb6f03d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb6f03d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb6f03d5

Branch: refs/heads/branch-2.8
Commit: bb6f03d597dca63c012d345b0e9a5b5d8cd27ca2
Parents: 506a517
Author: Haohui Mai 
Authored: Wed Jan 6 16:12:24 2016 -0800
Committer: Haohui Mai 
Committed: Wed Jan 6 16:39:51 2016 -0800

--
 .../hadoop-hdfs-native-client/pom.xml   |5 +-
 .../src/CMakeLists.txt  |5 -
 .../src/contrib/libwebhdfs/CMakeLists.txt   |   88 -
 .../libwebhdfs/resources/FindJansson.cmake  |   43 -
 .../contrib/libwebhdfs/src/hdfs_http_client.c   |  490 --
 .../contrib/libwebhdfs/src/hdfs_http_client.h   |  294 
 .../contrib/libwebhdfs/src/hdfs_http_query.c|  402 -
 .../contrib/libwebhdfs/src/hdfs_http_query.h|  240 ---
 .../contrib/libwebhdfs/src/hdfs_json_parser.c   |  654 
 .../contrib/libwebhdfs/src/hdfs_json_parser.h   |  178 --
 .../src/contrib/libwebhdfs/src/hdfs_web.c   | 1538 --
 .../libwebhdfs/src/test_libwebhdfs_ops.c|  552 ---
 .../libwebhdfs/src/test_libwebhdfs_read.c   |   78 -
 .../libwebhdfs/src/test_libwebhdfs_threaded.c   |  247 ---
 .../libwebhdfs/src/test_libwebhdfs_write.c  |  111 --
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |2 +
 16 files changed, 4 insertions(+), 4923 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb6f03d5/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index 85c0fe4..f66b72c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -32,7 +32,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
   
 false
-false
   
 
   
@@ -140,7 +139,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
 
-  
+  
 
 
@@ -199,7 +198,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 
 
-  
+  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb6f03d5/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index 0a6f383..d7bfb76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -91,11 +91,6 @@ endfunction()
 add_subdirectory(main/native/libhdfs)
 add_subdirectory(main/native/libhdfs-tests)
 
-
-if(REQUIRE_LIBWEBHDFS)
-add_subdirectory(contrib/libwebhdfs)
-endif()
-
 # Find Linux FUSE
 if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
 find_package(PkgConfig REQUIRED)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb6f03d5/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
deleted file mode 100644
index cc2b42d..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-find_package(CURL REQUIRED)
-

[2/2] hadoop git commit: HDFS-9047. Retire libwebhdfs. Contributed by Haohui Mai.

2016-01-06 Thread wheat9
HDFS-9047. Retire libwebhdfs. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c213ee08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c213ee08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c213ee08

Branch: refs/heads/trunk
Commit: c213ee085971483d737a2d4652adfda0f767eea0
Parents: b6c9d3f
Author: Haohui Mai 
Authored: Wed Jan 6 16:12:24 2016 -0800
Committer: Haohui Mai 
Committed: Wed Jan 6 16:16:31 2016 -0800

--
 .../hadoop-hdfs-native-client/pom.xml   |5 +-
 .../src/CMakeLists.txt  |5 -
 .../src/contrib/libwebhdfs/CMakeLists.txt   |   88 -
 .../libwebhdfs/resources/FindJansson.cmake  |   43 -
 .../contrib/libwebhdfs/src/hdfs_http_client.c   |  490 --
 .../contrib/libwebhdfs/src/hdfs_http_client.h   |  294 
 .../contrib/libwebhdfs/src/hdfs_http_query.c|  402 -
 .../contrib/libwebhdfs/src/hdfs_http_query.h|  240 ---
 .../contrib/libwebhdfs/src/hdfs_json_parser.c   |  654 
 .../contrib/libwebhdfs/src/hdfs_json_parser.h   |  178 --
 .../src/contrib/libwebhdfs/src/hdfs_web.c   | 1538 --
 .../libwebhdfs/src/test_libwebhdfs_ops.c|  552 ---
 .../libwebhdfs/src/test_libwebhdfs_read.c   |   78 -
 .../libwebhdfs/src/test_libwebhdfs_threaded.c   |  247 ---
 .../libwebhdfs/src/test_libwebhdfs_write.c  |  111 --
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |2 +
 16 files changed, 4 insertions(+), 4923 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index 9fa5fbf..9f2c77d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -32,7 +32,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
   
 false
-false
   
 
   
@@ -140,7 +139,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
 
-  
+  
 
 
@@ -199,7 +198,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 
 
-  
+  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index 0a6f383..d7bfb76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -91,11 +91,6 @@ endfunction()
 add_subdirectory(main/native/libhdfs)
 add_subdirectory(main/native/libhdfs-tests)
 
-
-if(REQUIRE_LIBWEBHDFS)
-add_subdirectory(contrib/libwebhdfs)
-endif()
-
 # Find Linux FUSE
 if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
 find_package(PkgConfig REQUIRED)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
deleted file mode 100644
index cc2b42d..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-find_package(CURL REQUIRED)
-

[1/2] hadoop git commit: HDFS-9047. Retire libwebhdfs. Contributed by Haohui Mai.

2016-01-06 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk b6c9d3fab -> c213ee085


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c213ee08/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
deleted file mode 100644
index c5f2f9c..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef _HDFS_JSON_PARSER_H_
-#define _HDFS_JSON_PARSER_H_
-
-/**
- * Parse the response for MKDIR request. The response uses TRUE/FALSE 
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseMKDIR(const char *response);
-
-/**
- * Parse the response for RENAME request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseRENAME(const char *response);
-
-/**
- * Parse the response for DELETE request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseDELETE(const char *response);
-
-/**
- * Parse the response for SETREPLICATION request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseSETREPLICATION(const char *response);
-
-/**
- * Parse the response for OPEN (read) request. A successful operation 
- * will return "200 OK".
- *
- * @param response  The response information for parsing
- * @return  0 for success , -1 for out of range, other values for error
- */
-int parseOPEN(const char *header, const char *content);
-
-/**
- * Parse the response for WRITE (from NameNode) request. 
- * A successful operation should return "307 TEMPORARY_REDIRECT" in its header.
- *
- * @param header    The header of the http response
- * @param content   If failing, the exception message 
- *  sent from NameNode is stored in content
- * @return  0 for success
- */
-int parseNnWRITE(const char *header, const char *content);
-
-/**
- * Parse the response for WRITE (from DataNode) request. 
- * A successful operation should return "201 Created" in its header.
- * 
- * @param header    The header of the http response
- * @param content   If failing, the exception message
- *  sent from DataNode is stored in content
- * @return  0 for success
- */
-int parseDnWRITE(const char *header, const char *content);
-
-/**
- * Parse the response for APPEND (sent from NameNode) request.
- * A successful operation should return "307 TEMPORARY_REDIRECT" in its header.
- *
- * @param header    The header of the http response
- * @param content   If failing, the exception message
- *  sent from NameNode is stored in content
- * @return  0 for success
- */
-int parseNnAPPEND(const char *header, const char *content);
-
-/**
- * Parse the response for APPEND (from DataNode) request.
- * A successful operation should return "200 OK" in its header.
- *
- * @param header    The header of the http response
- * @param content   If failing, the exception message
- *  sent from DataNode is stored in content
- * @return  0 for success
- */
-int parseDnAPPEND(const char *header, const char *content);
-
-/**
- * Parse the response (from NameNode) to get the location information 
- * of the DataNode that should be contacted for the following write operation.
- *
- * @param content   Content of the http header
- * @param dn        To store the location of the DataNode for writing
- * @return  0 for success
- */
-int parseDnLoc(char *content, char **dn) __attribute__ ((warn_unused_result));
-

[1/2] hadoop git commit: HDFS-9047. Retire libwebhdfs. Contributed by Haohui Mai.

2016-01-06 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 506a517f9 -> bb6f03d59


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb6f03d5/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
deleted file mode 100644
index c5f2f9c..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef _HDFS_JSON_PARSER_H_
-#define _HDFS_JSON_PARSER_H_
-
-/**
- * Parse the response for MKDIR request. The response uses TRUE/FALSE 
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseMKDIR(const char *response);
-
-/**
- * Parse the response for RENAME request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseRENAME(const char *response);
-
-/**
- * Parse the response for DELETE request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseDELETE(const char *response);
-
-/**
- * Parse the response for SETREPLICATION request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseSETREPLICATION(const char *response);
-
-/**
- * Parse the response for OPEN (read) request. A successful operation 
- * will return "200 OK".
- *
- * @param response  The response information for parsing
- * @return  0 for success , -1 for out of range, other values for error
- */
-int parseOPEN(const char *header, const char *content);
-
-/**
- * Parse the response for WRITE (from NameNode) request. 
- * A successful operation should return "307 TEMPORARY_REDIRECT" in its header.
- *
- * @param header    The header of the http response
- * @param content   If failing, the exception message 
- *  sent from NameNode is stored in content
- * @return  0 for success
- */
-int parseNnWRITE(const char *header, const char *content);
-
-/**
- * Parse the response for WRITE (from DataNode) request. 
- * A successful operation should return "201 Created" in its header.
- * 
- * @param header    The header of the http response
- * @param content   If failing, the exception message
- *  sent from DataNode is stored in content
- * @return  0 for success
- */
-int parseDnWRITE(const char *header, const char *content);
-
-/**
- * Parse the response for APPEND (sent from NameNode) request.
- * A successful operation should return "307 TEMPORARY_REDIRECT" in its header.
- *
- * @param header    The header of the http response
- * @param content   If failing, the exception message
- *  sent from NameNode is stored in content
- * @return  0 for success
- */
-int parseNnAPPEND(const char *header, const char *content);
-
-/**
- * Parse the response for APPEND (from DataNode) request.
- * A successful operation should return "200 OK" in its header.
- *
- * @param header    The header of the http response
- * @param content   If failing, the exception message
- *  sent from DataNode is stored in content
- * @return  0 for success
- */
-int parseDnAPPEND(const char *header, const char *content);
-
-/**
- * Parse the response (from NameNode) to get the location information 
- * of the DataNode that should be contacted for the following write operation.
- *
- * @param content   Content of the http header
- * @param dn        To store the location of the DataNode for writing
- * @return  0 for success
- */
-int parseDnLoc(char *content, char **dn) __attribute__ ((warn_unused_result));

[25/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/dbformat_test.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/dbformat_test.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/dbformat_test.cc
new file mode 100644
index 000..5d82f5d
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/dbformat_test.cc
@@ -0,0 +1,112 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/dbformat.h"
+#include "util/logging.h"
+#include "util/testharness.h"
+
+namespace leveldb {
+
+static std::string IKey(const std::string& user_key,
+uint64_t seq,
+ValueType vt) {
+  std::string encoded;
+  AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
+  return encoded;
+}
+
+static std::string Shorten(const std::string& s, const std::string& l) {
+  std::string result = s;
+  InternalKeyComparator(BytewiseComparator()).FindShortestSeparator(&result, l);
+  return result;
+}
+
+static std::string ShortSuccessor(const std::string& s) {
+  std::string result = s;
+  InternalKeyComparator(BytewiseComparator()).FindShortSuccessor(&result);
+  return result;
+}
+
+static void TestKey(const std::string& key,
+uint64_t seq,
+ValueType vt) {
+  std::string encoded = IKey(key, seq, vt);
+
+  Slice in(encoded);
+  ParsedInternalKey decoded("", 0, kTypeValue);
+
+  ASSERT_TRUE(ParseInternalKey(in, &decoded));
+  ASSERT_EQ(key, decoded.user_key.ToString());
+  ASSERT_EQ(seq, decoded.sequence);
+  ASSERT_EQ(vt, decoded.type);
+
+  ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
+}
+
+class FormatTest { };
+
+TEST(FormatTest, InternalKey_EncodeDecode) {
+  const char* keys[] = { "", "k", "hello", "longg" };
+  const uint64_t seq[] = {
+1, 2, 3,
+(1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1,
+(1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
+(1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1
+  };
+  for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
+for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
+  TestKey(keys[k], seq[s], kTypeValue);
+  TestKey("hello", 1, kTypeDeletion);
+}
+  }
+}
+
+TEST(FormatTest, InternalKeyShortSeparator) {
+  // When user keys are same
+  ASSERT_EQ(IKey("foo", 100, kTypeValue),
+Shorten(IKey("foo", 100, kTypeValue),
+IKey("foo", 99, kTypeValue)));
+  ASSERT_EQ(IKey("foo", 100, kTypeValue),
+Shorten(IKey("foo", 100, kTypeValue),
+IKey("foo", 101, kTypeValue)));
+  ASSERT_EQ(IKey("foo", 100, kTypeValue),
+Shorten(IKey("foo", 100, kTypeValue),
+IKey("foo", 100, kTypeValue)));
+  ASSERT_EQ(IKey("foo", 100, kTypeValue),
+Shorten(IKey("foo", 100, kTypeValue),
+IKey("foo", 100, kTypeDeletion)));
+
+  // When user keys are misordered
+  ASSERT_EQ(IKey("foo", 100, kTypeValue),
+Shorten(IKey("foo", 100, kTypeValue),
+IKey("bar", 99, kTypeValue)));
+
+  // When user keys are different, but correctly ordered
+  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
+Shorten(IKey("foo", 100, kTypeValue),
+IKey("hello", 200, kTypeValue)));
+
+  // When start user key is prefix of limit user key
+  ASSERT_EQ(IKey("foo", 100, kTypeValue),
+Shorten(IKey("foo", 100, kTypeValue),
+IKey("foobar", 200, kTypeValue)));
+
+  // When limit user key is prefix of start user key
+  ASSERT_EQ(IKey("foobar", 100, kTypeValue),
+Shorten(IKey("foobar", 100, kTypeValue),
+IKey("foo", 200, kTypeValue)));
+}
+
+TEST(FormatTest, InternalKeyShortestSuccessor) {
+  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
+ShortSuccessor(IKey("foo", 100, kTypeValue)));
+  ASSERT_EQ(IKey("\xff\xff", 100, kTypeValue),
+ShortSuccessor(IKey("\xff\xff", 100, kTypeValue)));
+}
+
+}  // namespace leveldb
+
+int main(int argc, char** argv) {
+  return leveldb::test::RunAllTests();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/filename.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/filename.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/filename.cc
new file mode 100644
index 000..da32946
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/filename.cc
@@ -0,0 +1,144 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights 

[40/50] [abbrv] hadoop git commit: [partial-ns] Import snappy in hdfsdb.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/asyoulik.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/asyoulik.txt
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/asyoulik.txt
new file mode 100644
index 000..88dc7b6
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/asyoulik.txt
@@ -0,0 +1,4122 @@
+   AS YOU LIKE IT
+
+
+   DRAMATIS PERSONAE
+
+
+DUKE SENIORliving in banishment.
+
+DUKE FREDERICK his brother, an usurper of his dominions.
+
+
+AMIENS |
+   |  lords attending on the banished duke.
+JAQUES |
+
+
+LE BEAUa courtier attending upon Frederick.
+
+CHARLESwrestler to Frederick.
+
+
+OLIVER |
+   |
+JAQUES (JAQUES DE BOYS:)   |  sons of Sir Rowland de Boys.
+   |
+ORLANDO|
+
+
+ADAM   |
+   |  servants to Oliver.
+DENNIS |
+
+
+TOUCHSTONE a clown.
+
+SIR OLIVER MARTEXT a vicar.
+
+
+CORIN  |
+   |  shepherds.
+SILVIUS|
+
+
+WILLIAMa country fellow in love with Audrey.
+
+   A person representing HYMEN. (HYMEN:)
+
+ROSALIND   daughter to the banished duke.
+
+CELIA  daughter to Frederick.
+
+PHEBE  a shepherdess.
+
+AUDREY a country wench.
+
+   Lords, pages, and attendants, 
+   (Forester:)
+   (A Lord:)
+   (First Lord:)
+   (Second Lord:)
+   (First Page:)
+   (Second Page:)
+
+
+SCENE  Oliver's house; Duke Frederick's court; and the
+   Forest of Arden.
+
+
+
+
+   AS YOU LIKE IT
+
+
+ACT I
+
+
+
+SCENE IOrchard of Oliver's house.
+
+
+   [Enter ORLANDO and ADAM]
+
+ORLANDOAs I remember, Adam, it was upon this fashion
+   bequeathed me by will but poor a thousand crowns,
+   and, as thou sayest, charged my brother, on his
+   blessing, to breed me well: and there begins my
+   sadness. My brother Jaques he keeps at school, and
+   report speaks goldenly of his profit: for my part,
+   he keeps me rustically at home, or, to speak more
+   properly, stays me here at home unkept; for call you
+   that keeping for a gentleman of my birth, that
+   differs not from the stalling of an ox? His horses
+   are bred better; for, besides that they are fair
+   with their feeding, they are taught their manage,
+   and to that end riders dearly hired: but I, his
+   brother, gain nothing under him but growth; for the
+   which his animals on his dunghills are as much
+   bound to him as I. Besides this nothing that he so
+   plentifully gives me, the something that nature gave
+   me his countenance seems to take from me: he lets
+   me feed with his hinds, bars me the place of a
+   brother, and, as much as in him lies, mines my
+   gentility with my education. This is it, Adam, that
+   grieves me; and the spirit of my father, which I
+   think is within me, begins to mutiny against this
+   servitude: I will no longer endure it, though yet I
+   know no wise remedy how to avoid it.
+
+ADAM   Yonder comes my master, your brother.
+
+ORLANDOGo apart, Adam, and thou shalt hear how he will
+   shake me up.
+
+   [Enter OLIVER]
+
+OLIVER Now, sir! what make you here?
+
+ORLANDONothing: I am not taught to make any thing.
+
+OLIVER What mar you then, sir?
+
+ORLANDOMarry, sir, I am helping you to mar that which God
+   made, a poor unworthy brother of yours, with idleness.
+
+OLIVER Marry, sir, be better employed, and be naught awhile.
+
+ORLANDOShall I keep your hogs and eat husks with them?
+   What prodigal portion have I spent, that I should
+   come to such penury?
+
+OLIVER Know you where your are, sir?
+
+ORLANDOO, sir, very well; here in your orchard.
+
+OLIVER Know you before whom, sir?
+
+ORLANDOAy, better than him I am before knows me. I know
+   you are my eldest brother; and, in the gentle
+   condition of blood, you should so know me. The
+   courtesy of nations allows you my better, in that
+   you are the first-born; but the same tradition
+   takes not away my blood, were there twenty brothers
+   betwixt us: I have as much of my father in me as
+   you; albeit, I confess, your coming before me is
+   nearer to his reverence.
+
+OLIVER What, boy!
+
+ORLANDOCome, come, elder brother, you are too young in this.
+
+OLIVER Wilt thou lay hands on me, villain?
+
+ORLANDOI am no villain; I am the youngest son of Sir
+   Rowland de Boys; he was my father, and he is thrice
+   a villain that says such a father begot villains.
+   Wert thou not my brother, I would not take this hand
+   from thy throat till this other had pulled out thy
+   tongue for 

[06/50] [abbrv] hadoop git commit: Remove getPreferredBlockReplication().

2016-01-05 Thread wheat9
Remove getPreferredBlockReplication().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf23b986
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf23b986
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf23b986

Branch: refs/heads/feature-HDFS-8286
Commit: cf23b98670dc1c2270bad82f08c39d219c130718
Parents: 1d8590d
Author: Haohui Mai 
Authored: Tue May 12 13:45:15 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:56 2015 -0700

--
 .../server/blockmanagement/BlockCollection.java |  6 
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  2 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 19 ++
 .../snapshot/FileWithSnapshotFeature.java   | 38 ++--
 .../blockmanagement/TestBlockManager.java   |  2 --
 .../snapshot/TestFileWithSnapshotFeature.java   |  1 -
 .../namenode/snapshot/TestSnapshotDeletion.java |  9 +++--
 .../snapshot/TestSnapshotReplication.java   | 30 +---
 8 files changed, 54 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf23b986/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index c0a959c..0ee0439 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -55,12 +55,6 @@ public interface BlockCollection {
   public long getPreferredBlockSize();
 
   /**
-   * Get block replication for the collection 
-   * @return block replication value
-   */
-  public short getPreferredBlockReplication();
-
-  /** 
* @return the storage policy ID.
*/
   public byte getStoragePolicyID();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf23b986/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 6a830ee..d7c463a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -87,7 +87,7 @@ class FSDirWriteFileOp {
 
 // update space consumed
 fsd.updateCount(iip, 0, -fileNode.getPreferredBlockSize(),
-fileNode.getPreferredBlockReplication(), true);
+uc.getReplication(), true);
 return true;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf23b986/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index fb25e9d..60f3ad6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -354,20 +354,6 @@ public class INodeFile extends INodeWithAdditionalFields
 return getFileReplication(CURRENT_STATE_ID);
   }
 
-  @Override // BlockCollection
-  public short getPreferredBlockReplication() {
-short max = getFileReplication(CURRENT_STATE_ID);
-FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
-if (sf != null) {
-  short maxInSnapshot = sf.getMaxBlockRepInDiffs();
-  if (sf.isCurrentFileDeleted()) {
-return maxInSnapshot;
-  }
-  max = maxInSnapshot > max ? maxInSnapshot : max;
-}
-return max;
-  }
-
   /** Set the replication factor of this file. */
   public final void setFileReplication(short replication) {
 header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
@@ -404,7 +390,7 @@ public class INodeFile extends INodeWithAdditionalFields
 
   private void setStoragePolicyID(byte storagePolicyId) {
 header = 
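
For context, the method deleted from INodeFile above computed a file's effective replication as the maximum over the live file and every snapshot diff, falling back to the snapshot maximum when the current file is deleted. A minimal stand-alone Java sketch of that rule, for illustration only (plain arrays stand in for the Hadoop snapshot feature classes; not part of the commit):

    /**
     * Illustration: the rule implemented by the removed
     * getPreferredBlockReplication() in INodeFile.
     */
    class PreferredReplicationSketch {
        static short preferredReplication(short currentReplication,
                                          boolean currentFileDeleted,
                                          short[] replicationInSnapshotDiffs) {
            short maxInSnapshots = 0;
            for (short r : replicationInSnapshotDiffs) {
                maxInSnapshots = (short) Math.max(maxInSnapshots, r);
            }
            // If the current file is deleted, only the snapshot copies matter.
            return currentFileDeleted
                    ? maxInSnapshots
                    : (short) Math.max(currentReplication, maxInSnapshots);
        }

        public static void main(String[] args) {
            // Live file at replication 2, one snapshot recorded replication 3.
            System.out.println(preferredReplication((short) 2, false, new short[]{3, 1})); // prints 3
        }
    }

After this commit the caller shown in the FSDirWriteFileOp hunk above charges the quota update with the replication recorded on the under-construction feature (uc.getReplication()) instead of recomputing this maximum.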

[50/50] [abbrv] hadoop git commit: Cache LevelDB Snapshot reference.

2016-01-05 Thread wheat9
Cache LevelDB Snapshot reference.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2425034b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2425034b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2425034b

Branch: refs/heads/feature-HDFS-8286
Commit: 2425034b643087f106a7cc6eee5551275105872c
Parents: 13f7b64
Author: Haohui Mai 
Authored: Wed Jun 3 14:59:43 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:57:02 2015 -0700

--
 .../hdfs/server/namenode/FSDirectory.java   | 45 +++-
 .../server/namenode/LevelDBROTransaction.java   | 11 ++---
 .../server/namenode/LevelDBRWTransaction.java   |  1 +
 3 files changed, 48 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2425034b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index b3c6083..ca2d209 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.hdfsdb.Options;
+import org.apache.hadoop.hdfs.hdfsdb.Snapshot;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@@ -74,6 +75,7 @@ import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
@@ -186,11 +188,36 @@ public class FSDirectory implements Closeable {
   private INodeAttributeProvider attributeProvider;
   private final boolean enableLevelDb;
   private final org.apache.hadoop.hdfs.hdfsdb.DB levelDb;
+  private AtomicReference<Snapshot> currentSnapshot = new AtomicReference<>();
 
   org.apache.hadoop.hdfs.hdfsdb.DB getLevelDb() {
 return levelDb;
   }
 
+  Snapshot currentLevelDbSnapshot() {
+assert hasReadLock();
+if (currentSnapshot.get() == null) {
+  synchronized (currentSnapshot) {
+if (currentSnapshot.get() == null) {
+  currentSnapshot.set(levelDb.snapshot());
+}
+  }
+}
+return currentSnapshot.get();
+  }
+
+  void clearCurrentLevelDBSnapshot() {
+assert hasWriteLock();
+Snapshot s = currentSnapshot.get();
+if (s != null) {
+  try {
+s.close();
+  } catch (Exception e) {
+throw new RuntimeException(e);
+  }
+  currentSnapshot.set(null);
+}
+  }
   public void setINodeAttributeProvider(INodeAttributeProvider provider) {
 attributeProvider = provider;
   }
@@ -438,7 +465,23 @@ public class FSDirectory implements Closeable {
* Shutdown the filestore
*/
   @Override
-  public void close() throws IOException {}
+  public void close() throws IOException {
+if (enableLevelDb) {
+  writeLock();
+  try {
+if (currentSnapshot.get() != null) {
+  try {
+currentSnapshot.get().close();
+  } catch (Exception e) {
+throw new IOException(e);
+  }
+}
+levelDb.close();
+  } finally {
+writeUnlock();
+  }
+}
+  }
 
   void markNameCacheInitialized() {
 writeLock();
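
The new FSDirectory code above keeps one LevelDB snapshot for readers and drops it whenever a writer mutates the namespace. A self-contained sketch of that caching pattern, for illustration only, with hypothetical Db/Snapshot stand-ins for the org.apache.hadoop.hdfs.hdfsdb bindings:

    import java.util.concurrent.atomic.AtomicReference;

    class SnapshotCacheSketch {
        // Hypothetical stand-ins for the hdfsdb DB and Snapshot types in the diff.
        interface Snapshot extends AutoCloseable { @Override void close(); }
        interface Db { Snapshot snapshot(); }

        private final Db db;
        private final AtomicReference<Snapshot> current = new AtomicReference<>();

        SnapshotCacheSketch(Db db) { this.db = db; }

        /** Readers share one snapshot; the first reader after an invalidation creates it. */
        Snapshot currentSnapshot() {
            Snapshot s = current.get();
            if (s == null) {
                synchronized (current) {            // mirrors the double-checked block above
                    s = current.get();
                    if (s == null) {
                        s = db.snapshot();
                        current.set(s);
                    }
                }
            }
            return s;
        }

        /** Writers invalidate the cached snapshot so later reads see their mutations. */
        void clearCurrentSnapshot() {
            Snapshot s = current.getAndSet(null);
            if (s != null) {
                s.close();
            }
        }
    }

In the commit itself the fast path asserts the FSDirectory read lock and the clearing path the write lock, so the synchronized block only serializes the rare case where several readers race to create the first snapshot.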

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2425034b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBROTransaction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBROTransaction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBROTransaction.java
index 50d8c30..174443a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBROTransaction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBROTransaction.java
@@ -30,7 +30,6 @@ import static 
org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
 class LevelDBROTransaction extends ROTransaction {
   private final 

[28/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
[partial-ns] Import HDFSDB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a6419f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a6419f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a6419f4

Branch: refs/heads/feature-HDFS-8286
Commit: 4a6419f4ac5e84bcafe6bfaa6cc5663cfc8283ad
Parents: 36cdcd7
Author: Haohui Mai 
Authored: Thu Sep 18 15:41:58 2014 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:59 2015 -0700

--
 .../src/main/native/hdfsdb/AUTHORS  |   12 +
 .../src/main/native/hdfsdb/LICENSE  |   27 +
 .../src/main/native/hdfsdb/Makefile |  215 ++
 .../hadoop-hdfsdb/src/main/native/hdfsdb/NEWS   |   17 +
 .../hadoop-hdfsdb/src/main/native/hdfsdb/README |   51 +
 .../src/main/native/hdfsdb/README.hdfsdb|6 +
 .../src/main/native/hdfsdb/README.hdfsdb~   |1 +
 .../hadoop-hdfsdb/src/main/native/hdfsdb/TODO   |   14 +
 .../main/native/hdfsdb/build_detect_platform|  221 ++
 .../main/native/hdfsdb/db/autocompact_test.cc   |  118 +
 .../src/main/native/hdfsdb/db/builder.cc|   88 +
 .../src/main/native/hdfsdb/db/builder.h |   34 +
 .../src/main/native/hdfsdb/db/c.cc  |  595 +
 .../src/main/native/hdfsdb/db/c_test.c  |  390 
 .../main/native/hdfsdb/db/corruption_test.cc|  374 +++
 .../src/main/native/hdfsdb/db/db_bench.cc   |  979 
 .../src/main/native/hdfsdb/db/db_impl.cc| 1513 +
 .../src/main/native/hdfsdb/db/db_impl.h |  211 ++
 .../src/main/native/hdfsdb/db/db_iter.cc|  317 +++
 .../src/main/native/hdfsdb/db/db_iter.h |   28 +
 .../src/main/native/hdfsdb/db/db_test.cc| 2128 ++
 .../src/main/native/hdfsdb/db/dbformat.cc   |  140 ++
 .../src/main/native/hdfsdb/db/dbformat.h|  230 ++
 .../src/main/native/hdfsdb/db/dbformat_test.cc  |  112 +
 .../src/main/native/hdfsdb/db/filename.cc   |  144 ++
 .../src/main/native/hdfsdb/db/filename.h|   85 +
 .../src/main/native/hdfsdb/db/filename_test.cc  |  123 +
 .../src/main/native/hdfsdb/db/leveldb_main.cc   |  238 ++
 .../src/main/native/hdfsdb/db/log_format.h  |   35 +
 .../src/main/native/hdfsdb/db/log_reader.cc |  266 +++
 .../src/main/native/hdfsdb/db/log_reader.h  |  108 +
 .../src/main/native/hdfsdb/db/log_test.cc   |  530 +
 .../src/main/native/hdfsdb/db/log_writer.cc |  103 +
 .../src/main/native/hdfsdb/db/log_writer.h  |   48 +
 .../src/main/native/hdfsdb/db/memtable.cc   |  145 ++
 .../src/main/native/hdfsdb/db/memtable.h|   91 +
 .../src/main/native/hdfsdb/db/repair.cc |  461 
 .../src/main/native/hdfsdb/db/skiplist.h|  379 
 .../src/main/native/hdfsdb/db/skiplist_test.cc  |  378 
 .../src/main/native/hdfsdb/db/snapshot.h|   66 +
 .../src/main/native/hdfsdb/db/table_cache.cc|  127 ++
 .../src/main/native/hdfsdb/db/table_cache.h |   61 +
 .../src/main/native/hdfsdb/db/version_edit.cc   |  266 +++
 .../src/main/native/hdfsdb/db/version_edit.h|  107 +
 .../main/native/hdfsdb/db/version_edit_test.cc  |   46 +
 .../src/main/native/hdfsdb/db/version_set.cc| 1484 
 .../src/main/native/hdfsdb/db/version_set.h |  396 
 .../main/native/hdfsdb/db/version_set_test.cc   |  179 ++
 .../src/main/native/hdfsdb/db/write_batch.cc|  147 ++
 .../native/hdfsdb/db/write_batch_internal.h |   49 +
 .../main/native/hdfsdb/db/write_batch_test.cc   |  120 +
 .../native/hdfsdb/doc/bench/db_bench_sqlite3.cc |  718 ++
 .../native/hdfsdb/doc/bench/db_bench_tree_db.cc |  528 +
 .../src/main/native/hdfsdb/doc/benchmark.html   |  459 
 .../src/main/native/hdfsdb/doc/doc.css  |   89 +
 .../src/main/native/hdfsdb/doc/impl.html|  213 ++
 .../src/main/native/hdfsdb/doc/index.html   |  549 +
 .../src/main/native/hdfsdb/doc/log_format.txt   |   75 +
 .../src/main/native/hdfsdb/doc/table_format.txt |  104 +
 .../main/native/hdfsdb/helpers/memenv/memenv.cc |  384 
 .../main/native/hdfsdb/helpers/memenv/memenv.h  |   20 +
 .../native/hdfsdb/helpers/memenv/memenv_test.cc |  232 ++
 .../src/main/native/hdfsdb/include/leveldb/c.h  |  290 +++
 .../main/native/hdfsdb/include/leveldb/cache.h  |   99 +
 .../native/hdfsdb/include/leveldb/comparator.h  |   63 +
 .../src/main/native/hdfsdb/include/leveldb/db.h |  161 ++
 .../main/native/hdfsdb/include/leveldb/env.h|  333 +++
 .../hdfsdb/include/leveldb/filter_policy.h  |   70 +
 .../native/hdfsdb/include/leveldb/iterator.h|  100 +
 .../native/hdfsdb/include/leveldb/options.h |  195 ++
 .../main/native/hdfsdb/include/leveldb/slice.h  |  109 +
 .../main/native/hdfsdb/include/leveldb/status.h |  106 +
 .../main/native/hdfsdb/include/leveldb/table.h  |   85 +
 

[38/50] [abbrv] hadoop git commit: [partial-ns] Import snappy in hdfsdb.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/html 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/html
new file mode 100644
index 000..ef768cc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/html
@@ -0,0 +1 @@
+  content: @ 10998720: 'HTTP/1.1 200 
OK\r\nX-Google-Crawl-Date: Mon, 08 Nov 2004 17:22:09 GMT\r\nContent-Type: 
text/html\r\nConnection: close\r\nX-Powered-By: PHP/4.3.8\r\nServer: 
Apache/1.3.31 (Unix) mod_gzip/1.3.19.1a PHP/4.3.8\r\nDate: Mon, 08 Nov 2004 
17:19:07 GMT\r\n\r\nhttp://www.w3.org/TR/html4/loose.dtd\;>\r\n\r\n\r\n\r\n\r\n\nMicro Achat : Ordinateurs, PDA -  Toute 
l\'informatique avec 01Informatique, L\'Ordinateur Individuel, Micro Hebdo, 
D\351cision Informatique et 01R\351seaux\r\n\r\n\r\nhttp://www.01net.com/styles/01net.css\;>\r\nhttp://www.01net.com/styles/tableau_autre.css\;>\r\n\r\nA{TEXT-DECORATION: 
none;color:#00;}\r\nA:visited{TEXT-DECORATION: 
none;color:#00;}\r\n\r\n\r\nvar 
sameAsBigDay = new Date();\r\nvar ord = (sameAsBigDay.getTime());\r\nvar 
pubsMotsCles = \"\";\r\nvar exc;\r\nvar lienhautdepage = \"\";\r\nvar 
etatmessage = false;\r\nvar enchainement = 
false;\r\nsas_tmstp=Math.round(Math.random()*100);\r\nfunction 
SmartAdServer(sas_pageid,sas_formatid,sas_master,sas_target) {\r\n if 
((sas_master!=\'M\')&&(sas_master!=\'S\')) {sas_master=\'S\'};\r\n 
document.write(\'http://www.smartadserver.com/call/pubj/\' + 
sas_pageid + \'/\' + sas_formatid + \'/\'+sas_master + \'/\' + sas_tmstp + 
\'/\' + escape(sas_target) + 
\'?\">\');\r\n}\r\n\r\nhttp://www.01net.com/js/exc.js\";>\r\nhttp://telecharger.01net.com/shopping/js/headservicesV4.js\";>\r\n\r\nhttp://cinema.01net.com\;>T\351l\351charger des bandes-annonces, des 
teasers, des extraits et des making of de vos films cin\351ma 
pr\351f\351r\351s,\r\nhttp://www.01net.com/BusinessTechnologies/\;>Portail leader de 
l\'information sur les nouvelles technologies - Actualites et enquetes sur 
l\'informatique et les NTIC (nouvelles technologies de l\'information et la 
communication). Exemples de sujets : e-business, telecoms, CRM, VPN, 3G, DSL, 
fournisseurs d\'acces, Linux, piratage, securite informatique, 
progiciel...\r\nhttp://www.01net.com/securite_informatique.html\;>s\351curit\351 
informatique,\r\nhttp://www.01net.com/strategies_ntic.html\;>strat\351gies 
NTIC,\r\nhttp://www.01net.com/piratage_informatique.html\;>pirata
 ge informatique,\r\nhttp://www.01net.com/technologie_dsl.html\;>technologie DSL,\r\nhttp://www.01net.com/solutions_vpn.html\;>solutions VPN,\r\nhttp://www.01net.com/strategie_e-business.html\;>strat\351gies,\r\nhttp://www.01net.com/outils_crm.html\;>outils CRM,\r\nhttp://www.01net.com/logiciel_linux.html\;>logiciel Linux,\r\nhttp://www.01net.com/fournisseur_d_acces.html\;>fournisseur 
d\'acc\350s,\r\nhttp://www.01net.com/progiciel.html\;>progiciel,\r\nhttp://www.01net.com/stockage_de_donnees.html\;>stockage de 
donn\351es,\r\nhttp://www.01net.com/etudes_solution_informatique.html\;>\351tudes 
solution informatique,\r\nhttp://www.01net.com/NTIC.html\;>NTIC,\r\nhttp://www.01net.com/actualite_technologie.html\;>actualit\351 
technologie,\r\nhttp://www.01net.com/etude_materiel_informatique.html\;>\351tude 
mat\351riel informat
 ique,\r\nhttp://www.01net.com/actus/\;>actualit\351s des 
nouvelles technologies,\r\nhttp://www.01net.com/MicroPerso/\;>enqu\352tes et trucs et astuces sur 
le mat\351riel informatique, les logiciels, les fournisseurs 
d\'acc\350s...,\r\nhttp://www.01net.com/emploi/\;>emploi et 
formations informatiques : offres d\'emploi informatique...,\r\nhttp://www.01net.com/produits/\;>fiches, tests mat\351riels et logiciels 
en informatique,\r\nhttp://www.01net.com/01informatique\;>01 
Informatique,\r\nhttp://www.caractere.net\;>Caract\350re, le site 
des professionnels de l\'imprim\351 - communication imprim\351e, \351dition, 
emballage, encres, finition, flexographie, h\351liogravure, impression, 
imprimerie, imprimeur, industries graphiques, num\351rique, offset, papier, 
pr\351presse, presse, print, reprographie, s\351rigraphie, 
typographie,\r\nhttp://www.01net.com/decisionmicro\;>Decision 
Micro,\r\
 nhttp://www.01net.com/decisioninformatique\;>D\351cision 
Informatique,\r\nhttp://www.01net.com/01reseaux\;>01 
R\351seaux,\r\nhttp://logiciel.telecharger.01net.com/\;>logiciel,\r\nhttp://developpeur.telecharger.01net.com/windows/Programmation/\;>d\351veloppeur
 programmation,\r\nhttp://anti-virus.telecharger.01net.com\;>anti-virus,\r\nhttp://gravure-cd-dvd.telecharger.01net.com/windows/Utilitaire/gravure/\;>gravure
 cd 

[46/50] [abbrv] hadoop git commit: [partial-ns] Import snappy in hdfsdb.

2016-01-05 Thread wheat9
[partial-ns] Import snappy in hdfsdb.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb5ba73b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb5ba73b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb5ba73b

Branch: refs/heads/feature-HDFS-8286
Commit: cb5ba73bf65b137f21304e7d109352636733762a
Parents: 6f99a9d
Author: Haohui Mai 
Authored: Fri Sep 19 10:44:30 2014 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:57:00 2015 -0700

--
 .../src/main/native/hdfsdb/CMakeLists.txt   |26 +-
 .../src/main/native/snappy/AUTHORS  | 1 +
 .../src/main/native/snappy/COPYING  |54 +
 .../src/main/native/snappy/ChangeLog|  1916 +++
 .../src/main/native/snappy/Makefile.am  |23 +
 .../hadoop-hdfsdb/src/main/native/snappy/NEWS   |   128 +
 .../hadoop-hdfsdb/src/main/native/snappy/README |   135 +
 .../src/main/native/snappy/autogen.sh   | 7 +
 .../src/main/native/snappy/configure.ac |   133 +
 .../main/native/snappy/format_description.txt   |   110 +
 .../src/main/native/snappy/framing_format.txt   |   135 +
 .../src/main/native/snappy/m4/gtest.m4  |74 +
 .../src/main/native/snappy/snappy-c.cc  |90 +
 .../src/main/native/snappy/snappy-c.h   |   138 +
 .../src/main/native/snappy/snappy-internal.h|   150 +
 .../src/main/native/snappy/snappy-sinksource.cc |71 +
 .../src/main/native/snappy/snappy-sinksource.h  |   137 +
 .../main/native/snappy/snappy-stubs-internal.cc |42 +
 .../main/native/snappy/snappy-stubs-internal.h  |   491 +
 .../main/native/snappy/snappy-stubs-public.h|82 +
 .../main/native/snappy/snappy-stubs-public.h.in |98 +
 .../src/main/native/snappy/snappy-test.cc   |   606 +
 .../src/main/native/snappy/snappy-test.h|   582 +
 .../src/main/native/snappy/snappy.cc|  1306 ++
 .../src/main/native/snappy/snappy.h |   184 +
 .../src/main/native/snappy/snappy_unittest.cc   |  1355 +++
 .../src/main/native/snappy/testdata/alice29.txt |  3609 ++
 .../main/native/snappy/testdata/asyoulik.txt|  4122 +++
 .../main/native/snappy/testdata/baddata1.snappy |   Bin 0 -> 27512 bytes
 .../main/native/snappy/testdata/baddata2.snappy |   Bin 0 -> 27483 bytes
 .../main/native/snappy/testdata/baddata3.snappy |   Bin 0 -> 28384 bytes
 .../main/native/snappy/testdata/fireworks.jpeg  |   Bin 0 -> 123093 bytes
 .../main/native/snappy/testdata/geo.protodata   |   Bin 0 -> 118588 bytes
 .../src/main/native/snappy/testdata/html| 1 +
 .../src/main/native/snappy/testdata/html_x_4| 1 +
 .../src/main/native/snappy/testdata/kppkn.gtb   |   Bin 0 -> 184320 bytes
 .../src/main/native/snappy/testdata/lcet10.txt  |  7519 
 .../main/native/snappy/testdata/paper-100k.pdf  |   598 +
 .../main/native/snappy/testdata/plrabn12.txt| 10699 +
 .../src/main/native/snappy/testdata/urls.10K| 1 +++
 40 files changed, 44620 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/CMakeLists.txt
index eb2b5b2..a0fc53f 100644
--- a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/CMakeLists.txt
@@ -1,7 +1,9 @@
 cmake_minimum_required(VERSION 2.8)
-PROJECT(hdfsdb)
+project(hdfsdb)
 
-ENABLE_TESTING()
+include(CheckIncludeFiles)
+
+enable_testing()
 
 set(VERSION_MAJOR 1)
 set(VERSION_MINOR 15)
@@ -23,12 +25,30 @@ set(HDFSDBSRCS db/builder.cc db/db_impl.cc db/db_iter.cc 
db/dbformat.cc
   util/logging.cc util/options.cc util/status.cc
 )
 
+add_definitions(-DSNAPPY)
+include_directories(../snappy)
+set(SNAPPY_SRCS ../snappy/snappy.cc ../snappy/snappy-sinksource.cc 
../snappy/snappy-stubs-internal.cc)
+
+check_include_files("sys/uio.h" HAVE_SYS_UIO_H)
+if (HAVE_SYS_UIO_H)
+add_definitions(-DHAVE_SYS_UIO_H)
+endif()
+
+if(APPLE)
+add_definitions(-DOS_MACOSX -DLEVELDB_PLATFORM_POSIX)
+set(PORT_SRCS port/port_posix.cc util/env_posix.cc)
+elseif("${CMAKE_SYSTEM}" MATCHES "Linux")
+add_definitions(-DOS_LINUX -DLEVELDB_PLATFORM_POSIX)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-builtin-memcmp -pthread -fPIC")
+set(PORT_SRCS port/port_posix.cc util/env_posix.cc)
+endif()
+
 if(APPLE)
 add_definitions(-DOS_MACOSX -DLEVELDB_PLATFORM_POSIX)
 set(PORT_SRCS port/port_posix.cc util/env_posix.cc)
 endif()
 
-add_library(hdfsdb STATIC ${HDFSDBSRCS} 

[39/50] [abbrv] hadoop git commit: [partial-ns] Import snappy in hdfsdb.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata1.snappy
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata1.snappy
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata1.snappy
new file mode 100644
index 000..99d970f
Binary files /dev/null and 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata1.snappy
 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata2.snappy
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata2.snappy
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata2.snappy
new file mode 100644
index 000..8f5cb13
Binary files /dev/null and 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata2.snappy
 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata3.snappy
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata3.snappy
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata3.snappy
new file mode 100644
index 000..774aead
Binary files /dev/null and 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/baddata3.snappy
 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/fireworks.jpeg
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/fireworks.jpeg
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/fireworks.jpeg
new file mode 100644
index 000..078cf17
Binary files /dev/null and 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/fireworks.jpeg
 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/geo.protodata
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/geo.protodata
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/geo.protodata
new file mode 100644
index 000..c4e3e0d
Binary files /dev/null and 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/geo.protodata
 differ



[33/50] [abbrv] hadoop git commit: [partial-ns] Import snappy in hdfsdb.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/plrabn12.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/plrabn12.txt
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/plrabn12.txt
new file mode 100644
index 000..dfa72b6
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/plrabn12.txt
@@ -0,0 +1,10699 @@
+
+This is the February 1992 Project Gutenberg release of: 
+ 
+Paradise Lost by John Milton 
+ 
+The oldest etext known to Project Gutenberg (ca. 1964-1965) 
+(If you know of any older ones, please let us know.) 
+ 
+ 
+Introduction  (one page) 
+ 
+This etext was originally created in 1964-1965 according to Dr. 
+Joseph Raben of Queens College, NY, to whom it is attributed by 
+Project Gutenberg.  We had heard of this etext for years but it 
+was not until 1991 that we actually managed to track it down to 
+a specific location, and then it took months to convince people 
+to let us have a copy, then more months for them actually to do 
+the copying and get it to us.  Then another month to convert to 
+something we could massage with our favorite 486 in DOS.  After 
+that is was only a matter of days to get it into this shape you 
+will see below.  The original was, of course, in CAPS only, and 
+so were all the other etexts of the 60's and early 70's.  Don't 
+let anyone fool you into thinking any etext with both upper and 
+lower case is an original; all those original Project Gutenberg 
+etexts were also in upper case and were translated or rewritten 
+many times to get them into their current condition.  They have 
+been worked on by many people throughout the world. 
+ 
+In the course of our searches for Professor Raben and his etext 
+we were never able to determine where copies were or which of a 
+variety of editions he may have used as a source.  We did get a 
+little information here and there, but even after we received a 
+copy of the etext we were unwilling to release it without first 
+determining that it was in fact Public Domain and finding Raben 
+to verify this and get his permission.  Interested enough, in a 
+totally unrelated action to our searches for him, the professor 
+subscribed to the Project Gutenberg listserver and we happened, 
+by accident, to notice his name. (We don't really look at every 
+subscription request as the computers usually handle them.) The 
+etext was then properly identified, copyright analyzed, and the 
+current edition prepared. 
+ 
+To give you an estimation of the difference in the original and 
+what we have today:  the original was probably entered on cards 
+commonly known at the time as "IBM cards" (Do Not Fold, Spindle 
+or Mutilate) and probably took in excess of 100,000 of them.  A 
+single card could hold 80 characters (hence 80 characters is an 
+accepted standard for so many computer margins), and the entire 
+original edition we received in all caps was over 800,000 chars 
+in length, including line enumeration, symbols for caps and the 
+punctuation marks, etc., since they were not available keyboard 
+characters at the time (probably the keyboards operated at baud 
+rates of around 113, meaning the typists had to type slowly for 
+the keyboard to keep up). 
+ 
+This is the second version of Paradise Lost released by Project 
+Gutenberg.  The first was released as our October, 1991 etext. 
+ 
+ 
+ 
+ 
+ 
+Paradise Lost 
+ 
+ 
+ 
+ 
+Book I 
+ 
+ 
+Of Man's first disobedience, and the fruit 
+Of that forbidden tree whose mortal taste 
+Brought death into the World, and all our woe, 
+With loss of Eden, till one greater Man 
+Restore us, and regain the blissful seat, 
+Sing, Heavenly Muse, that, on the secret top 
+Of Oreb, or of Sinai, didst inspire 
+That shepherd who first taught the chosen seed 
+In the beginning how the heavens and earth 
+Rose out of Chaos: or, if Sion hill 
+Delight thee more, and Siloa's brook that flowed 
+Fast by the oracle of God, I thence 
+Invoke thy aid to my adventurous song, 
+That with no middle flight intends to soar 
+Above th' Aonian mount, while it pursues 
+Things unattempted yet in prose or rhyme. 
+And chiefly thou, O Spirit, that dost prefer 
+Before all temples th' upright heart and pure, 
+Instruct me, for thou know'st; thou from the first 
+Wast present, and, with mighty wings outspread, 
+Dove-like sat'st brooding on the vast Abyss, 
+And mad'st it pregnant: what in me is dark 
+Illumine, what is low raise and support; 
+That, to the height of this great argument, 
+I may assert Eternal Providence, 
+And justify the ways of God to men. 
+  Say first--for Heaven hides nothing from thy view, 
+Nor the deep tract of Hell--say first what cause 
+Moved our grand parents, in that happy state, 
+Favoured of Heaven so highly, to fall off 
+From their Creator, and 

[19/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/table/filter_block_test.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/table/filter_block_test.cc
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/table/filter_block_test.cc
new file mode 100644
index 000..8c4a474
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/table/filter_block_test.cc
@@ -0,0 +1,128 @@
+// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "table/filter_block.h"
+
+#include "leveldb/filter_policy.h"
+#include "util/coding.h"
+#include "util/hash.h"
+#include "util/logging.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+// For testing: emit an array with one hash value per key
+class TestHashFilter : public FilterPolicy {
+ public:
+  virtual const char* Name() const {
+return "TestHashFilter";
+  }
+
+  virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+for (int i = 0; i < n; i++) {
+  uint32_t h = Hash(keys[i].data(), keys[i].size(), 1);
+  PutFixed32(dst, h);
+}
+  }
+
+  virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
+uint32_t h = Hash(key.data(), key.size(), 1);
+for (size_t i = 0; i + 4 <= filter.size(); i += 4) {
+  if (h == DecodeFixed32(filter.data() + i)) {
+return true;
+  }
+}
+return false;
+  }
+};
+
+class FilterBlockTest {
+ public:
+  TestHashFilter policy_;
+};
+
+TEST(FilterBlockTest, EmptyBuilder) {
+  FilterBlockBuilder builder(&policy_);
+  Slice block = builder.Finish();
+  ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block));
+  FilterBlockReader reader(&policy_, block);
+  ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
+  ASSERT_TRUE(reader.KeyMayMatch(10, "foo"));
+}
+
+TEST(FilterBlockTest, SingleChunk) {
+  FilterBlockBuilder builder(&policy_);
+  builder.StartBlock(100);
+  builder.AddKey("foo");
+  builder.AddKey("bar");
+  builder.AddKey("box");
+  builder.StartBlock(200);
+  builder.AddKey("box");
+  builder.StartBlock(300);
+  builder.AddKey("hello");
+  Slice block = builder.Finish();
+  FilterBlockReader reader(&policy_, block);
+  ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
+  ASSERT_TRUE(reader.KeyMayMatch(100, "bar"));
+  ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
+  ASSERT_TRUE(reader.KeyMayMatch(100, "hello"));
+  ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
+  ASSERT_TRUE(! reader.KeyMayMatch(100, "missing"));
+  ASSERT_TRUE(! reader.KeyMayMatch(100, "other"));
+}
+
+TEST(FilterBlockTest, MultiChunk) {
+  FilterBlockBuilder builder(&policy_);
+
+  // First filter
+  builder.StartBlock(0);
+  builder.AddKey("foo");
+  builder.StartBlock(2000);
+  builder.AddKey("bar");
+
+  // Second filter
+  builder.StartBlock(3100);
+  builder.AddKey("box");
+
+  // Third filter is empty
+
+  // Last filter
+  builder.StartBlock(9000);
+  builder.AddKey("box");
+  builder.AddKey("hello");
+
+  Slice block = builder.Finish();
+  FilterBlockReader reader(&policy_, block);
+
+  // Check first filter
+  ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
+  ASSERT_TRUE(reader.KeyMayMatch(2000, "bar"));
+  ASSERT_TRUE(! reader.KeyMayMatch(0, "box"));
+  ASSERT_TRUE(! reader.KeyMayMatch(0, "hello"));
+
+  // Check second filter
+  ASSERT_TRUE(reader.KeyMayMatch(3100, "box"));
+  ASSERT_TRUE(! reader.KeyMayMatch(3100, "foo"));
+  ASSERT_TRUE(! reader.KeyMayMatch(3100, "bar"));
+  ASSERT_TRUE(! reader.KeyMayMatch(3100, "hello"));
+
+  // Check third filter (empty)
+  ASSERT_TRUE(! reader.KeyMayMatch(4100, "foo"));
+  ASSERT_TRUE(! reader.KeyMayMatch(4100, "bar"));
+  ASSERT_TRUE(! reader.KeyMayMatch(4100, "box"));
+  ASSERT_TRUE(! reader.KeyMayMatch(4100, "hello"));
+
+  // Check last filter
+  ASSERT_TRUE(reader.KeyMayMatch(9000, "box"));
+  ASSERT_TRUE(reader.KeyMayMatch(9000, "hello"));
+  ASSERT_TRUE(! reader.KeyMayMatch(9000, "foo"));
+  ASSERT_TRUE(! reader.KeyMayMatch(9000, "bar"));
+}
+
+}  // namespace leveldb
+
+int main(int argc, char** argv) {
+  return leveldb::test::RunAllTests();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/table/format.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/table/format.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/table/format.cc
new file mode 100644
index 000..cda1dec
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/table/format.cc
@@ -0,0 +1,145 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license 

[12/50] [abbrv] hadoop git commit: [partial-ns] Implement completeFile().

2016-01-05 Thread wheat9
[partial-ns] Implement completeFile().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba100175
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba100175
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba100175

Branch: refs/heads/feature-HDFS-8286
Commit: ba100175f966da61068c3407bbc53cace8e6f3fb
Parents: aab47e5
Author: Haohui Mai 
Authored: Tue May 19 16:09:38 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:58 2015 -0700

--
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 94 +++-
 .../hdfs/server/namenode/FSNamesystem.java  | 55 
 .../hdfs/server/namenode/RWTransaction.java |  4 +
 .../hdfs/server/namenode/TestAddBlockRetry.java | 10 ++-
 4 files changed, 98 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba100175/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index a136b1d..4cb7f2c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -710,60 +710,66 @@ class FSDirWriteFileOp {
   FSNamesystem fsn, String src, String holder, Block last, long fileId)
   throws IOException {
 assert fsn.hasWriteLock();
-final INodeFile pendingFile;
-final INodesInPath iip;
-INode inode = null;
-try {
-  if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
+FSDirectory fsd = fsn.getFSDirectory();
+try (RWTransaction tx = fsd.newRWTransaction().begin()) {
+  final Resolver.Result paths;
+  if (true || fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
 // Older clients may not have given us an inode ID to work with.
 // In this case, we have to try to resolve the path and hope it
 // hasn't changed or been deleted since the file was opened for write.
-iip = fsn.dir.getINodesInPath(src, true);
-inode = iip.getLastINode();
+paths = Resolver.resolve(tx, src);
   } else {
-inode = fsn.dir.getInode(fileId);
-iip = INodesInPath.fromINode(inode);
-if (inode != null) {
-  src = iip.getPath();
-}
+// Newer clients pass the inode ID, so we can just get the inode
+// directly.
+paths = Resolver.resolveById(tx, fileId);
   }
-  pendingFile = fsn.checkLease(src, holder, inode, fileId);
-} catch (LeaseExpiredException lee) {
-  if (inode != null && inode.isFile() &&
-  !inode.asFile().isUnderConstruction()) {
-// This could be a retry RPC - i.e the client tried to close
-// the file, but missed the RPC response. Thus, it is trying
-// again to close the file. If the file still exists and
-// the client's view of the last block matches the actual
-// last block, then we'll treat it as a successful close.
-// See HDFS-3031.
-final Block realLastBlock = inode.asFile().getLastBlock();
-if (Block.matchingIdAndGenStamp(last, realLastBlock)) {
-  NameNode.stateChangeLog.info("DIR* completeFile: " +
-  "request from " + holder + " to complete inode " + fileId +
-  "(" + src + ") which is already closed. But, it appears to be " +
-  "an RPC retry. Returning success");
-  return true;
+  if (paths.invalidPath()) {
+throw new InvalidPathException(src);
+  } else if (paths.notFound()) {
+throw new FileNotFoundException(src);
+  }
+  FlatINode inode = paths.inodesInPath().getLastINode();
+  FlatINodeFileFeature file = inode.feature(FlatINodeFileFeature.class);
+  try {
+fsn.checkLease(src, holder, inode);
+  } catch (LeaseExpiredException lee) {
+if (file != null && !file.inConstruction()) {
+  // This could be a retry RPC - i.e the client tried to close
+  // the file, but missed the RPC response. Thus, it is trying
+  // again to close the file. If the file still exists and
+  // the client's view of the last block matches the actual
+  // last block, then we'll treat it as a successful close.
+  // See HDFS-3031.
+  final Block realLastBlock = file.lastBlock();
+  if (Block.matchingIdAndGenStamp(last, realLastBlock)) {
+
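
The completeFile() rewrite above keeps the HDFS-3031 behaviour: if the lease has already expired but the file is no longer under construction and the client's view of the last block still matches, the close is treated as a retried RPC that already succeeded. A small sketch of just that decision, for illustration only, with a hypothetical Block record standing in for the HDFS block type:

    class CompleteFileRetrySketch {
        // Hypothetical stand-in for the block identity used in the diff.
        record Block(long id, long genStamp) {}

        static boolean matchingIdAndGenStamp(Block a, Block b) {
            return a != null && b != null && a.id() == b.id() && a.genStamp() == b.genStamp();
        }

        /**
         * Mirrors the hunk above: lease gone, file already closed, and the
         * client's last block matches the actual last block, so assume a retry.
         */
        static boolean treatAsSuccessfulRetry(boolean leaseExpired,
                                              boolean fileUnderConstruction,
                                              Block clientLastBlock,
                                              Block actualLastBlock) {
            return leaseExpired
                    && !fileUnderConstruction
                    && matchingIdAndGenStamp(clientLastBlock, actualLastBlock);
        }

        public static void main(String[] args) {
            Block last = new Block(42L, 7L);
            System.out.println(treatAsSuccessfulRetry(true, false, last, new Block(42L, 7L))); // true
        }
    }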

[04/50] [abbrv] hadoop git commit: [partial-ns] Implement rename() and rename2().

2016-01-05 Thread wheat9
[partial-ns] Implement rename() and rename2().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a82dbd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a82dbd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a82dbd3

Branch: refs/heads/feature-HDFS-8286
Commit: 7a82dbd33bcdbe76c6f65e71fdabdf5ad9653f5d
Parents: 707775c
Author: Haohui Mai 
Authored: Thu Apr 30 17:22:39 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:55 2015 -0700

--
 .../hdfs/server/namenode/FSDirRenameOp.java | 696 +++
 .../server/namenode/FSDirStatAndListingOp.java  |   3 +-
 .../server/namenode/TestGetBlockLocations.java  |   4 +-
 3 files changed, 264 insertions(+), 439 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a82dbd3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index b69bb42..f322186 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
@@ -29,12 +29,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.hdfs.util.ReadOnlyList;
-import org.apache.hadoop.util.ChunkedArrayList;
-import org.apache.hadoop.util.Time;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -46,6 +41,7 @@ import java.util.Map;
 
 import static 
org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import static 
org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
+import static org.apache.hadoop.util.Time.now;
 
 class FSDirRenameOp {
   @Deprecated
@@ -63,59 +59,67 @@ class FSDirRenameOp {
   throw new IOException("Invalid name: " + dst);
 }
 FSPermissionChecker pc = fsd.getPermissionChecker();
-
-byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
-byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
 HdfsFileStatus resultingStat = null;
-src = fsd.resolvePath(pc, src, srcComponents);
-dst = fsd.resolvePath(pc, dst, dstComponents);
-@SuppressWarnings("deprecation")
-final boolean status = renameTo(fsd, pc, src, dst, logRetryCache);
-if (status) {
-  INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
-  resultingStat = fsd.getAuditFileInfo(dstIIP);
-}
-return new RenameOldResult(status, resultingStat);
+try (RWTransaction tx = fsd.newRWTransaction().begin()) {
+  // Rename does not operate on link targets
+  // Do not resolveLink when checking permissions of src and dst
+  // Check write access to parent of src
+  Resolver.Result srcPaths = Resolver.resolveNoSymlink(tx, src);
+  Resolver.Result dstPaths = Resolver.resolve(tx, dst);
+  @SuppressWarnings("deprecation")
+  final boolean status = renameTo(tx,
+  fsd, pc, srcPaths, dstPaths, logRetryCache);
+  if (status) {
+dstPaths = Resolver.resolve(tx, dst);
+resultingStat = fsd.getAuditFileInfo(dstPaths.inodesInPath());
+  }
+  tx.commit();
+  return new RenameOldResult(status, resultingStat);
+}
   }
 
   /**
* Verify quota for rename operation where srcInodes[srcInodes.length-1] 
moves
* dstInodes[dstInodes.length-1]
*/
-  private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
-  INodesInPath dst) throws QuotaExceededException {
-if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) 
{
-  // Do not check quota if edits log is still being processed
-  return;
-}
-int i = 0;
-while(src.getINode(i) == 
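
The rewritten renameToInt() above wraps the whole operation in one read-write transaction: resolve the source without following symlinks, resolve the destination, perform the rename, re-resolve the destination for the audit status, and commit. A schematic sketch of that shape, for illustration only, with hypothetical stand-ins for the RWTransaction and Resolver classes in the diff:

    class RenameTxSketch {
        // Hypothetical stand-ins for the RWTransaction / Resolver API shown above.
        interface Tx extends AutoCloseable {
            void commit();
            @Override void close();                    // rolls back when commit() was not reached
        }
        interface Resolved { }
        interface Namespace {
            Tx begin();
            Resolved resolveNoSymlink(Tx tx, String path); // rename never follows link targets
            Resolved resolve(Tx tx, String path);
            boolean rename(Tx tx, Resolved src, Resolved dst);
            Object auditFileInfo(Resolved dst);
        }

        /** Resolve, mutate, re-resolve for auditing, then commit inside one transaction. */
        static boolean rename(Namespace ns, String src, String dst) {
            try (Tx tx = ns.begin()) {
                Resolved srcPaths = ns.resolveNoSymlink(tx, src);
                Resolved dstPaths = ns.resolve(tx, dst);
                boolean status = ns.rename(tx, srcPaths, dstPaths);
                if (status) {
                    ns.auditFileInfo(ns.resolve(tx, dst)); // refreshed view for the audit log
                }
                tx.commit();
                return status;
            }
        }
    }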

[23/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/version_set.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/version_set.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/version_set.cc
new file mode 100644
index 000..aa83df5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/version_set.cc
@@ -0,0 +1,1484 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/version_set.h"
+
+#include 
+#include 
+#include "db/filename.h"
+#include "db/log_reader.h"
+#include "db/log_writer.h"
+#include "db/memtable.h"
+#include "db/table_cache.h"
+#include "leveldb/env.h"
+#include "leveldb/table_builder.h"
+#include "table/merger.h"
+#include "table/two_level_iterator.h"
+#include "util/coding.h"
+#include "util/logging.h"
+
+namespace leveldb {
+
+static const int kTargetFileSize = 2 * 1048576;
+
+// Maximum bytes of overlaps in grandparent (i.e., level+2) before we
+// stop building a single file in a level->level+1 compaction.
+static const int64_t kMaxGrandParentOverlapBytes = 10 * kTargetFileSize;
+
+// Maximum number of bytes in all compacted files.  We avoid expanding
+// the lower level file set of a compaction if it would make the
+// total compaction cover more than this many bytes.
+static const int64_t kExpandedCompactionByteSizeLimit = 25 * kTargetFileSize;
+
+static double MaxBytesForLevel(int level) {
+  // Note: the result for level zero is not really used since we set
+  // the level-0 compaction threshold based on number of files.
+  double result = 10 * 1048576.0;  // Result for both level-0 and level-1
+  while (level > 1) {
+result *= 10;
+level--;
+  }
+  return result;
+}
+
+static uint64_t MaxFileSizeForLevel(int level) {
+  return kTargetFileSize;  // We could vary per level to reduce number of 
files?
+}
+
+static int64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
+  int64_t sum = 0;
+  for (size_t i = 0; i < files.size(); i++) {
+sum += files[i]->file_size;
+  }
+  return sum;
+}
+
+Version::~Version() {
+  assert(refs_ == 0);
+
+  // Remove from linked list
+  prev_->next_ = next_;
+  next_->prev_ = prev_;
+
+  // Drop references to files
+  for (int level = 0; level < config::kNumLevels; level++) {
+for (size_t i = 0; i < files_[level].size(); i++) {
+  FileMetaData* f = files_[level][i];
+  assert(f->refs > 0);
+  f->refs--;
+  if (f->refs <= 0) {
+delete f;
+  }
+}
+  }
+}
+
+int FindFile(const InternalKeyComparator& icmp,
+ const std::vector<FileMetaData*>& files,
+ const Slice& key) {
+  uint32_t left = 0;
+  uint32_t right = files.size();
+  while (left < right) {
+uint32_t mid = (left + right) / 2;
+const FileMetaData* f = files[mid];
+if (icmp.InternalKeyComparator::Compare(f->largest.Encode(), key) < 0) {
+  // Key at "mid.largest" is < "target".  Therefore all
+  // files at or before "mid" are uninteresting.
+  left = mid + 1;
+} else {
+  // Key at "mid.largest" is >= "target".  Therefore all files
+  // after "mid" are uninteresting.
+  right = mid;
+}
+  }
+  return right;
+}
+
+static bool AfterFile(const Comparator* ucmp,
+  const Slice* user_key, const FileMetaData* f) {
+  // NULL user_key occurs before all keys and is therefore never after *f
+  return (user_key != NULL &&
+  ucmp->Compare(*user_key, f->largest.user_key()) > 0);
+}
+
+static bool BeforeFile(const Comparator* ucmp,
+   const Slice* user_key, const FileMetaData* f) {
+  // NULL user_key occurs after all keys and is therefore never before *f
+  return (user_key != NULL &&
+  ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
+}
+
+bool SomeFileOverlapsRange(
+const InternalKeyComparator& icmp,
+bool disjoint_sorted_files,
+const std::vector<FileMetaData*>& files,
+const Slice* smallest_user_key,
+const Slice* largest_user_key) {
+  const Comparator* ucmp = icmp.user_comparator();
+  if (!disjoint_sorted_files) {
+// Need to check against all files
+for (size_t i = 0; i < files.size(); i++) {
+  const FileMetaData* f = files[i];
+  if (AfterFile(ucmp, smallest_user_key, f) ||
+  BeforeFile(ucmp, largest_user_key, f)) {
+// No overlap
+  } else {
+return true;  // Overlap
+  }
+}
+return false;
+  }
+
+  // Binary search over file list
+  uint32_t index = 0;
+  if (smallest_user_key != NULL) {
+// Find the earliest possible internal key for smallest_user_key
+InternalKey small(*smallest_user_key, 
kMaxSequenceNumber,kValueTypeForSeek);
+index 
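
FindFile() above is a lower-bound binary search: it returns the index of the first file whose largest key is greater than or equal to the target, so every earlier file can be skipped. The same invariant in a short Java sketch, for illustration only (long keys stand in for the InternalKey comparisons):

    import java.util.List;

    class FindFileSketch {
        /** Each file covers [smallest, largest]; files in a level are sorted and disjoint. */
        record FileMeta(long smallest, long largest) {}

        /** Index of the first file with largest >= key, or files.size() if none qualifies. */
        static int findFile(List<FileMeta> files, long key) {
            int left = 0;
            int right = files.size();
            while (left < right) {
                int mid = (left + right) >>> 1;   // unsigned shift avoids int overflow
                if (files.get(mid).largest() < key) {
                    left = mid + 1;               // everything at or before mid is too small
                } else {
                    right = mid;                  // mid might still be the answer
                }
            }
            return right;
        }

        public static void main(String[] args) {
            List<FileMeta> files =
                List.of(new FileMeta(0, 9), new FileMeta(10, 19), new FileMeta(20, 29));
            System.out.println(findFile(files, 15)); // 1
        }
    }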

[13/50] [abbrv] hadoop git commit: [partial-ns] Implement SetPermission().

2016-01-05 Thread wheat9
[partial-ns] Implement SetPermission().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5fe5b9a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5fe5b9a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5fe5b9a5

Branch: refs/heads/feature-HDFS-8286
Commit: 5fe5b9a5058bc160cc2b7c0c0b979e3fd997451b
Parents: ba10017
Author: Haohui Mai 
Authored: Fri May 22 15:17:05 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:58 2015 -0700

--
 .../hdfs/server/namenode/FSDirAttrOp.java   | 50 ++--
 .../hdfs/server/namenode/FSDirectory.java   |  5 ++
 .../hdfs/server/namenode/FSEditLogLoader.java   |  9 +++-
 .../hdfs/server/namenode/RWTransaction.java |  5 ++
 4 files changed, 43 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fe5b9a5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index ef62e05..f16183f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -49,23 +51,23 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KE
 
 public class FSDirAttrOp {
   static HdfsFileStatus setPermission(
-  FSDirectory fsd, final String srcArg, FsPermission permission)
+  FSDirectory fsd, final String src, FsPermission permission)
   throws IOException {
-String src = srcArg;
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-INodesInPath iip;
-fsd.writeLock();
-try {
-  src = fsd.resolvePath(pc, src, pathComponents);
-  iip = fsd.getINodesInPath4Write(src);
+try (RWTransaction tx = fsd.newRWTransaction().begin()) {
+  Resolver.Result paths = Resolver.resolve(tx, src);
+  if (paths.invalidPath()) {
+throw new InvalidPathException(src);
+  } else if (paths.notFound()) {
+throw new FileNotFoundException(src);
+  }
+  FlatINodesInPath iip = paths.inodesInPath();
   fsd.checkOwner(pc, iip);
-  unprotectedSetPermission(fsd, src, permission);
-} finally {
-  fsd.writeUnlock();
+  unprotectedSetPermission(tx, src, permission);
+  tx.logSetPermissions(src, permission);
+  tx.commit();
+  return fsd.getAuditFileInfo(iip);
 }
-fsd.getEditLog().logSetPermissions(src, permission);
-return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus setOwner(
@@ -246,18 +248,18 @@ public class FSDirAttrOp {
 }
   }
 
-  static void unprotectedSetPermission(
-  FSDirectory fsd, String src, FsPermission permissions)
-  throws FileNotFoundException, UnresolvedLinkException,
- QuotaExceededException, SnapshotAccessControlException {
-assert fsd.hasWriteLock();
-final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
-final INode inode = inodesInPath.getLastINode();
-if (inode == null) {
-  throw new FileNotFoundException("File does not exist: " + src);
+  static void unprotectedSetPermission(RWTransaction tx, String src,
+  FsPermission permissions) throws IOException {
+Resolver.Result paths = Resolver.resolve(tx, src);
+if (paths.invalidPath()) {
+  throw new InvalidPathException(src);
+} else if (paths.notFound()) {
+  throw new FileNotFoundException(src);
 }
-int snapshotId = inodesInPath.getLatestSnapshotId();
-inode.setPermission(permissions, snapshotId);
+FlatINode inode = paths.inodesInPath().getLastINode();
+ByteString b = new FlatINode.Builder().mergeFrom(inode).permission
+(permissions.toShort()).build();
+tx.putINode(inode.id(), b);
   }
 
   static void unprotectedSetOwner(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fe5b9a5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
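
The setPermission() rewrite above follows the transaction shape used throughout this series: resolve the path, fail fast on invalid or missing paths, check ownership, rebuild the immutable flat inode with the new bits, stage it, log the edit, and commit. A condensed sketch of that sequence, for illustration only, with hypothetical stand-in types for FlatINode and RWTransaction:

    class SetPermissionSketch {
        // Hypothetical immutable stand-in for FlatINode: an update builds a new value.
        record Inode(long id, short permission) {
            Inode withPermission(short p) { return new Inode(id, p); }
        }

        interface Store {
            Inode resolve(String path);                 // null if the path does not exist
            void put(long id, Inode inode);             // staged in the transaction
            void logSetPermissions(String path, short p);
            void commit();
        }

        /** Mirrors the diff: resolve, rebuild the inode, stage it, log the edit, commit. */
        static void setPermission(Store tx, String path, short permission) {
            Inode inode = tx.resolve(path);
            if (inode == null) {
                throw new IllegalArgumentException("File does not exist: " + path);
            }
            tx.put(inode.id(), inode.withPermission(permission));
            tx.logSetPermissions(path, permission);
            tx.commit();
        }
    }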

[09/50] [abbrv] hadoop git commit: [partial-ns] Implement startFile().

2016-01-05 Thread wheat9
[partial-ns] Implement startFile().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9c9c72b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9c9c72b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9c9c72b

Branch: refs/heads/feature-HDFS-8286
Commit: e9c9c72b83b4cf8f7bd2dde8d2c0086dec6e7890
Parents: 72e1828
Author: Haohui Mai 
Authored: Tue May 19 13:29:31 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:57 2015 -0700

--
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 288 ++-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  75 +++--
 .../hdfs/server/namenode/FSEditLogOp.java   |  12 +-
 .../hdfs/server/namenode/RWTransaction.java |   5 +
 .../hdfs/server/namenode/CreateEditsLog.java|  26 +-
 .../hdfs/server/namenode/TestEditLog.java   |  25 +-
 6 files changed, 240 insertions(+), 191 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c9c72b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index d7c463a..33e31e7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
 import org.apache.commons.io.Charsets;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.crypto.CipherSuite;
@@ -26,6 +27,7 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -47,13 +49,13 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderCon
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ChunkedArrayList;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -335,105 +337,147 @@ class FSDirWriteFileOp {
 
 boolean isRawPath = FSDirectory.isReservedRawName(src);
 FSDirectory fsd = fsn.getFSDirectory();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
-INodesInPath iip = fsd.getINodesInPath4Write(src);
+final StringMap ugid = fsd.ugid();
 
-// Verify that the destination does not exist as a directory already.
-final INode inode = iip.getLastINode();
-if (inode != null && inode.isDirectory()) {
-  throw new FileAlreadyExistsException(src +
-  " already exists as a directory");
-}
+try (RWTransaction tx = fsd.newRWTransaction().begin()) {
+  Resolver.Result paths = Resolver.resolve(tx, src);
+  if (paths.invalidPath()) {
+throw new InvalidPathException(src);
+  }
+
+  final FlatINodesInPath iip = paths.inodesInPath();
+  // Verify that the destination does not exist as a directory already.
+  if (paths.ok()) {
+FlatINode inode = paths.inodesInPath().getLastINode();
+if (inode.isDirectory()) {
+  throw new FileAlreadyExistsException(src +
+  " already exists as a directory");
+}
 
-final INodeFile myFile = INodeFile.valueOf(inode, src, true);
-if (fsd.isPermissionEnabled()) {
-  if (overwrite && myFile != null) {
-fsd.checkPathAccess(pc, iip, FsAction.WRITE);
+if (fsd.isPermissionEnabled()) {
+  if (overwrite) {
+fsd.checkPathAccess(pc, iip, FsAction.WRITE);
+  }
+}
   }
+
+  if (fsd.isPermissionEnabled()) {
   /*
* To overwrite existing file, need to check 'w' 

[03/50] [abbrv] hadoop git commit: [partial-ns] Implement delete().

2016-01-05 Thread wheat9
[partial-ns] Implement delete().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/707775c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/707775c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/707775c3

Branch: refs/heads/feature-HDFS-8286
Commit: 707775c39ff0c38e1fc530704f7837ad0d58d098
Parents: a56b2a3
Author: Haohui Mai 
Authored: Wed Apr 29 13:45:41 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:55 2015 -0700

--
 .../hdfs/server/namenode/FSDirDeleteOp.java | 212 +--
 .../server/namenode/FSDirStatAndListingOp.java  |   4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  19 +-
 .../hdfs/server/namenode/FSDirectory.java   |  29 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |  21 +-
 .../server/namenode/TestGetBlockLocations.java  |   7 +-
 6 files changed, 158 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/707775c3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 962f4b4..74dcf46 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -17,52 +17,40 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext;
 import org.apache.hadoop.util.ChunkedArrayList;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.List;
 
-import static 
org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
 import static org.apache.hadoop.util.Time.now;
 
 class FSDirDeleteOp {
   /**
* Delete the target directory and collect the blocks under it
*
-   * @param fsd the FSDirectory instance
-   * @param iip the INodesInPath instance containing all the INodes for the 
path
+   * @param tx the Transaction
+   * @param paths the path to be deleted
* @param collectedBlocks Blocks under the deleted directory
-   * @param removedINodes INodes that should be removed from inodeMap
* @return the number of files that have been removed
*/
-  static long delete(FSDirectory fsd, INodesInPath iip,
-  BlocksMapUpdateInfo collectedBlocks, List removedINodes,
-  List removedUCFiles, long mtime) throws IOException {
+  static long delete(
+  RWTransaction tx, Resolver.Result paths,
+  BlocksMapUpdateInfo collectedBlocks, List removedUCFiles, long 
mtime) throws IOException {
 if (NameNode.stateChangeLog.isDebugEnabled()) {
-  NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + 
iip.getPath());
+  NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + paths.src);
 }
-long filesRemoved = -1;
-fsd.writeLock();
-try {
-  if (deleteAllowed(iip, iip.getPath()) ) {
-List snapshottableDirs = new ArrayList<>();
-FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
-ReclaimContext context = new ReclaimContext(
-fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes,
-removedUCFiles);
-if (unprotectedDelete(fsd, iip, context, mtime)) {
-  filesRemoved = context.quotaDelta().getNsDelta();
-}
-fsd.getFSNamesystem().removeSnapshottableDirs(snapshottableDirs);
-fsd.updateCount(iip, context.quotaDelta(), false);
-  }
-} finally {
-  fsd.writeUnlock();
+final long filesRemoved;
+if (!deleteAllowed(paths)) {
+  filesRemoved = -1;
+} else {
+  filesRemoved = unprotectedDelete(tx, paths, collectedBlocks,
+   removedUCFiles, mtime);
 }
 return filesRemoved;
   }
@@ -89,19 +77,29 @@ class FSDirDeleteOp {
   throws IOException {
 FSDirectory fsd = fsn.getFSDirectory();
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
+try (RWTransaction tx = fsd.newRWTransaction().begin()) {
+  

[11/50] [abbrv] hadoop git commit: [partial-ns] Decouple with FSNamesystem in BlockManager.completeBlock().

2016-01-05 Thread wheat9
[partial-ns] Decouple with FSNamesystem in BlockManager.completeBlock().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aab47e59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aab47e59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aab47e59

Branch: refs/heads/feature-HDFS-8286
Commit: aab47e59c0ed83bd2eddd7d627e49e69384cd83e
Parents: 2618e61
Author: Haohui Mai 
Authored: Tue May 19 17:32:54 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:58 2015 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java | 8 +++-
 1 file changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aab47e59/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 548bdfd..afd8bb1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2511,8 +2511,6 @@ public class BlockManager {
   return block;
 }
 long bcId = storedBlock.getBlockCollectionId();
-BlockCollection bc = namesystem.getBlockCollection(bcId);
-assert bc != null : "Block must belong to a file";
 
 // add block to the datanode
 AddBlockResult result = storageInfo.addBlock(storedBlock);
@@ -2547,7 +2545,7 @@ public class BlockManager {
 
 if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
 numLiveReplicas >= minReplication) {
-  storedBlock = completeBlock(bc, storedBlock, false);
+  storedBlock = completeBlock(storedBlock, false);
 } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
   // check whether safe replication is reached for the block
   // only complete blocks are counted towards that
@@ -2556,9 +2554,9 @@ public class BlockManager {
   // handles the safe block count maintenance.
   namesystem.incrementSafeBlockCount(numCurrentReplica);
 }
-
+
 // if file is under construction, then done for now
-if (bc.isUnderConstruction()) {
+if (!storedBlock.isComplete()) {
   return storedBlock;
 }
 



[18/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/bloom_test.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/bloom_test.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/bloom_test.cc
new file mode 100644
index 000..77fb1b3
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/bloom_test.cc
@@ -0,0 +1,161 @@
+// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "leveldb/filter_policy.h"
+
+#include "util/coding.h"
+#include "util/logging.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace leveldb {
+
+static const int kVerbose = 1;
+
+static Slice Key(int i, char* buffer) {
+  EncodeFixed32(buffer, i);
+  return Slice(buffer, sizeof(uint32_t));
+}
+
+class BloomTest {
+ private:
+  const FilterPolicy* policy_;
+  std::string filter_;
+  std::vector<std::string> keys_;
+
+ public:
+  BloomTest() : policy_(NewBloomFilterPolicy(10)) { }
+
+  ~BloomTest() {
+delete policy_;
+  }
+
+  void Reset() {
+keys_.clear();
+filter_.clear();
+  }
+
+  void Add(const Slice& s) {
+keys_.push_back(s.ToString());
+  }
+
+  void Build() {
+std::vector<Slice> key_slices;
+for (size_t i = 0; i < keys_.size(); i++) {
+  key_slices.push_back(Slice(keys_[i]));
+}
+filter_.clear();
+policy_->CreateFilter(&key_slices[0], key_slices.size(), &filter_);
+keys_.clear();
+if (kVerbose >= 2) DumpFilter();
+  }
+
+  size_t FilterSize() const {
+return filter_.size();
+  }
+
+  void DumpFilter() {
+    fprintf(stderr, "F(");
+    for (size_t i = 0; i+1 < filter_.size(); i++) {
+      const unsigned int c = static_cast<unsigned int>(filter_[i]);
+      for (int j = 0; j < 8; j++) {
+        fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
+      }
+    }
+    fprintf(stderr, ")\n");
+  }
+
+  bool Matches(const Slice& s) {
+    if (!keys_.empty()) {
+      Build();
+    }
+    return policy_->KeyMayMatch(s, filter_);
+  }
+
+  double FalsePositiveRate() {
+char buffer[sizeof(int)];
+int result = 0;
+for (int i = 0; i < 10000; i++) {
+  if (Matches(Key(i + 1000000000, buffer))) {
+result++;
+  }
+}
+return result / 10000.0;
+  }
+};
+
+TEST(BloomTest, EmptyFilter) {
+  ASSERT_TRUE(! Matches("hello"));
+  ASSERT_TRUE(! Matches("world"));
+}
+
+TEST(BloomTest, Small) {
+  Add("hello");
+  Add("world");
+  ASSERT_TRUE(Matches("hello"));
+  ASSERT_TRUE(Matches("world"));
+  ASSERT_TRUE(! Matches("x"));
+  ASSERT_TRUE(! Matches("foo"));
+}
+
+static int NextLength(int length) {
+  if (length < 10) {
+length += 1;
+  } else if (length < 100) {
+length += 10;
+  } else if (length < 1000) {
+length += 100;
+  } else {
+length += 1000;
+  }
+  return length;
+}
+
+TEST(BloomTest, VaryingLengths) {
+  char buffer[sizeof(int)];
+
+  // Count number of filters that significantly exceed the false positive rate
+  int mediocre_filters = 0;
+  int good_filters = 0;
+
+  for (int length = 1; length <= 10000; length = NextLength(length)) {
+Reset();
+for (int i = 0; i < length; i++) {
+  Add(Key(i, buffer));
+}
+Build();
+
+ASSERT_LE(FilterSize(), static_cast<size_t>((length * 10 / 8) + 40))
+<< length;
+
+// All added keys must match
+for (int i = 0; i < length; i++) {
+  ASSERT_TRUE(Matches(Key(i, buffer)))
+  << "Length " << length << "; key " << i;
+}
+
+// Check false positive rate
+double rate = FalsePositiveRate();
+if (kVerbose >= 1) {
+  fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = 
%6d\n",
+  rate*100.0, length, static_cast<int>(FilterSize()));
+}
+ASSERT_LE(rate, 0.02);   // Must not be over 2%
+if (rate > 0.0125) mediocre_filters++;  // Allowed, but not too often
+else good_filters++;
+  }
+  if (kVerbose >= 1) {
+fprintf(stderr, "Filters: %d good, %d mediocre\n",
+good_filters, mediocre_filters);
+  }
+  ASSERT_LE(mediocre_filters, good_filters/5);
+}
+
+// Different bits-per-byte
+
+}  // namespace leveldb
+
+int main(int argc, char** argv) {
+  return leveldb::test::RunAllTests();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/cache.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/cache.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/cache.cc
new file mode 100644
index 000..8b197bc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/cache.cc
@@ -0,0 +1,325 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights 

[16/50] [abbrv] hadoop git commit: [partial-ns] Implement SetTime().

2016-01-05 Thread wheat9
[partial-ns] Implement SetTime().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36cdcd77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36cdcd77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36cdcd77

Branch: refs/heads/feature-HDFS-8286
Commit: 36cdcd77e60f66135e72b27b4a32e6af6bdb8abe
Parents: 7f09c48
Author: Haohui Mai 
Authored: Tue May 26 14:50:56 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:59 2015 -0700

--
 .../hdfs/server/namenode/FSDirAttrOp.java   | 97 +---
 .../hdfs/server/namenode/FSEditLogLoader.java   |  9 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 23 ++---
 .../hdfs/server/namenode/RWTransaction.java |  4 +
 4 files changed, 63 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36cdcd77/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index f16183f..4221f80 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -108,31 +108,27 @@ public class FSDirAttrOp {
 }
 
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-
-INodesInPath iip;
-fsd.writeLock();
-try {
-  src = fsd.resolvePath(pc, src, pathComponents);
-  iip = fsd.getINodesInPath4Write(src);
-  // Write access is required to set access and modification times
+try (RWTransaction tx = fsd.newRWTransaction().begin()) {
+  Resolver.Result paths = Resolver.resolve(tx, src);
+  if (paths.invalidPath()) {
+throw new InvalidPathException(src);
+  } else if (paths.notFound()) {
+throw new FileNotFoundException(src);
+  }
+  FlatINodesInPath iip = paths.inodesInPath();
   if (fsd.isPermissionEnabled()) {
 fsd.checkPathAccess(pc, iip, FsAction.WRITE);
   }
-  final INode inode = iip.getLastINode();
-  if (inode == null) {
-throw new FileNotFoundException("File/Directory " + src +
-" does not exist.");
-  }
-  boolean changed = unprotectedSetTimes(fsd, inode, mtime, atime, true,
-iip.getLatestSnapshotId());
+  FlatINode.Builder b = new FlatINode.Builder()
+  .mergeFrom(iip.getLastINode());
+  boolean changed = unprotectedSetTimes(fsd, b, mtime, atime, true);
   if (changed) {
-fsd.getEditLog().logTimes(src, mtime, atime);
+tx.putINode(b.id(), b.build());
+tx.logTimes(src, mtime, atime);
+tx.commit();
   }
-} finally {
-  fsd.writeUnlock();
+  return fsd.getAuditFileInfo(iip);
 }
-return fsd.getAuditFileInfo(iip);
   }
 
   static boolean setReplication(
@@ -280,25 +276,23 @@ public class FSDirAttrOp {
 }
   }
 
-  static boolean setTimes(
-  FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
-  int latestSnapshotId) throws QuotaExceededException {
-fsd.writeLock();
-try {
-  return unprotectedSetTimes(fsd, inode, mtime, atime, force,
- latestSnapshotId);
-} finally {
-  fsd.writeUnlock();
-}
-  }
-
-  static boolean unprotectedSetTimes(
-  FSDirectory fsd, String src, long mtime, long atime, boolean force)
-  throws UnresolvedLinkException, QuotaExceededException {
+  static void unprotectedSetTimes(
+  FSDirectory fsd, RWTransaction tx, String src, long mtime, long atime)
+  throws IOException {
 assert fsd.hasWriteLock();
-final INodesInPath i = fsd.getINodesInPath(src, true);
-return unprotectedSetTimes(fsd, i.getLastINode(), mtime, atime,
-   force, i.getLatestSnapshotId());
+Resolver.Result paths = Resolver.resolve(tx, src);
+if (paths.invalidPath()) {
+  throw new InvalidPathException(src);
+} else if (paths.notFound()) {
+  throw new FileNotFoundException(src);
+}
+FlatINodesInPath iip = paths.inodesInPath();
+FlatINode.Builder b = new FlatINode.Builder()
+.mergeFrom(iip.getLastINode());
+boolean changed = unprotectedSetTimes(fsd, b, mtime, atime, true);
+if (changed) {
+  tx.putINode(b.id(), b.build());
+}
   }
 
   /**
@@ 

[20/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/include/leveldb/filter_policy.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/include/leveldb/filter_policy.h
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/include/leveldb/filter_policy.h
new file mode 100644
index 000..1fba080
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/include/leveldb/filter_policy.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// A database can be configured with a custom FilterPolicy object.
+// This object is responsible for creating a small filter from a set
+// of keys.  These filters are stored in leveldb and are consulted
+// automatically by leveldb to decide whether or not to read some
+// information from disk. In many cases, a filter can cut down the
+// number of disk seeks form a handful to a single disk seek per
+// DB::Get() call.
+//
+// Most people will want to use the builtin bloom filter support (see
+// NewBloomFilterPolicy() below).
+
+#ifndef STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
+#define STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
+
+#include <string>
+
+namespace leveldb {
+
+class Slice;
+
+class FilterPolicy {
+ public:
+  virtual ~FilterPolicy();
+
+  // Return the name of this policy.  Note that if the filter encoding
+  // changes in an incompatible way, the name returned by this method
+  // must be changed.  Otherwise, old incompatible filters may be
+  // passed to methods of this type.
+  virtual const char* Name() const = 0;
+
+  // keys[0,n-1] contains a list of keys (potentially with duplicates)
+  // that are ordered according to the user supplied comparator.
+  // Append a filter that summarizes keys[0,n-1] to *dst.
+  //
+  // Warning: do not change the initial contents of *dst.  Instead,
+  // append the newly constructed filter to *dst.
+  virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
+  const = 0;
+
+  // "filter" contains the data appended by a preceding call to
+  // CreateFilter() on this class.  This method must return true if
+  // the key was in the list of keys passed to CreateFilter().
+  // This method may return true or false if the key was not on the
+  // list, but it should aim to return false with a high probability.
+  virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const = 0;
+};
+
+// Return a new filter policy that uses a bloom filter with approximately
+// the specified number of bits per key.  A good value for bits_per_key
+// is 10, which yields a filter with ~ 1% false positive rate.
+//
+// Callers must delete the result after any database that is using the
+// result has been closed.
+//
+// Note: if you are using a custom comparator that ignores some parts
+// of the keys being compared, you must not use NewBloomFilterPolicy()
+// and must provide your own FilterPolicy that also ignores the
+// corresponding parts of the keys.  For example, if the comparator
+// ignores trailing spaces, it would be incorrect to use a
+// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
+// trailing spaces in keys.
+extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key);
+
+}
+
+#endif  // STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
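
For context, a minimal sketch of how a FilterPolicy like the one above is typically wired into the database. This assumes the stock leveldb Options::filter_policy field rather than anything added by this patch; with ~10 bits per key and roughly 7 probes, the theoretical false-positive rate (1 - e^{-kn/m})^k works out near 1%, matching the header comment.

  // Sketch, not part of the patch: attach a bloom filter policy to a DB.
  #include <cassert>
  #include "leveldb/db.h"
  #include "leveldb/filter_policy.h"

  int main() {
    const leveldb::FilterPolicy* policy = leveldb::NewBloomFilterPolicy(10);
    leveldb::Options options;
    options.create_if_missing = true;
    options.filter_policy = policy;   // consulted on reads to skip disk seeks

    leveldb::DB* db;
    leveldb::Status s = leveldb::DB::Open(options, "/tmp/filterdb", &db);
    assert(s.ok());
    // ... reads and writes ...
    delete db;       // close the database first,
    delete policy;   // then delete the policy, as the header requires
    return 0;
  }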

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/include/leveldb/iterator.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/include/leveldb/iterator.h
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/include/leveldb/iterator.h
new file mode 100644
index 000..ad543eb
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/include/leveldb/iterator.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// An iterator yields a sequence of key/value pairs from a source.
+// The following class defines the interface.  Multiple implementations
+// are provided by this library.  In particular, iterators are provided
+// to access the contents of a Table or a DB.
+//
+// Multiple threads can invoke const methods on an Iterator without
+// external synchronization, but if any of the threads may call a
+// non-const method, all threads accessing the same Iterator must use
+// external synchronization.
+
+#ifndef STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
+#define STORAGE_LEVELDB_INCLUDE_ITERATOR_H_
+
+#include "leveldb/slice.h"

[05/50] [abbrv] hadoop git commit: HDFS-8248. Store INodeId instead of the INodeFile object in BlockInfoContiguous.

2016-01-05 Thread wheat9
HDFS-8248. Store INodeId instead of the INodeFile object in BlockInfoContiguous.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72e1828b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72e1828b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72e1828b

Branch: refs/heads/feature-HDFS-8286
Commit: 72e1828bf5afb1ff43a0202783075dcafa266b28
Parents: cf23b98
Author: Haohui Mai 
Authored: Fri Apr 24 15:09:04 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:56 2015 -0700

--
 .../server/blockmanagement/BlockCollection.java |  5 ++
 .../blockmanagement/BlockInfoContiguous.java| 24 ---
 .../server/blockmanagement/BlockManager.java| 75 +++-
 .../hdfs/server/blockmanagement/BlocksMap.java  | 10 +--
 .../blockmanagement/DecommissionManager.java| 11 +--
 .../SequentialBlockIdGenerator.java |  3 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 27 ---
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  4 +-
 .../hadoop/hdfs/server/namenode/INodeId.java|  1 +
 .../hdfs/server/namenode/NamenodeFsck.java  |  4 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  3 +
 .../server/blockmanagement/TestBlockInfo.java   |  8 +--
 .../blockmanagement/TestBlockManager.java   |  9 +++
 .../blockmanagement/TestReplicationPolicy.java  | 10 ++-
 .../TestCommitBlockSynchronization.java |  6 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 33 +
 .../snapshot/TestSnapshotBlocksMap.java | 12 ++--
 .../namenode/snapshot/TestSnapshotDeletion.java |  9 +--
 18 files changed, 154 insertions(+), 100 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72e1828b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 0ee0439..2d3a0f5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -80,4 +80,9 @@ public interface BlockCollection {
* @return whether the block collection is under construction.
*/
   public boolean isUnderConstruction();
+
+  /**
+   * @return the INodeId for the block collection
+   */
+  long getId();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72e1828b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 7a78708..7e0699e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -24,6 +24,8 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.util.LightWeightGSet;
 
+import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
+
 /**
  * BlockInfo class maintains for a given block
  * the {@link BlockCollection} it is part of and datanodes where the replicas 
of 
@@ -34,7 +36,7 @@ public class BlockInfoContiguous extends Block
 implements LightWeightGSet.LinkedElement {
   public static final BlockInfoContiguous[] EMPTY_ARRAY = {};
 
-  private BlockCollection bc;
+  private long bcId;
 
   /** For implementing {@link LightWeightGSet.LinkedElement} interface */
   private LightWeightGSet.LinkedElement nextLinkedElement;
@@ -61,14 +63,14 @@ public class BlockInfoContiguous extends Block
*/
   public BlockInfoContiguous(short replication) {
 this.triplets = new Object[3*replication];
-this.bc = null;
+this.bcId = INVALID_INODE_ID;
 this.replication = replication;
   }
   
   public BlockInfoContiguous(Block blk, short replication) {
 super(blk);
 this.triplets = new Object[3*replication];
-this.bc = null;
+this.bcId = INVALID_INODE_ID;
 this.replication = replication;
   }
 
@@ -79,7 +81,7 @@ public class 

[02/50] [abbrv] hadoop git commit: Change INodeFile#getBlockReplication() to BlockInfoContiguous#getReplication().

2016-01-05 Thread wheat9
Change INodeFile#getBlockReplication() to BlockInfoContiguous#getReplication().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bace6f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bace6f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bace6f3

Branch: refs/heads/feature-HDFS-8286
Commit: 2bace6f36b6032e62c479db57a4d9a15859297eb
Parents: 7a82dbd
Author: Haohui Mai 
Authored: Mon May 4 14:06:24 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:55 2015 -0700

--
 .../blockmanagement/BlockInfoContiguous.java| 17 --
 .../server/blockmanagement/BlockManager.java| 56 ++--
 .../blockmanagement/DecommissionManager.java| 13 ++---
 .../hdfs/server/namenode/FSDirAttrOp.java   | 35 
 .../hdfs/server/namenode/FSDirConcatOp.java | 22 
 .../hdfs/server/namenode/FSEditLogLoader.java   | 12 +++--
 .../hdfs/server/namenode/FSNamesystem.java  |  4 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  3 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |  3 +-
 .../blockmanagement/TestBlockManager.java   |  1 -
 .../blockmanagement/TestReplicationPolicy.java  |  2 -
 11 files changed, 84 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 769046b..7a78708 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -53,6 +53,8 @@ public class BlockInfoContiguous extends Block
*/
   private Object[] triplets;
 
+  private short replication;
+
   /**
* Construct an entry for blocksmap
* @param replication the block's replication factor
@@ -60,12 +62,14 @@ public class BlockInfoContiguous extends Block
   public BlockInfoContiguous(short replication) {
 this.triplets = new Object[3*replication];
 this.bc = null;
+this.replication = replication;
   }
   
   public BlockInfoContiguous(Block blk, short replication) {
 super(blk);
 this.triplets = new Object[3*replication];
 this.bc = null;
+this.replication = replication;
   }
 
   /**
@@ -74,11 +78,18 @@ public class BlockInfoContiguous extends Block
* @param from BlockInfo to copy from.
*/
   protected BlockInfoContiguous(BlockInfoContiguous from) {
-super(from);
-this.triplets = new Object[from.triplets.length];
+this(from, from.getReplication());
 this.bc = from.bc;
   }
 
+  public void setReplication(short replication) {
+this.replication = replication;
+  }
+
+  public short getReplication() {
+return replication;
+  }
+
   public BlockCollection getBlockCollection() {
 return bc;
   }
@@ -362,7 +373,7 @@ public class BlockInfoContiguous extends Block
 if(isComplete()) {
   BlockInfoContiguousUnderConstruction ucBlock =
   new BlockInfoContiguousUnderConstruction(this,
-  getBlockCollection().getPreferredBlockReplication(), s, targets);
+  getReplication(), s, targets);
   ucBlock.setBlockCollection(getBlockCollection());
   return ucBlock;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bace6f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 44868aa..2e9b5b66 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1109,8 +1109,7 @@ public class BlockManager {
   addToInvalidates(b.corrupted, node);
   return;
 } 
-short expectedReplicas =
-b.corrupted.getBlockCollection().getPreferredBlockReplication();
+short expectedReplicas = b.corrupted.getReplication();
 
 // Add replica to the data-node if it is not already there
 if 

[31/50] [abbrv] hadoop git commit: [partial-ns] Add skip_wal option to HDFSDB.

2016-01-05 Thread wheat9
[partial-ns] Add skip_wal option to HDFSDB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2cf9bc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2cf9bc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2cf9bc9

Branch: refs/heads/feature-HDFS-8286
Commit: c2cf9bc97f96699142826bff986243a719113322
Parents: af9ff74
Author: Haohui Mai 
Authored: Mon Nov 3 14:44:26 2014 -0800
Committer: Haohui Mai 
Committed: Fri Jun 12 13:57:00 2015 -0700

--
 .../apache/hadoop/hdfs/hdfsdb/WriteOptions.java |  6 +++
 .../src/main/native/hdfsdb/db/db_impl.cc| 48 +++-
 .../native/hdfsdb/include/leveldb/options.h |  5 +-
 .../src/main/native/jni/bindings.cc |  5 ++
 4 files changed, 41 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2cf9bc9/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/WriteOptions.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/WriteOptions.java
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/WriteOptions.java
index 88ebcad..2dc04bc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/WriteOptions.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/WriteOptions.java
@@ -27,6 +27,11 @@ public class WriteOptions extends NativeObject {
 return this;
   }
 
+  public WriteOptions skipWal(boolean value) {
+skipWal(nativeHandle, value);
+return this;
+  }
+
   @Override
   public void close() {
 if (nativeHandle != 0) {
@@ -38,4 +43,5 @@ public class WriteOptions extends NativeObject {
   private static native long construct();
   private static native void destruct(long handle);
   private static native void sync(long handle, boolean value);
+  private static native void skipWal(long handle, boolean value);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2cf9bc9/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_impl.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_impl.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_impl.cc
index faf5e7d..1225412 100644
--- a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_impl.cc
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_impl.cc
@@ -1183,29 +1183,33 @@ Status DBImpl::Write(const WriteOptions& options, 
WriteBatch* my_batch) {
 WriteBatchInternal::SetSequence(updates, last_sequence + 1);
 last_sequence += WriteBatchInternal::Count(updates);
 
-// Add to log and apply to memtable.  We can release the lock
-// during this phase since &w is currently responsible for logging
-// and protects against concurrent loggers and concurrent writes
-// into mem_.
-{
-  mutex_.Unlock();
-  status = log_->AddRecord(WriteBatchInternal::Contents(updates));
-  bool sync_error = false;
-  if (status.ok() && options.sync) {
-status = logfile_->Sync();
-if (!status.ok()) {
-  sync_error = true;
+if (options.skip_wal) {
+  status = WriteBatchInternal::InsertInto(updates, mem_);
+} else {
+  // Add to log and apply to memtable.  We can release the lock
+  // during this phase since &w is currently responsible for logging
+  // and protects against concurrent loggers and concurrent writes
+  // into mem_.
+  {
+mutex_.Unlock();
+status = log_->AddRecord(WriteBatchInternal::Contents(updates));
+bool sync_error = false;
+if (status.ok() && options.sync) {
+  status = logfile_->Sync();
+  if (!status.ok()) {
+sync_error = true;
+  }
+}
+if (status.ok()) {
+  status = WriteBatchInternal::InsertInto(updates, mem_);
+}
+mutex_.Lock();
+if (sync_error) {
+  // The state of the log file is indeterminate: the log record we
+  // just added may or may not show up when the DB is re-opened.
+  // So we force the DB into a mode where all future writes fail.
+  RecordBackgroundError(status);
 }
-  }
-  if (status.ok()) {
-status = WriteBatchInternal::InsertInto(updates, mem_);
-  }
-  mutex_.Lock();
-  if (sync_error) {
-// The state of the log file is indeterminate: the log record we
-// just added may or may not show up when the DB is re-opened.
-// So we force the DB into a mode where all future writes fail.
-
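
A sketch of what the new option buys a caller, in plain leveldb C++ terms. skip_wal is the field added by this patch (it is not stock leveldb), so writes issued this way can be lost on a crash; the assumption is that the caller has its own recovery path, as the partial-ns edit log does.

  // Sketch: batched updates that go straight to the memtable, skipping the WAL.
  #include "leveldb/db.h"
  #include "leveldb/write_batch.h"

  leveldb::Status BulkLoad(leveldb::DB* db) {
    leveldb::WriteBatch batch;
    batch.Put("a", "1");
    batch.Put("b", "2");

    leveldb::WriteOptions opts;
    opts.sync = false;
    opts.skip_wal = true;   // patch-specific flag: bypasses log_->AddRecord()
    return db->Write(opts, &batch);
  }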

[41/50] [abbrv] hadoop git commit: [partial-ns] Import snappy in hdfsdb.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/alice29.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/alice29.txt 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/alice29.txt
new file mode 100644
index 000..f115686
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/alice29.txt
@@ -0,0 +1,3609 @@
+
+
+
+
+ALICE'S ADVENTURES IN WONDERLAND
+
+  Lewis Carroll
+
+   THE MILLENNIUM FULCRUM EDITION 2.9
+
+
+
+
+CHAPTER I
+
+  Down the Rabbit-Hole
+
+
+  Alice was beginning to get very tired of sitting by her sister
+on the bank, and of having nothing to do:  once or twice she had
+peeped into the book her sister was reading, but it had no
+pictures or conversations in it, `and what is the use of a book,'
+thought Alice `without pictures or conversation?'
+
+  So she was considering in her own mind (as well as she could,
+for the hot day made her feel very sleepy and stupid), whether
+the pleasure of making a daisy-chain would be worth the trouble
+of getting up and picking the daisies, when suddenly a White
+Rabbit with pink eyes ran close by her.
+
+  There was nothing so VERY remarkable in that; nor did Alice
+think it so VERY much out of the way to hear the Rabbit say to
+itself, `Oh dear!  Oh dear!  I shall be late!'  (when she thought
+it over afterwards, it occurred to her that she ought to have
+wondered at this, but at the time it all seemed quite natural);
+but when the Rabbit actually TOOK A WATCH OUT OF ITS WAISTCOAT-
+POCKET, and looked at it, and then hurried on, Alice started to
+her feet, for it flashed across her mind that she had never
+before seen a rabbit with either a waistcoat-pocket, or a watch to
+take out of it, and burning with curiosity, she ran across the
+field after it, and fortunately was just in time to see it pop
+down a large rabbit-hole under the hedge.
+
+  In another moment down went Alice after it, never once
+considering how in the world she was to get out again.
+
+  The rabbit-hole went straight on like a tunnel for some way,
+and then dipped suddenly down, so suddenly that Alice had not a
+moment to think about stopping herself before she found herself
+falling down a very deep well.
+
+  Either the well was very deep, or she fell very slowly, for she
+had plenty of time as she went down to look about her and to
+wonder what was going to happen next.  First, she tried to look
+down and make out what she was coming to, but it was too dark to
+see anything; then she looked at the sides of the well, and
+noticed that they were filled with cupboards and book-shelves;
+here and there she saw maps and pictures hung upon pegs.  She
+took down a jar from one of the shelves as she passed; it was
+labelled `ORANGE MARMALADE', but to her great disappointment it
+was empty:  she did not like to drop the jar for fear of killing
+somebody, so managed to put it into one of the cupboards as she
+fell past it.
+
+  `Well!' thought Alice to herself, `after such a fall as this, I
+shall think nothing of tumbling down stairs!  How brave they'll
+all think me at home!  Why, I wouldn't say anything about it,
+even if I fell off the top of the house!' (Which was very likely
+true.)
+
+  Down, down, down.  Would the fall NEVER come to an end!  `I
+wonder how many miles I've fallen by this time?' she said aloud.
+`I must be getting somewhere near the centre of the earth.  Let
+me see:  that would be four thousand miles down, I think--' (for,
+you see, Alice had learnt several things of this sort in her
+lessons in the schoolroom, and though this was not a VERY good
+opportunity for showing off her knowledge, as there was no one to
+listen to her, still it was good practice to say it over) `--yes,
+that's about the right distance--but then I wonder what Latitude
+or Longitude I've got to?'  (Alice had no idea what Latitude was,
+or Longitude either, but thought they were nice grand words to
+say.)
+
+  Presently she began again.  `I wonder if I shall fall right
+THROUGH the earth!  How funny it'll seem to come out among the
+people that walk with their heads downward!  The Antipathies, I
+think--' (she was rather glad there WAS no one listening, this
+time, as it didn't sound at all the right word) `--but I shall
+have to ask them what the name of the country is, you know.
+Please, Ma'am, is this New Zealand or Australia?' (and she tried
+to curtsey as she spoke--fancy CURTSEYING as you're falling
+through the air!  Do you think you could manage it?)  `And what
+an ignorant little girl she'll think me for asking!  No, it'll
+never do to ask:  perhaps I shall see it written up somewhere.'
+
+  Down, down, down.  There was nothing else to do, so Alice soon

[47/50] [abbrv] hadoop git commit: WIP Various fixes and performance improvements.

2016-01-05 Thread wheat9
WIP Various fixes and performance improvements.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13f7b647
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13f7b647
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13f7b647

Branch: refs/heads/feature-HDFS-8286
Commit: 13f7b64731e05903832eb289ae067f7e24afac41
Parents: 23dbb0f
Author: Haohui Mai 
Authored: Wed Jun 3 14:14:54 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:57:01 2015 -0700

--
 .../hdfs/server/namenode/FSDirectory.java   | 12 ++-
 .../server/namenode/LevelDBROTransaction.java   | 39 ++---
 .../server/namenode/LevelDBRWTransaction.java   |  9 ++-
 .../namenode/LevelDBReplayTransaction.java  |  9 ++-
 .../hdfs/server/namenode/MemDBChildrenView.java |  6 +-
 .../hdfs/server/namenode/LevelDBProfile.java| 83 
 .../java/org/apache/hadoop/hdfs/hdfsdb/DB.java  | 23 +-
 .../org/apache/hadoop/hdfs/hdfsdb/Options.java  |  6 ++
 .../apache/hadoop/hdfs/hdfsdb/ReadOptions.java  |  6 ++
 .../org/apache/hadoop/hdfs/hdfsdb/Snapshot.java | 34 
 .../src/main/native/hdfsdb/db/db_impl.cc| 46 +++
 .../src/main/native/hdfsdb/db/db_impl.h |  4 +
 .../src/main/native/hdfsdb/db/db_test.cc|  8 ++
 .../src/main/native/hdfsdb/db/memtable.cc   |  8 +-
 .../src/main/native/hdfsdb/db/memtable.h| 13 ++-
 .../src/main/native/hdfsdb/include/leveldb/db.h | 11 +++
 .../src/main/native/jni/bindings.cc | 66 +++-
 17 files changed, 355 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13f7b647/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 0e50d8c..b3c6083 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -348,7 +348,17 @@ public class FSDirectory implements Closeable {
 this.enableLevelDb = conf.getBoolean("dfs.partialns", false);
 if (enableLevelDb) {
   String dbPath = conf.get("dfs.partialns.path");
-  Options options = new Options().createIfMissing(true);
+  int writeBufferSize = conf.getInt("dfs.partialns.writebuffer",
+4096 * 1024);
+  long blockCacheSize = conf.getLong(
+  "dfs.partialns.blockcache", 0);
+  Options options = new Options().createIfMissing(true)
+  .writeBufferSize(writeBufferSize);
+
+  if (blockCacheSize != 0) {
+options.blockCacheSize(blockCacheSize);
+  }
+
   this.levelDb = org.apache.hadoop.hdfs.hdfsdb.DB.open(options, dbPath);
   try (RWTransaction tx = newRWTransaction().begin()) {
 tx.putINode(ROOT_INODE_ID, createRootForFlatNS(ns));
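
A rough C++ equivalent of the tuning knobs read from dfs.partialns.* above, assuming the Java Options.writeBufferSize()/blockCacheSize() bindings map onto the stock leveldb fields write_buffer_size and block_cache. The helper name and signature here are illustrative only; the block cache object must outlive the DB and be freed by the caller.

  // Sketch: open a DB with a tuned write buffer and optional LRU block cache.
  #include <cstddef>
  #include <string>
  #include "leveldb/cache.h"
  #include "leveldb/db.h"

  leveldb::DB* OpenPartialNs(const std::string& path,
                             size_t write_buffer_bytes,   // dfs.partialns.writebuffer
                             size_t block_cache_bytes) {  // dfs.partialns.blockcache
    leveldb::Options options;
    options.create_if_missing = true;
    options.write_buffer_size = write_buffer_bytes;
    if (block_cache_bytes != 0) {
      options.block_cache = leveldb::NewLRUCache(block_cache_bytes);
    }
    leveldb::DB* db = nullptr;
    leveldb::Status s = leveldb::DB::Open(options, path, &db);
    return s.ok() ? db : nullptr;
  }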

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13f7b647/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBROTransaction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBROTransaction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBROTransaction.java
index f55ed63..50d8c30 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBROTransaction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBROTransaction.java
@@ -29,37 +29,43 @@ import static 
org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
 
 class LevelDBROTransaction extends ROTransaction {
   private final org.apache.hadoop.hdfs.hdfsdb.DB hdfsdb;
-  private static final ReadOptions OPTIONS = new ReadOptions();
+
+  private Snapshot snapshot;
+  private final ReadOptions options = new ReadOptions();
+  public static final ReadOptions OPTIONS = new ReadOptions();
+
   LevelDBROTransaction(FSDirectory fsd, org.apache.hadoop.hdfs.hdfsdb.DB db) {
 super(fsd);
 this.hdfsdb = db;
   }
 
   LevelDBROTransaction begin() {
-fsd.readLock();
+snapshot = hdfsdb.snapshot();
+options.snapshot(snapshot);
 return this;
   }
 
   @Override
   FlatINode getINode(long id) {
-return getFlatINode(id, hdfsdb);
+return getFlatINode(id, hdfsdb, options);
   }
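
The read path here pins a LevelDB snapshot instead of taking the FSDirectory read lock. The plain leveldb pattern behind LevelDBROTransaction.begin() is sketched below, using only stock C++ API calls; the Java Snapshot/ReadOptions classes are thin bindings over them.

  // Sketch: read against a pinned snapshot, then release it.
  #include <string>
  #include "leveldb/db.h"

  std::string ReadAtSnapshot(leveldb::DB* db, const std::string& key) {
    const leveldb::Snapshot* snap = db->GetSnapshot();
    leveldb::ReadOptions ro;
    ro.snapshot = snap;              // all Gets see one consistent view

    std::string value;
    leveldb::Status s = db->Get(ro, key, &value);
    db->ReleaseSnapshot(snap);       // analogous to closing the transaction
    return s.ok() ? value : std::string();
  }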

[21/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/doc/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/doc/index.html 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/doc/index.html
new file mode 100644
index 000..3ed0ed9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/doc/index.html
@@ -0,0 +1,549 @@
+
+
+
+
+Leveldb
+
+
+
+Leveldb
+Jeff Dean, Sanjay Ghemawat
+
+The leveldb library provides a persistent key value store.  Keys 
and
+values are arbitrary byte arrays.  The keys are ordered within the key
+value store according to a user-specified comparator function.
+
+
+Opening A Database
+
+A leveldb database has a name which corresponds to a file system
+directory.  All of the contents of database are stored in this
+directory.  The following example shows how to open a database,
+creating it if necessary:
+
+
+  #include <cassert>
+  #include "leveldb/db.h"
+
+  leveldb::DB* db;
+  leveldb::Options options;
+  options.create_if_missing = true;
+  leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
+  assert(status.ok());
+  ...
+
+If you want to raise an error if the database already exists, add
+the following line before the leveldb::DB::Open call:
+
+  options.error_if_exists = true;
+
+Status
+
+You may have noticed the leveldb::Status type above.  Values of 
this
+type are returned by most functions in leveldb that may encounter 
an
+error.  You can check if such a result is ok, and also print an
+associated error message:
+
+
+   leveldb::Status s = ...;
+   if (!s.ok()) cerr  s.ToString()  endl;
+
+Closing A Database
+
+When you are done with a database, just delete the database object.
+Example:
+
+
+  ... open the db as described above ...
+  ... do something with db ...
+  delete db;
+
+Reads And Writes
+
+The database provides Put, Delete, and 
Get methods to
+modify/query the database.  For example, the following code
+moves the value stored under key1 to key2.
+
+  std::string value;
+  leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value);
+  if (s.ok()) s = db->Put(leveldb::WriteOptions(), key2, value);
+  if (s.ok()) s = db->Delete(leveldb::WriteOptions(), key1);
+
+
+Atomic Updates
+
+Note that if the process dies after the Put of key2 but before the
+delete of key1, the same value may be left stored under multiple keys.
+Such problems can be avoided by using the WriteBatch class to
+atomically apply a set of updates:
+
+
+  #include "leveldb/write_batch.h"
+  ...
+  std::string value;
+  leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value);
+  if (s.ok()) {
+leveldb::WriteBatch batch;
+batch.Delete(key1);
+batch.Put(key2, value);
+s = db->Write(leveldb::WriteOptions(), &batch);
+  }
+
+The WriteBatch holds a sequence of edits to be made to the 
database,
+and these edits within the batch are applied in order.  Note that we
+called Delete before Put so that if 
key1 is identical to key2,
+we do not end up erroneously dropping the value entirely.
+
+Apart from its atomicity benefits, WriteBatch may also be used to
+speed up bulk updates by placing lots of individual mutations into the
+same batch.
+
+Synchronous Writes
+By default, each write to leveldb is asynchronous: it
+returns after pushing the write from the process into the operating
+system.  The transfer from operating system memory to the underlying
+persistent storage happens asynchronously.  The sync flag
+can be turned on for a particular write to make the write operation
+not return until the data being written has been pushed all the way to
+persistent storage.  (On Posix systems, this is implemented by calling
+either fsync(...) or fdatasync(...) or
+msync(..., MS_SYNC) before the write operation returns.)
+
+  leveldb::WriteOptions write_options;
+  write_options.sync = true;
+  db->Put(write_options, ...);
+
+Asynchronous writes are often more than a thousand times as fast as
+synchronous writes.  The downside of asynchronous writes is that a
+crash of the machine may cause the last few updates to be lost.  Note
+that a crash of just the writing process (i.e., not a reboot) will not
+cause any loss since even when sync is false, an update
+is pushed from the process memory into the operating system before it
+is considered done.
+
+
+Asynchronous writes can often be used safely.  For example, when
+loading a large amount of data into the database you can handle lost
+updates by restarting the bulk load after a crash.  A hybrid scheme is
+also possible where every Nth write is synchronous, and in the event
+of a crash, the bulk load is restarted just after the last synchronous
+write finished by the previous run.  (The synchronous write can update
+a marker that describes where to restart on a crash.)
+
+
+WriteBatch provides an alternative to asynchronous 

[10/50] [abbrv] hadoop git commit: [partial-ns] Implement getAdditionalBlock().

2016-01-05 Thread wheat9
[partial-ns] Implement getAdditionalBlock().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a36f5bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a36f5bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a36f5bc

Branch: refs/heads/feature-HDFS-8286
Commit: 6a36f5bcc4edfba4e9779b0fd19923179138b551
Parents: e9c9c72b
Author: Haohui Mai 
Authored: Thu May 14 17:02:18 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:57 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 109 +---
 .../hdfs/server/blockmanagement/BlocksMap.java  |   8 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 247 ++-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  11 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  73 +-
 .../hdfs/server/namenode/RWTransaction.java |   4 +
 .../hadoop/hdfs/server/namenode/Resolver.java   |   4 +
 7 files changed, 298 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a36f5bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6139e37..9b18f45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
@@ -64,6 +65,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBloc
 import 
org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.namenode.FlatINodeFileFeature;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
@@ -638,17 +640,42 @@ public class BlockManager {
   }
 
   /**
-   * Convert a specified block of the file to a complete block.
-   * @param bc file
-   * @param blkIndex  block index in the file
-   * @throws IOException if the block does not have at least a minimal number
-   * of replicas reported from data-nodes.
+   * Commit or complete the last block. Return the length of the last block
*/
+  public long commitOrCompleteLastBlock(FlatINodeFileFeature file, Block
+  commitBlock) throws IOException {
+if(commitBlock == null)
+  return 0; // not committing, this is a block allocation retry
+BlockInfoContiguous lastBlock = getStoredBlock(file.lastBlock());
+if(lastBlock == null)
+  return 0; // no blocks in file yet
+if(lastBlock.isComplete())
+  return lastBlock.getNumBytes(); // already completed (e.g. by syncBlock)
+
+commitBlock((BlockInfoContiguousUnderConstruction) lastBlock, commitBlock);
+if(countNodes(lastBlock).liveReplicas() >= minReplication) {
+  return completeBlock(lastBlock, false).getNumBytes();
+}
+return lastBlock.getNumBytes();
+  }
+
   private BlockInfoContiguous completeBlock(final BlockCollection bc,
   final int blkIndex, boolean force) throws IOException {
 if(blkIndex < 0)
   return null;
-BlockInfoContiguous curBlock = bc.getBlocks()[blkIndex];
+BlockInfoContiguous block = completeBlock(bc.getBlocks()[blkIndex], force);
+// replace penultimate block in file
+bc.setBlock(blkIndex, block);
+return block;
+  }
+
+  /**
+   * Convert a specified block of the file to a complete block.
+   * @throws IOException if the block does not have at least a minimal number
+   * of replicas reported from data-nodes.
+   */
+  private BlockInfoContiguous completeBlock(BlockInfoContiguous curBlock,
+  boolean force) throws IOException {
 if(curBlock.isComplete())
   return curBlock;
 BlockInfoContiguousUnderConstruction ucBlock =
@@ -661,9 +688,7 @@ public class BlockManager {
   throw new IOException(
   "Cannot complete block: block has not been COMMITTED by the client");
 

[42/50] [abbrv] hadoop git commit: [partial-ns] Import snappy in hdfsdb.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/snappy.h
--
diff --git a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/snappy.h 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/snappy.h
new file mode 100644
index 000..e879e79
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/snappy.h
@@ -0,0 +1,184 @@
+// Copyright 2005 and onwards Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// A light-weight compression algorithm.  It is designed for speed of
+// compression and decompression, rather than for the utmost in space
+// savings.
+//
+// For getting better compression ratios when you are compressing data
+// with long repeated sequences or compressing data that is similar to
+// other data, while still compressing fast, you might look at first
+// using BMDiff and then compressing the output of BMDiff with
+// Snappy.
+
+#ifndef UTIL_SNAPPY_SNAPPY_H__
+#define UTIL_SNAPPY_SNAPPY_H__
+
+#include 
+#include 
+
+#include "snappy-stubs-public.h"
+
+namespace snappy {
+  class Source;
+  class Sink;
+
+  // 
+  // Generic compression/decompression routines.
+  // 
+
+  // Compress the bytes read from "*source" and append to "*sink". Return the
+  // number of bytes written.
+  size_t Compress(Source* source, Sink* sink);
+
+  // Find the uncompressed length of the given stream, as given by the header.
+  // Note that the true length could deviate from this; the stream could e.g.
+  // be truncated.
+  //
+  // Also note that this leaves "*source" in a state that is unsuitable for
+  // further operations, such as RawUncompress(). You will need to rewind
+  // or recreate the source yourself before attempting any further calls.
+  bool GetUncompressedLength(Source* source, uint32* result);
+
+  // 
+  // Higher-level string based routines (should be sufficient for most users)
+  // 
+
+  // Sets "*output" to the compressed version of "input[0,input_length-1]".
+  // Original contents of *output are lost.
+  //
+  // REQUIRES: "input[]" is not an alias of "*output".
+  size_t Compress(const char* input, size_t input_length, string* output);
+
+  // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
+  // Original contents of "*uncompressed" are lost.
+  //
+  // REQUIRES: "compressed[]" is not an alias of "*uncompressed".
+  //
+  // returns false if the message is corrupted and could not be decompressed
+  bool Uncompress(const char* compressed, size_t compressed_length,
+  string* uncompressed);
+
+
+  // 
+  // Lower-level character array based routines.  May be useful for
+  // efficiency reasons in certain circumstances.
+  // 
+
+  // REQUIRES: "compressed" must point to an area of memory that is at
+  // least "MaxCompressedLength(input_length)" bytes in length.
+  //
+  // Takes the data stored in "input[0..input_length]" and stores
+  // it in the array pointed to by "compressed".
+  //
+  // 
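
The string-based routines declared above are the simplest way to use the library. As a rough, self-contained sketch of a round trip through that API (not part of the commit; the payload below is made up), compiled against the bundled snappy:

#include <cassert>
#include <string>

#include "snappy.h"

int main() {
  // Any byte buffer works; a repetitive payload compresses well.
  std::string original(10000, 'x');

  // Sets "compressed" to the compressed form; prior contents are discarded.
  std::string compressed;
  snappy::Compress(original.data(), original.size(), &compressed);

  // Uncompress returns false if the input is corrupted.
  std::string restored;
  bool ok = snappy::Uncompress(compressed.data(), compressed.size(), &restored);
  assert(ok && restored == original);
  return 0;
}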

[26/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_iter.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_iter.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_iter.cc
new file mode 100644
index 000..3b2035e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_iter.cc
@@ -0,0 +1,317 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/db_iter.h"
+
+#include "db/filename.h"
+#include "db/db_impl.h"
+#include "db/dbformat.h"
+#include "leveldb/env.h"
+#include "leveldb/iterator.h"
+#include "port/port.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/random.h"
+
+namespace leveldb {
+
+#if 0
+static void DumpInternalIter(Iterator* iter) {
+  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+ParsedInternalKey k;
+if (!ParseInternalKey(iter->key(), &k)) {
+  fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str());
+} else {
+  fprintf(stderr, "@ '%s'\n", k.DebugString().c_str());
+}
+  }
+}
+#endif
+
+namespace {
+
+// Memtables and sstables that make the DB representation contain
+// (userkey,seq,type) => uservalue entries.  DBIter
+// combines multiple entries for the same userkey found in the DB
+// representation into a single entry while accounting for sequence
+// numbers, deletion markers, overwrites, etc.
+class DBIter: public Iterator {
+ public:
+  // Which direction is the iterator currently moving?
+  // (1) When moving forward, the internal iterator is positioned at
+  // the exact entry that yields this->key(), this->value()
+  // (2) When moving backwards, the internal iterator is positioned
+  // just before all entries whose user key == this->key().
+  enum Direction {
+kForward,
+kReverse
+  };
+
+  DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s,
+ uint32_t seed)
+  : db_(db),
+user_comparator_(cmp),
+iter_(iter),
+sequence_(s),
+direction_(kForward),
+valid_(false),
+rnd_(seed),
+bytes_counter_(RandomPeriod()) {
+  }
+  virtual ~DBIter() {
+delete iter_;
+  }
+  virtual bool Valid() const { return valid_; }
+  virtual Slice key() const {
+assert(valid_);
+return (direction_ == kForward) ? ExtractUserKey(iter_->key()) : saved_key_;
+  }
+  virtual Slice value() const {
+assert(valid_);
+return (direction_ == kForward) ? iter_->value() : saved_value_;
+  }
+  virtual Status status() const {
+if (status_.ok()) {
+  return iter_->status();
+} else {
+  return status_;
+}
+  }
+
+  virtual void Next();
+  virtual void Prev();
+  virtual void Seek(const Slice& target);
+  virtual void SeekToFirst();
+  virtual void SeekToLast();
+
+ private:
+  void FindNextUserEntry(bool skipping, std::string* skip);
+  void FindPrevUserEntry();
+  bool ParseKey(ParsedInternalKey* key);
+
+  inline void SaveKey(const Slice& k, std::string* dst) {
+dst->assign(k.data(), k.size());
+  }
+
+  inline void ClearSavedValue() {
+if (saved_value_.capacity() > 1048576) {
+  std::string empty;
+  swap(empty, saved_value_);
+} else {
+  saved_value_.clear();
+}
+  }
+
+  // Pick next gap with average value of config::kReadBytesPeriod.
+  ssize_t RandomPeriod() {
+return rnd_.Uniform(2*config::kReadBytesPeriod);
+  }
+
+  DBImpl* db_;
+  const Comparator* const user_comparator_;
+  Iterator* const iter_;
+  SequenceNumber const sequence_;
+
+  Status status_;
+  std::string saved_key_; // == current key when direction_==kReverse
+  std::string saved_value_;   // == current raw value when direction_==kReverse
+  Direction direction_;
+  bool valid_;
+
+  Random rnd_;
+  ssize_t bytes_counter_;
+
+  // No copying allowed
+  DBIter(const DBIter&);
+  void operator=(const DBIter&);
+};
+
+inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
+  Slice k = iter_->key();
+  ssize_t n = k.size() + iter_->value().size();
+  bytes_counter_ -= n;
+  while (bytes_counter_ < 0) {
+bytes_counter_ += RandomPeriod();
+db_->RecordReadSample(k);
+  }
+  if (!ParseInternalKey(k, ikey)) {
+status_ = Status::Corruption("corrupted internal key in DBIter");
+return false;
+  } else {
+return true;
+  }
+}
+
+void DBIter::Next() {
+  assert(valid_);
+
+  if (direction_ == kReverse) {  // Switch directions?
+direction_ = kForward;
+// iter_ is pointing just before the entries for this->key(),
+// so advance into the range of entries for this->key() and then
+// use the normal skipping code below.
+if (!iter_->Valid()) {
+  iter_->SeekToFirst();
+  
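
The comment above explains that DBIter folds the internal (userkey,seq,type) entries into one visible entry per user key; applications never build a DBIter themselves but reach it through DB::NewIterator(). A hedged sketch of that public path (database path and keys here are illustrative, not from the commit):

#include <cstdio>

#include "leveldb/db.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/hdfsdb-iter-demo", &db);
  if (!s.ok()) {
    std::fprintf(stderr, "open failed: %s\n", s.ToString().c_str());
    return 1;
  }

  db->Put(leveldb::WriteOptions(), "a", "1");
  db->Put(leveldb::WriteOptions(), "b", "2");

  // NewIterator() returns a DBIter internally; only the newest live
  // version of each user key is surfaced during the scan.
  leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    std::printf("%s => %s\n", it->key().ToString().c_str(),
                it->value().ToString().c_str());
  }
  delete it;
  delete db;
  return 0;
}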

[44/50] [abbrv] hadoop git commit: [partial-ns] Import snappy in hdfsdb.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/Makefile.am
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/Makefile.am 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/Makefile.am
new file mode 100644
index 000..735bc12
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/Makefile.am
@@ -0,0 +1,23 @@
+ACLOCAL_AMFLAGS = -I m4
+
+# Library.
+lib_LTLIBRARIES = libsnappy.la
+libsnappy_la_SOURCES = snappy.cc snappy-sinksource.cc snappy-stubs-internal.cc snappy-c.cc
+libsnappy_la_LDFLAGS = -version-info $(SNAPPY_LTVERSION)
+
+include_HEADERS = snappy.h snappy-sinksource.h snappy-stubs-public.h snappy-c.h
+noinst_HEADERS = snappy-internal.h snappy-stubs-internal.h snappy-test.h
+
+# Unit tests and benchmarks.
+snappy_unittest_CPPFLAGS = $(gflags_CFLAGS) $(GTEST_CPPFLAGS)
+snappy_unittest_SOURCES = snappy_unittest.cc snappy-test.cc
+snappy_unittest_LDFLAGS = $(GTEST_LDFLAGS)
+snappy_unittest_LDADD = libsnappy.la $(UNITTEST_LIBS) $(gflags_LIBS) $(GTEST_LIBS)
+TESTS = snappy_unittest
+noinst_PROGRAMS = $(TESTS)
+
+EXTRA_DIST = autogen.sh testdata/alice29.txt testdata/asyoulik.txt testdata/baddata1.snappy testdata/baddata2.snappy testdata/baddata3.snappy testdata/geo.protodata testdata/fireworks.jpeg testdata/html testdata/html_x_4 testdata/kppkn.gtb testdata/lcet10.txt testdata/paper-100k.pdf testdata/plrabn12.txt testdata/urls.10K
+dist_doc_DATA = ChangeLog COPYING INSTALL NEWS README format_description.txt framing_format.txt
+
+libtool: $(LIBTOOL_DEPS)
+   $(SHELL) ./config.status --recheck

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/NEWS
--
diff --git a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/NEWS 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/NEWS
new file mode 100644
index 000..27a5b17
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/NEWS
@@ -0,0 +1,128 @@
+Snappy v1.1.2, February 28th 2014:
+
+This is a maintenance release with no changes to the actual library
+source code.
+
+  * Stop distributing benchmark data files that have unclear
+or unsuitable licensing.
+
+  * Add support for padding chunks in the framing format.
+
+
+Snappy v1.1.1, October 15th 2013:
+
+  * Add support for uncompressing to iovecs (scatter I/O).
+The bulk of this patch was contributed by Mohit Aron.
+
+  * Speed up decompression by ~2%; much more so (~13-20%) on
+a few benchmarks on given compilers and CPUs.
+
+  * Fix a few issues with MSVC compilation.
+
+  * Support truncated test data in the benchmark.
+
+
+Snappy v1.1.0, January 18th 2013:
+
+  * Snappy now uses 64 kB block size instead of 32 kB. On average,
+this means it compresses about 3% denser (more so for some
+inputs), at the same or better speeds.
+
+  * libsnappy no longer depends on iostream.
+
+  * Some small performance improvements in compression on x86
+(0.5–1%).
+
+  * Various portability fixes for ARM-based platforms, for MSVC,
+and for GNU/Hurd.
+
+
+Snappy v1.0.5, February 24th 2012:
+
+  * More speed improvements. Exactly how big will depend on
+the architecture:
+
+- 3–10% faster decompression for the base case (x86-64).
+
+- ARMv7 and higher can now use unaligned accesses,
+  and will see about 30% faster decompression and
+  20–40% faster compression.
+
+- 32-bit platforms (ARM and 32-bit x86) will see 2–5%
+  faster compression.
+
+These are all cumulative (e.g., ARM gets all three speedups).
+
+  * Fixed an issue where the unit test would crash on system
+with less than 256 MB address space available,
+e.g. some embedded platforms.
+
+  * Added a framing format description, for use over e.g. HTTP,
+or for a command-line compressor. We do not have any
+implementations of this at the current point, but there seems
+to be enough of a general interest in the topic.
+Also make the format description slightly clearer.
+
+  * Remove some compile-time warnings in -Wall
+(mostly signed/unsigned comparisons), for easier embedding
+into projects that use -Wall -Werror.
+
+
+Snappy v1.0.4, September 15th 2011:
+
+  * Speeded up the decompressor somewhat; typically about 2–8%
+for Core i7, in 64-bit mode (comparable for Opteron).
+Somewhat more for some tests, almost no gain for others.
+  
+  * Make Snappy compile on certain platforms it didn't before
+(Solaris with SunPro C++, HP-UX, AIX).
+
+  * Correct some minor errors in the format description.
+
+
+Snappy v1.0.3, June 2nd 2011:
+
+  * Speeded up the decompressor somewhat; about 3-6% for Core 2,
+6-13% for Core i7, and 5-12% for Opteron (all in 64-bit mode).
+
+  * Added compressed 

[27/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_bench.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_bench.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_bench.cc
new file mode 100644
index 000..fc46d89
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/db_bench.cc
@@ -0,0 +1,979 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include 
+#include 
+#include 
+#include "db/db_impl.h"
+#include "db/version_set.h"
+#include "leveldb/cache.h"
+#include "leveldb/db.h"
+#include "leveldb/env.h"
+#include "leveldb/write_batch.h"
+#include "port/port.h"
+#include "util/crc32c.h"
+#include "util/histogram.h"
+#include "util/mutexlock.h"
+#include "util/random.h"
+#include "util/testutil.h"
+
+// Comma-separated list of operations to run in the specified order
+//   Actual benchmarks:
+//  fillseq   -- write N values in sequential key order in async mode
+//  fillrandom-- write N values in random key order in async mode
+//  overwrite -- overwrite N values in random key order in async mode
+//  fillsync  -- write N/100 values in random key order in sync mode
+//  fill100K  -- write N/1000 100K values in random order in async mode
+//  deleteseq -- delete N keys in sequential order
+//  deleterandom  -- delete N keys in random order
+//  readseq   -- read N times sequentially
+//  readreverse   -- read N times in reverse order
+//  readrandom-- read N times in random order
+//  readmissing   -- read N missing keys in random order
+//  readhot   -- read N times in random order from 1% section of DB
+//  seekrandom-- N random seeks
+//  crc32c-- repeated crc32c of 4K of data
+//  acquireload   -- load N*1000 times
+//   Meta operations:
+//  compact -- Compact the entire DB
+//  stats   -- Print DB stats
+//  sstables-- Print sstable info
+//  heapprofile -- Dump a heap profile (if supported by this port)
+static const char* FLAGS_benchmarks =
+"fillseq,"
+"fillsync,"
+"fillrandom,"
+"overwrite,"
+"readrandom,"
+"readrandom,"  // Extra run to allow previous compactions to quiesce
+"readseq,"
+"readreverse,"
+"compact,"
+"readrandom,"
+"readseq,"
+"readreverse,"
+"fill100K,"
+"crc32c,"
+"snappycomp,"
+"snappyuncomp,"
+"acquireload,"
+;
+
+// Number of key/values to place in database
+static int FLAGS_num = 100;
+
+// Number of read operations to do.  If negative, do FLAGS_num reads.
+static int FLAGS_reads = -1;
+
+// Number of concurrent threads to run.
+static int FLAGS_threads = 1;
+
+// Size of each value
+static int FLAGS_value_size = 100;
+
+// Arrange to generate values that shrink to this fraction of
+// their original size after compression
+static double FLAGS_compression_ratio = 0.5;
+
+// Print histogram of operation timings
+static bool FLAGS_histogram = false;
+
+// Number of bytes to buffer in memtable before compacting
+// (initialized to default value by "main")
+static int FLAGS_write_buffer_size = 0;
+
+// Number of bytes to use as a cache of uncompressed data.
+// Negative means use default settings.
+static int FLAGS_cache_size = -1;
+
+// Maximum number of files to keep open at the same time (use default if == 0)
+static int FLAGS_open_files = 0;
+
+// Bloom filter bits per key.
+// Negative means use default settings.
+static int FLAGS_bloom_bits = -1;
+
+// If true, do not destroy the existing database.  If you set this
+// flag and also specify a benchmark that wants a fresh database, that
+// benchmark will fail.
+static bool FLAGS_use_existing_db = false;
+
+// Use the db with the following name.
+static const char* FLAGS_db = NULL;
+
+namespace leveldb {
+
+namespace {
+
+// Helper for quickly generating random data.
+class RandomGenerator {
+ private:
+  std::string data_;
+  int pos_;
+
+ public:
+  RandomGenerator() {
+// We use a limited amount of data over and over again and ensure
+// that it is larger than the compression window (32KB), and also
+// large enough to serve all typical value sizes we want to write.
+Random rnd(301);
+std::string piece;
+while (data_.size() < 1048576) {
+  // Add a short fragment that is as compressible as specified
+  // by FLAGS_compression_ratio.
+  test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
+  data_.append(piece);
+}
+pos_ = 0;
+  }
+
+  Slice Generate(size_t len) {
+if (pos_ + len > data_.size()) {
+  pos_ = 0;
+  assert(len < data_.size());
+}
+pos_ += 
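
The FLAGS_* knobs documented above (write buffer, block cache, bloom bits, open files) end up on a leveldb::Options object when the benchmark opens its database. A hedged sketch of that mapping, with invented constants standing in for the parsed flag values:

#include <string>

#include "leveldb/cache.h"
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"

// Illustrative stand-ins for FLAGS_write_buffer_size, FLAGS_cache_size,
// FLAGS_bloom_bits and FLAGS_open_files.
static const size_t kWriteBufferBytes = 4 << 20;
static const size_t kCacheBytes = 8 << 20;
static const int kBloomBitsPerKey = 10;
static const int kMaxOpenFiles = 1000;

leveldb::DB* OpenBenchDb(const std::string& path) {
  leveldb::Options options;
  options.create_if_missing = true;
  options.write_buffer_size = kWriteBufferBytes;            // memtable size
  options.block_cache = leveldb::NewLRUCache(kCacheBytes);  // uncompressed block cache
  options.filter_policy = leveldb::NewBloomFilterPolicy(kBloomBitsPerKey);
  options.max_open_files = kMaxOpenFiles;

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, path, &db);
  return s.ok() ? db : nullptr;
}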

[22/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/doc/bench/db_bench_sqlite3.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/doc/bench/db_bench_sqlite3.cc
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/doc/bench/db_bench_sqlite3.cc
new file mode 100644
index 000..e63aaa8
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/doc/bench/db_bench_sqlite3.cc
@@ -0,0 +1,718 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include 
+#include 
+#include 
+#include "util/histogram.h"
+#include "util/random.h"
+#include "util/testutil.h"
+
+// Comma-separated list of operations to run in the specified order
+//   Actual benchmarks:
+//
+//   fillseq   -- write N values in sequential key order in async mode
+//   fillseqsync   -- write N/100 values in sequential key order in sync mode
+//   fillseqbatch  -- batch write N values in sequential key order in async 
mode
+//   fillrandom-- write N values in random key order in async mode
+//   fillrandsync  -- write N/100 values in random key order in sync mode
+//   fillrandbatch -- batch write N values in sequential key order in async 
mode
+//   overwrite -- overwrite N values in random key order in async mode
+//   fillrand100K  -- write N/1000 100K values in random order in async mode
+//   fillseq100K   -- write N/1000 100K values in sequential order in async 
mode
+//   readseq   -- read N times sequentially
+//   readrandom-- read N times in random order
+//   readrand100K  -- read N/1000 100K values in sequential order in async mode
+static const char* FLAGS_benchmarks =
+"fillseq,"
+"fillseqsync,"
+"fillseqbatch,"
+"fillrandom,"
+"fillrandsync,"
+"fillrandbatch,"
+"overwrite,"
+"overwritebatch,"
+"readrandom,"
+"readseq,"
+"fillrand100K,"
+"fillseq100K,"
+"readseq,"
+"readrand100K,"
+;
+
+// Number of key/values to place in database
+static int FLAGS_num = 100;
+
+// Number of read operations to do.  If negative, do FLAGS_num reads.
+static int FLAGS_reads = -1;
+
+// Size of each value
+static int FLAGS_value_size = 100;
+
+// Print histogram of operation timings
+static bool FLAGS_histogram = false;
+
+// Arrange to generate values that shrink to this fraction of
+// their original size after compression
+static double FLAGS_compression_ratio = 0.5;
+
+// Page size. Default 1 KB.
+static int FLAGS_page_size = 1024;
+
+// Number of pages.
+// Default cache size = FLAGS_page_size * FLAGS_num_pages = 4 MB.
+static int FLAGS_num_pages = 4096;
+
+// If true, do not destroy the existing database.  If you set this
+// flag and also specify a benchmark that wants a fresh database, that
+// benchmark will fail.
+static bool FLAGS_use_existing_db = false;
+
+// If true, we allow batch writes to occur
+static bool FLAGS_transaction = true;
+
+// If true, we enable Write-Ahead Logging
+static bool FLAGS_WAL_enabled = true;
+
+// Use the db with the following name.
+static const char* FLAGS_db = NULL;
+
+inline
+static void ExecErrorCheck(int status, char *err_msg) {
+  if (status != SQLITE_OK) {
+fprintf(stderr, "SQL error: %s\n", err_msg);
+sqlite3_free(err_msg);
+exit(1);
+  }
+}
+
+inline
+static void StepErrorCheck(int status) {
+  if (status != SQLITE_DONE) {
+fprintf(stderr, "SQL step error: status = %d\n", status);
+exit(1);
+  }
+}
+
+inline
+static void ErrorCheck(int status) {
+  if (status != SQLITE_OK) {
+fprintf(stderr, "sqlite3 error: status = %d\n", status);
+exit(1);
+  }
+}
+
+inline
+static void WalCheckpoint(sqlite3* db_) {
+  // Flush all writes to disk
+  if (FLAGS_WAL_enabled) {
+sqlite3_wal_checkpoint_v2(db_, NULL, SQLITE_CHECKPOINT_FULL, NULL, NULL);
+  }
+}
+
+namespace leveldb {
+
+// Helper for quickly generating random data.
+namespace {
+class RandomGenerator {
+ private:
+  std::string data_;
+  int pos_;
+
+ public:
+  RandomGenerator() {
+// We use a limited amount of data over and over again and ensure
+// that it is larger than the compression window (32KB), and also
+// large enough to serve all typical value sizes we want to write.
+Random rnd(301);
+std::string piece;
+while (data_.size() < 1048576) {
+  // Add a short fragment that is as compressible as specified
+  // by FLAGS_compression_ratio.
+  test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
+  data_.append(piece);
+}
+pos_ = 0;
+  }
+
+  Slice Generate(int len) {
+if (pos_ + len > data_.size()) {
+  pos_ = 0;
+  assert(len < data_.size());
+}
+pos_ += len;
+return Slice(data_.data() + pos_ - len, len);
+  
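
In the sqlite3 comparison benchmark above, FLAGS_WAL_enabled switches the journal to write-ahead logging before the workload and WalCheckpoint() flushes the WAL back into the main file afterwards. A minimal sketch of the same sequence against the plain sqlite3 C API (file name and workload are placeholders):

#include <cstdio>

#include <sqlite3.h>

int main() {
  sqlite3* db = nullptr;
  if (sqlite3_open("/tmp/bench.db", &db) != SQLITE_OK) {
    std::fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
    return 1;
  }

  // What FLAGS_WAL_enabled turns on: write-ahead logging for the journal.
  char* err_msg = nullptr;
  if (sqlite3_exec(db, "PRAGMA journal_mode = WAL",
                   nullptr, nullptr, &err_msg) != SQLITE_OK) {
    std::fprintf(stderr, "SQL error: %s\n", err_msg);
    sqlite3_free(err_msg);
  }

  // ... run the write workload here ...

  // What WalCheckpoint() does: force WAL contents into the main database file.
  sqlite3_wal_checkpoint_v2(db, nullptr, SQLITE_CHECKPOINT_FULL, nullptr, nullptr);
  sqlite3_close(db);
  return 0;
}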

[30/50] [abbrv] hadoop git commit: [partial-ns] Add sync options in WriteOptions.

2016-01-05 Thread wheat9
[partial-ns] Add sync options in WriteOptions.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af9ff74e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af9ff74e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af9ff74e

Branch: refs/heads/feature-HDFS-8286
Commit: af9ff74e8ba89fd7025c025b00eedbad7e38d991
Parents: 8a8b70d
Author: Haohui Mai 
Authored: Mon Sep 22 16:06:02 2014 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:57:00 2015 -0700

--
 .../main/java/org/apache/hadoop/hdfs/hdfsdb/NativeObject.java  | 2 +-
 .../main/java/org/apache/hadoop/hdfs/hdfsdb/WriteOptions.java  | 6 ++
 .../hadoop-hdfsdb/src/main/native/jni/bindings.cc  | 5 +
 3 files changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af9ff74e/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/NativeObject.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/NativeObject.java
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/NativeObject.java
index 0290e84..311485f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/NativeObject.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/NativeObject.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.hdfsdb;
 
 abstract class NativeObject implements AutoCloseable {
   static {
-System.loadLibrary("hdfs-jni");
+System.loadLibrary("hdfsdb-jni");
   }
   protected long nativeHandle;
   protected long nativeHandle() { return nativeHandle; }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af9ff74e/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/WriteOptions.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/WriteOptions.java
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/WriteOptions.java
index cd6d2b7..88ebcad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/WriteOptions.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/WriteOptions.java
@@ -22,6 +22,11 @@ public class WriteOptions extends NativeObject {
 super(construct());
   }
 
+  public WriteOptions sync(boolean value) {
+sync(nativeHandle, value);
+return this;
+  }
+
   @Override
   public void close() {
 if (nativeHandle != 0) {
@@ -32,4 +37,5 @@ public class WriteOptions extends NativeObject {
 
   private static native long construct();
   private static native void destruct(long handle);
+  private static native void sync(long handle, boolean value);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af9ff74e/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/jni/bindings.cc
--
diff --git a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/jni/bindings.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/jni/bindings.cc
index 6682159..0b03766 100644
--- a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/jni/bindings.cc
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/jni/bindings.cc
@@ -224,6 +224,11 @@ jlong JNICALL Java_org_apache_hadoop_hdfs_hdfsdb_WriteOptions_construct(JNIEnv *
   return uintptr(new leveldb::WriteOptions());
 }
 
+void JNICALL Java_org_apache_hadoop_hdfs_hdfsdb_WriteOptions_sync(JNIEnv *, jclass, jlong handle, jboolean value) {
+  leveldb::WriteOptions *options = reinterpret_cast<leveldb::WriteOptions*>(handle);
+  options->sync = value;
+}
+
 void JNICALL Java_org_apache_hadoop_hdfs_hdfsdb_WriteOptions_destruct(JNIEnv *, jclass, jlong handle) {
   delete reinterpret_cast<leveldb::WriteOptions*>(handle);
 }
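
On the native side the new sync(boolean) binding only flips leveldb::WriteOptions::sync, trading write throughput for per-write durability. A short sketch of what the flag means at the leveldb layer (the helper name and keys are illustrative, not part of the commit):

#include "leveldb/db.h"

// With sync=true the write is flushed to stable storage before Put()
// returns; with the default sync=false the OS may still buffer it, which
// is faster but can lose the most recent writes on a machine crash.
void PutDurably(leveldb::DB* db, const leveldb::Slice& key,
                const leveldb::Slice& value) {
  leveldb::WriteOptions options;
  options.sync = true;  // the setting WriteOptions.sync(true) toggles over JNI
  leveldb::Status s = db->Put(options, key, value);
  if (!s.ok()) {
    // Propagate or log the error as appropriate for the caller.
  }
}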



[15/50] [abbrv] hadoop git commit: [partial-ns] Mavenize the build of HDFSDB.

2016-01-05 Thread wheat9
[partial-ns] Mavenize the build of HDFSDB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f99a9d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f99a9d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f99a9d1

Branch: refs/heads/feature-HDFS-8286
Commit: 6f99a9d1eb18f94c5af5351e7985466ebe9485da
Parents: 4a6419f
Author: Haohui Mai 
Authored: Thu Sep 18 15:56:05 2014 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:59 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfsdb/pom.xml   | 68 
 .../src/main/native/CMakeLists.txt  | 11 
 .../src/main/native/hdfsdb/CMakeLists.txt   | 64 ++
 hadoop-hdfs-project/pom.xml |  1 +
 4 files changed, 144 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f99a9d1/hadoop-hdfs-project/hadoop-hdfsdb/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfsdb/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfsdb/pom.xml
new file mode 100644
index 000..1eef71e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/pom.xml
@@ -0,0 +1,68 @@
+
+
+http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  4.0.0
+  
+org.apache.hadoop
+hadoop-project-dist
+3.0.0-SNAPSHOT
+../../hadoop-project-dist
+  
+  org.apache.hadoop
+  hadoop-hdfsdb
+  3.0.0-SNAPSHOT
+  Apache Hadoop HDFSDB
+  Apache Hadoop HDFSDB
+  jar
+
+  
+UTF-8
+  
+
+  
+
+  junit
+  junit
+  test
+
+  
+
+  
+
+  
+org.apache.maven.plugins
+maven-antrun-plugin
+
+  
+make
+compile
+run
+
+  
+
+
+  
+
+
+  
+
+  
+
+  
+
+  
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f99a9d1/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/CMakeLists.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/CMakeLists.txt
new file mode 100644
index 000..d21b897
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/CMakeLists.txt
@@ -0,0 +1,11 @@
+cmake_minimum_required(VERSION 2.8)
+
+set(CMAKE_BUILD_TYPE Release)
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -pedantic -Wextra 
-Wno-unused-parameter -Wno-sign-compare -fvisibility=hidden")
+
+if(APPLE)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+endif()
+
+add_subdirectory(hdfsdb)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f99a9d1/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/CMakeLists.txt
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/CMakeLists.txt
new file mode 100644
index 000..eb2b5b2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/CMakeLists.txt
@@ -0,0 +1,64 @@
+cmake_minimum_required(VERSION 2.8)
+PROJECT(hdfsdb)
+
+ENABLE_TESTING()
+
+set(VERSION_MAJOR 1)
+set(VERSION_MINOR 15)
+set(VERRION_PATCH 0)
+set(VERSION_STRING ${VERSION_MAJOR}.${VERSION_MINOR})
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR} include)
+
+set(HDFSDBSRCS db/builder.cc db/db_impl.cc db/db_iter.cc db/dbformat.cc
+  db/filename.cc db/log_reader.cc db/log_writer.cc db/memtable.cc
+  db/repair.cc db/table_cache.cc db/version_edit.cc db/version_set.cc
+  db/write_batch.cc
+  helpers/memenv/memenv.cc
+  table/block_builder.cc table/block.cc table/filter_block.cc table/format.cc
+  table/iterator.cc table/merger.cc table/table_builder.cc table/table.cc
+  table/two_level_iterator.cc
+  util/arena.cc util/bloom.cc util/cache.cc util/coding.cc util/comparator.cc
+  util/crc32c.cc util/env.cc util/filter_policy.cc util/hash.cc 
util/histogram.cc
+  util/logging.cc util/options.cc util/status.cc
+)
+
+if(APPLE)
+add_definitions(-DOS_MACOSX -DLEVELDB_PLATFORM_POSIX)
+set(PORT_SRCS port/port_posix.cc util/env_posix.cc)
+endif()
+
+add_library(hdfsdb STATIC ${HDFSDBSRCS} ${PORT_SRCS})
+
+set(TESTHARNESS_SRCS util/testutil.cc util/testharness.cc)
+add_library(hdfsdb-test-harness ${TESTHARNESS_SRCS})
+
+macro(hdfsdb_tests)
+  get_filename_component(_tname ${ARGN} NAME_WE)
+  add_executable(${_tname} ${ARGN})
+  target_link_libraries(${_tname} hdfsdb hdfsdb-test-harness)
+  

[43/50] [abbrv] hadoop git commit: [partial-ns] Import snappy in hdfsdb.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/snappy-test.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/snappy-test.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/snappy-test.cc
new file mode 100644
index 000..4619410
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/snappy-test.cc
@@ -0,0 +1,606 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Various stubs for the unit tests for the open-source version of Snappy.
+
+#include "snappy-test.h"
+
+#ifdef HAVE_WINDOWS_H
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#endif
+
+#include 
+
+DEFINE_bool(run_microbenchmarks, true,
+"Run microbenchmarks before doing anything else.");
+
+namespace snappy {
+
+string ReadTestDataFile(const string& base, size_t size_limit) {
+  string contents;
+  const char* srcdir = getenv("srcdir");  // This is set by Automake.
+  string prefix;
+  if (srcdir) {
+prefix = string(srcdir) + "/";
+  }
+  file::GetContents(prefix + "testdata/" + base, &contents, file::Defaults()
+      ).CheckSuccess();
+  if (size_limit > 0) {
+contents = contents.substr(0, size_limit);
+  }
+  return contents;
+}
+
+string ReadTestDataFile(const string& base) {
+  return ReadTestDataFile(base, 0);
+}
+
+string StringPrintf(const char* format, ...) {
+  char buf[4096];
+  va_list ap;
+  va_start(ap, format);
+  vsnprintf(buf, sizeof(buf), format, ap);
+  va_end(ap);
+  return buf;
+}
+
+bool benchmark_running = false;
+int64 benchmark_real_time_us = 0;
+int64 benchmark_cpu_time_us = 0;
+string *benchmark_label = NULL;
+int64 benchmark_bytes_processed = 0;
+
+void ResetBenchmarkTiming() {
+  benchmark_real_time_us = 0;
+  benchmark_cpu_time_us = 0;
+}
+
+#ifdef WIN32
+LARGE_INTEGER benchmark_start_real;
+FILETIME benchmark_start_cpu;
+#else  // WIN32
+struct timeval benchmark_start_real;
+struct rusage benchmark_start_cpu;
+#endif  // WIN32
+
+void StartBenchmarkTiming() {
+#ifdef WIN32
+  QueryPerformanceCounter(&benchmark_start_real);
+  FILETIME dummy;
+  CHECK(GetProcessTimes(
+      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
+#else
+  gettimeofday(&benchmark_start_real, NULL);
+  if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
+perror("getrusage(RUSAGE_SELF)");
+exit(1);
+  }
+#endif
+  benchmark_running = true;
+}
+
+void StopBenchmarkTiming() {
+  if (!benchmark_running) {
+return;
+  }
+
+#ifdef WIN32
+  LARGE_INTEGER benchmark_stop_real;
+  LARGE_INTEGER benchmark_frequency;
+  QueryPerformanceCounter(&benchmark_stop_real);
+  QueryPerformanceFrequency(&benchmark_frequency);
+
+  double elapsed_real = static_cast<double>(
+      benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
+      benchmark_frequency.QuadPart;
+  benchmark_real_time_us += elapsed_real * 1e6 + 0.5;
+
+  FILETIME benchmark_stop_cpu, dummy;
+  CHECK(GetProcessTimes(
+      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));
+
+  ULARGE_INTEGER start_ulargeint;
+  start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
+  start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;
+
+  ULARGE_INTEGER stop_ulargeint;
+  stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
+  stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;
+
+  benchmark_cpu_time_us +=
+  (stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
+#else  // WIN32
+  struct timeval 

hadoop git commit: HDFS-7779. Support changing ownership, group and replication in HDFS Web UI. Contributed by Ravi Prakash.

2016-01-05 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fc6c94046 -> 79dc18422


HDFS-7779. Support changing ownership, group and replication in HDFS Web UI. 
Contributed by Ravi Prakash.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79dc1842
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79dc1842
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79dc1842

Branch: refs/heads/branch-2
Commit: 79dc18422489b2b165b7ac35bb3f00cefd78fcec
Parents: fc6c940
Author: Haohui Mai <whe...@apache.org>
Authored: Tue Jan 5 13:43:32 2016 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Tue Jan 5 13:44:40 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../src/main/webapps/hdfs/explorer.html |  8 --
 .../src/main/webapps/hdfs/explorer.js   | 30 
 3 files changed, 38 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79dc1842/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 843bdce1..25e010e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -910,6 +910,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9552. Document types of permission checks performed for HDFS
 operations. (cnauroth)
 
+HDFS-7779. Support changing ownership, group and replication in HDFS Web
+UI. (Ravi Prakash via wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79dc1842/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index fb4397f..3fcacf9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -20,6 +20,7 @@
   
 
 
+
 
 Browsing HDFS
   
@@ -231,11 +232,11 @@
   {type|helper_to_directory}{permission|helper_to_permission}
   {aclBit|helper_to_acl_bit}
   
-{owner}
-{group}
+{owner}
+{group}
 {length|fmt_bytes}
 {#helper_date_tostring value="{modificationTime}"/}
-{replication}
+{replication}
 {blockSize|fmt_bytes}
 {pathSuffix}
 
@@ -262,6 +263,7 @@
 
 
 
+
 
 
 

hadoop git commit: HDFS-7779. Support changing ownership, group and replication in HDFS Web UI. Contributed by Ravi Prakash.

2016-01-05 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 28bd13801 -> cea0972fa


HDFS-7779. Support changing ownership, group and replication in HDFS Web UI. 
Contributed by Ravi Prakash.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cea0972f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cea0972f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cea0972f

Branch: refs/heads/trunk
Commit: cea0972fa13c4c3f6d6a12179f7e65552d1ae873
Parents: 28bd138
Author: Haohui Mai <whe...@apache.org>
Authored: Tue Jan 5 13:43:32 2016 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Tue Jan 5 13:44:16 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../src/main/webapps/hdfs/explorer.html |  8 --
 .../src/main/webapps/hdfs/explorer.js   | 30 
 3 files changed, 38 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cea0972f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 207ef91..364f685 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1794,6 +1794,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9552. Document types of permission checks performed for HDFS
 operations. (cnauroth)
 
+HDFS-7779. Support changing ownership, group and replication in HDFS Web
+UI. (Ravi Prakash via wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cea0972f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index fb4397f..3fcacf9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -20,6 +20,7 @@
   
 
 
+
 
 Browsing HDFS
   
@@ -231,11 +232,11 @@
   {type|helper_to_directory}{permission|helper_to_permission}
   {aclBit|helper_to_acl_bit}
   
-{owner}
-{group}
+{owner}
+{group}
 {length|fmt_bytes}
 {#helper_date_tostring value="{modificationTime}"/}
-{replication}
+{replication}
 {blockSize|fmt_bytes}
 {pathSuffix}
 
@@ -262,6 +263,7 @@
 
 
 
+
 
 
 

hadoop git commit: HDFS-7779. Support changing ownership, group and replication in HDFS Web UI. Contributed by Ravi Prakash.

2016-01-05 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 f372d3faa -> 2aa99e6df


HDFS-7779. Support changing ownership, group and replication in HDFS Web UI. 
Contributed by Ravi Prakash.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2aa99e6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2aa99e6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2aa99e6d

Branch: refs/heads/branch-2.8
Commit: 2aa99e6df00883a3b0562ecf4e9a88343b334f48
Parents: f372d3f
Author: Haohui Mai <whe...@apache.org>
Authored: Tue Jan 5 13:43:32 2016 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Tue Jan 5 13:45:10 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../src/main/webapps/hdfs/explorer.html |  8 --
 .../src/main/webapps/hdfs/explorer.js   | 30 
 3 files changed, 38 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2aa99e6d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4d9e99b..87f71de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -869,6 +869,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8831. Trash Support for deletion in HDFS encryption zone. (xyao)
 
+HDFS-7779. Support changing ownership, group and replication in HDFS Web
+UI. (Ravi Prakash via wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2aa99e6d/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index fb4397f..3fcacf9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -20,6 +20,7 @@
   
 
 
+
 
 Browsing HDFS
   
@@ -231,11 +232,11 @@
   {type|helper_to_directory}{permission|helper_to_permission}
   {aclBit|helper_to_acl_bit}
   
-{owner}
-{group}
+{owner}
+{group}
 {length|fmt_bytes}
 {#helper_date_tostring value="{modificationTime}"/}
-{replication}
+{replication}
 {blockSize|fmt_bytes}
 {pathSuffix}
 
@@ -262,6 +263,7 @@
 
 
 
+
 
 
 

hadoop git commit: HDFS-9605. Add links to failed volumes to explorer.html in HDFS Web UI. Contributed by Archana T.

2016-01-05 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 79dc18422 -> 150c9a80e


HDFS-9605. Add links to failed volumes to explorer.html in HDFS Web UI. 
Contributed by Archana T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/150c9a80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/150c9a80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/150c9a80

Branch: refs/heads/branch-2
Commit: 150c9a80ee3685e33cea9e2c835503d2b8d11e2c
Parents: 79dc184
Author: Haohui Mai <whe...@apache.org>
Authored: Tue Jan 5 13:55:55 2016 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Tue Jan 5 13:56:08 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html   | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/150c9a80/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 25e010e..1d929a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1676,6 +1676,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7553. fix the TestDFSUpgradeWithHA due to BindException.
 (Xiao Chen via cnauroth)
 
+HDFS-9605. Add links to failed volumes to explorer.html in HDFS Web UI.
+(Archana T via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/150c9a80/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 3fcacf9..8bfc0c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -35,6 +35,7 @@
   
 Overview
 Datanodes
+Datanode 
Volume Failures
 Snapshot
 Startup 
Progress
 



hadoop git commit: HDFS-9605. Add links to failed volumes to explorer.html in HDFS Web UI. Contributed by Archana T.

2016-01-05 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 2aa99e6df -> 6dcf3cb6a


HDFS-9605. Add links to failed volumes to explorer.html in HDFS Web UI. 
Contributed by Archana T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6dcf3cb6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6dcf3cb6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6dcf3cb6

Branch: refs/heads/branch-2.8
Commit: 6dcf3cb6a1b5def54265f9af8290fce5996af5d7
Parents: 2aa99e6
Author: Haohui Mai <whe...@apache.org>
Authored: Tue Jan 5 13:55:55 2016 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Tue Jan 5 13:56:18 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html   | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dcf3cb6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 87f71de..5cb0fcc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1632,6 +1632,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7553. fix the TestDFSUpgradeWithHA due to BindException.
 (Xiao Chen via cnauroth)
 
+HDFS-9605. Add links to failed volumes to explorer.html in HDFS Web UI.
+(Archana T via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dcf3cb6/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 3fcacf9..8bfc0c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -35,6 +35,7 @@
   
 Overview
 Datanodes
+Datanode 
Volume Failures
 Snapshot
 Startup 
Progress
 



hadoop git commit: HDFS-9605. Add links to failed volumes to explorer.html in HDFS Web UI. Contributed by Archana T.

2016-01-05 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk cea0972fa -> dec8fedb6


HDFS-9605. Add links to failed volumes to explorer.html in HDFS Web UI. 
Contributed by Archana T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dec8fedb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dec8fedb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dec8fedb

Branch: refs/heads/trunk
Commit: dec8fedb65f6797c22af17ecc901b56a29836da3
Parents: cea0972
Author: Haohui Mai <whe...@apache.org>
Authored: Tue Jan 5 13:55:55 2016 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Tue Jan 5 13:55:55 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html   | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dec8fedb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 364f685..6427a74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2556,6 +2556,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7553. fix the TestDFSUpgradeWithHA due to BindException.
 (Xiao Chen via cnauroth)
 
+HDFS-9605. Add links to failed volumes to explorer.html in HDFS Web UI.
+(Archana T via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dec8fedb/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 3fcacf9..8bfc0c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -35,6 +35,7 @@
   
 Overview
 Datanodes
+Datanode 
Volume Failures
 Snapshot
 Startup 
Progress
 



[36/50] [abbrv] hadoop git commit: [partial-ns] Import snappy in hdfsdb.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/kppkn.gtb
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/kppkn.gtb 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/kppkn.gtb
new file mode 100644
index 000..8ccea79
Binary files /dev/null and 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/testdata/kppkn.gtb 
differ



[24/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/skiplist.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/skiplist.h 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/skiplist.h
new file mode 100644
index 000..af85be6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/db/skiplist.h
@@ -0,0 +1,379 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Thread safety
+// -
+//
+// Writes require external synchronization, most likely a mutex.
+// Reads require a guarantee that the SkipList will not be destroyed
+// while the read is in progress.  Apart from that, reads progress
+// without any internal locking or synchronization.
+//
+// Invariants:
+//
+// (1) Allocated nodes are never deleted until the SkipList is
+// destroyed.  This is trivially guaranteed by the code since we
+// never delete any skip list nodes.
+//
+// (2) The contents of a Node except for the next/prev pointers are
+// immutable after the Node has been linked into the SkipList.
+// Only Insert() modifies the list, and it is careful to initialize
+// a node and use release-stores to publish the nodes in one or
+// more lists.
+//
+// ... prev vs. next pointer ordering ...
+
+#include 
+#include 
+#include "port/port.h"
+#include "util/arena.h"
+#include "util/random.h"
+
+namespace leveldb {
+
+class Arena;
+
+template<typename Key, class Comparator>
+class SkipList {
+ private:
+  struct Node;
+
+ public:
+  // Create a new SkipList object that will use "cmp" for comparing keys,
+  // and will allocate memory using "*arena".  Objects allocated in the arena
+  // must remain allocated for the lifetime of the skiplist object.
+  explicit SkipList(Comparator cmp, Arena* arena);
+
+  // Insert key into the list.
+  // REQUIRES: nothing that compares equal to key is currently in the list.
+  void Insert(const Key& key);
+
+  // Returns true iff an entry that compares equal to key is in the list.
+  bool Contains(const Key& key) const;
+
+  // Iteration over the contents of a skip list
+  class Iterator {
+   public:
+// Initialize an iterator over the specified list.
+// The returned iterator is not valid.
+explicit Iterator(const SkipList* list);
+
+// Returns true iff the iterator is positioned at a valid node.
+bool Valid() const;
+
+// Returns the key at the current position.
+// REQUIRES: Valid()
+const Key& key() const;
+
+// Advances to the next position.
+// REQUIRES: Valid()
+void Next();
+
+// Advances to the previous position.
+// REQUIRES: Valid()
+void Prev();
+
+// Advance to the first entry with a key >= target
+void Seek(const Key& target);
+
+// Position at the first entry in list.
+// Final state of iterator is Valid() iff list is not empty.
+void SeekToFirst();
+
+// Position at the last entry in list.
+// Final state of iterator is Valid() iff list is not empty.
+void SeekToLast();
+
+   private:
+const SkipList* list_;
+Node* node_;
+// Intentionally copyable
+  };
+
+ private:
+  enum { kMaxHeight = 12 };
+
+  // Immutable after construction
+  Comparator const compare_;
+  Arena* const arena_;// Arena used for allocations of nodes
+
+  Node* const head_;
+
+  // Modified only by Insert().  Read racily by readers, but stale
+  // values are ok.
+  port::AtomicPointer max_height_;   // Height of the entire list
+
+  inline int GetMaxHeight() const {
+    return static_cast<int>(
+        reinterpret_cast<intptr_t>(max_height_.NoBarrier_Load()));
+  }
+
+  // Read/written only by Insert().
+  Random rnd_;
+
+  Node* NewNode(const Key& key, int height);
+  int RandomHeight();
+  bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
+
+  // Return true if key is greater than the data stored in "n"
+  bool KeyIsAfterNode(const Key& key, Node* n) const;
+
+  // Return the earliest node that comes at or after key.
+  // Return NULL if there is no such node.
+  //
+  // If prev is non-NULL, fills prev[level] with pointer to previous
+  // node at "level" for every level in [0..max_height_-1].
+  Node* FindGreaterOrEqual(const Key& key, Node** prev) const;
+
+  // Return the latest node with a key < key.
+  // Return head_ if there is no such node.
+  Node* FindLessThan(const Key& key) const;
+
+  // Return the last node in the list.
+  // Return head_ if list is empty.
+  Node* FindLast() const;
+
+  // No copying allowed
+  SkipList(const SkipList&);
+  void operator=(const SkipList&);
+};
+
+// Implementation details follow
+template<typename Key, class Comparator>
+struct SkipList<Key, Comparator>::Node {
+  explicit Node(const Key& k) : key(k) { }
+
+  Key const key;
+
+  // 
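
The SkipList declared above is leveldb's internal ordered index for memtables rather than a public API, but its contract (unique keys, external synchronization for writers, arena-owned nodes) can be exercised directly. A hedged sketch with a toy comparator (all names below are illustrative):

#include <cstdio>

#include "db/skiplist.h"
#include "util/arena.h"

// The Comparator template parameter must return <0, 0 or >0, like this toy one.
struct IntComparator {
  int operator()(const int& a, const int& b) const {
    if (a < b) return -1;
    if (a > b) return +1;
    return 0;
  }
};

void SkipListDemo() {
  leveldb::Arena arena;  // nodes stay alive as long as the arena does
  leveldb::SkipList<int, IntComparator> list(IntComparator(), &arena);

  // Writers need external synchronization; keys must be unique.
  list.Insert(3);
  list.Insert(1);
  list.Insert(2);

  std::printf("contains 2? %d\n", list.Contains(2) ? 1 : 0);

  // Readers only need the list to outlive the scan.
  leveldb::SkipList<int, IntComparator>::Iterator iter(&list);
  for (iter.SeekToFirst(); iter.Valid(); iter.Next()) {
    std::printf("%d\n", iter.key());
  }
}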

[29/50] [abbrv] hadoop git commit: [partial-ns] Implement JNI bindings for HDFSDB.

2016-01-05 Thread wheat9
[partial-ns] Implement JNI bindings for HDFSDB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a8b70db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a8b70db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a8b70db

Branch: refs/heads/feature-HDFS-8286
Commit: 8a8b70db882607a991f896e9e651dc9328e7b98a
Parents: cb5ba73
Author: Haohui Mai 
Authored: Fri Nov 7 18:06:22 2014 -0800
Committer: Haohui Mai 
Committed: Fri Jun 12 13:57:00 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   5 +
 hadoop-hdfs-project/hadoop-hdfsdb/pom.xml   |  25 ++
 .../java/org/apache/hadoop/hdfs/hdfsdb/DB.java  |  70 +
 .../org/apache/hadoop/hdfs/hdfsdb/Iterator.java |  75 ++
 .../apache/hadoop/hdfs/hdfsdb/NativeObject.java |  35 +++
 .../org/apache/hadoop/hdfs/hdfsdb/Options.java  |  73 ++
 .../apache/hadoop/hdfs/hdfsdb/ReadOptions.java  |  35 +++
 .../apache/hadoop/hdfs/hdfsdb/WriteBatch.java   |  50 
 .../apache/hadoop/hdfs/hdfsdb/WriteOptions.java |  35 +++
 .../src/main/native/CMakeLists.txt  |   1 +
 .../src/main/native/jni/CMakeLists.txt  |  13 +
 .../src/main/native/jni/bindings.cc | 254 +++
 hadoop-project/pom.xml  |   5 +
 13 files changed, 676 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a8b70db/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 34338d1..e36b409 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -56,6 +56,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
 
   org.apache.hadoop
+  hadoop-hdfsdb
+  compile
+
+
+  org.apache.hadoop
   hadoop-common
   test
   test-jar

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a8b70db/hadoop-hdfs-project/hadoop-hdfsdb/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfsdb/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfsdb/pom.xml
index 1eef71e..34a5b3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfsdb/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/pom.xml
@@ -43,6 +43,31 @@
   
 
   
+org.codehaus.mojo
+native-maven-plugin
+
+  
+compile
+
+  javah
+
+
+  ${env.JAVA_HOME}/bin/javah
+  
+
org.apache.hadoop.hdfs.hdfsdb.NativeObject
+
org.apache.hadoop.hdfs.hdfsdb.DB
+
org.apache.hadoop.hdfs.hdfsdb.Iterator
+
org.apache.hadoop.hdfs.hdfsdb.Options
+
org.apache.hadoop.hdfs.hdfsdb.ReadOptions
+
org.apache.hadoop.hdfs.hdfsdb.WriteOptions
+
org.apache.hadoop.hdfs.hdfsdb.WriteBatch
+  
+  
${project.build.directory}/native/javah
+
+  
+
+  
+  
 org.apache.maven.plugins
 maven-antrun-plugin
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a8b70db/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/DB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/DB.java
 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/DB.java
new file mode 100644
index 000..0355dcc
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/java/org/apache/hadoop/hdfs/hdfsdb/DB.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.hdfsdb;
+
+import java.io.IOException;
+
+public class DB extends NativeObject {
+  public static DB open(Options options, String path) throws 

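The JNI layer above exposes HDFSDB (a LevelDB fork) to Java through thin wrapper classes. For orientation, a minimal usage sketch follows: Options.createIfMissing(true) and DB.open(Options, String) match usage elsewhere in this patch series, while the put/get/close calls are assumptions about the wrapper surface (the real signatures live in bindings.cc), not code from this commit.

    import org.apache.hadoop.hdfs.hdfsdb.DB;
    import org.apache.hadoop.hdfs.hdfsdb.Options;

    public class HdfsDbOpenSketch {
      public static void main(String[] args) throws Exception {
        // Options.createIfMissing(true) and DB.open(options, path) appear later
        // in this patch series (see the FSDirectory change for HDFS-8286).
        Options options = new Options().createIfMissing(true);
        DB db = DB.open(options, "/tmp/hdfsdb-demo");   // example path, not from the patch
        try {
          // Hypothetical accessors: the wrapper is LevelDB-like, but the exact
          // put/get signatures (and any ReadOptions/WriteOptions arguments) are
          // defined in bindings.cc and are not shown in this archive.
          db.put("key".getBytes("UTF-8"), "value".getBytes("UTF-8"));
          byte[] value = db.get("key".getBytes("UTF-8"));
          System.out.println(value == null ? "miss" : new String(value, "UTF-8"));
        } finally {
          db.close();   // assumed to release the native handle via NativeObject
        }
      }
    }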
[49/50] [abbrv] hadoop git commit: [partial-ns] Implement DBChildrenView.

2016-01-05 Thread wheat9
[partial-ns] Implement DBChildrenView.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d7972d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d7972d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d7972d4

Branch: refs/heads/feature-HDFS-8286
Commit: 2d7972d4473a5a7c3a4cf2fbca6e23b4a7d38ca6
Parents: c2cf9bc
Author: Haohui Mai 
Authored: Wed May 27 18:10:47 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:57:01 2015 -0700

--
 .../hdfs/server/namenode/DBChildrenView.java| 31 ++
 .../hdfs/server/namenode/FSDirDeleteOp.java | 37 +---
 .../hdfs/server/namenode/FSDirRenameOp.java | 18 ++--
 .../server/namenode/FSDirStatAndListingOp.java  | 99 ++--
 .../hdfs/server/namenode/FSDirectory.java   |  6 +-
 .../hdfs/server/namenode/MemDBChildrenView.java | 36 +++
 .../hdfs/server/namenode/ROTransaction.java |  6 +-
 .../hdfs/server/namenode/RWTransaction.java |  5 +-
 .../hdfs/server/namenode/Transaction.java   |  2 +-
 9 files changed, 161 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7972d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DBChildrenView.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DBChildrenView.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DBChildrenView.java
new file mode 100644
index 000..c46acaa
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DBChildrenView.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.Closeable;
+import java.nio.ByteBuffer;
+import java.util.Map;
+
+abstract class DBChildrenView implements Closeable, Iterable > {
+  abstract int size();
+  abstract void seekTo(ByteBuffer start);
+  boolean isEmpty() {
+return size() == 0;
+  }
+}
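DBChildrenView abstracts a directory's child list so callers such as FSDirStatAndListingOp can iterate and seek without knowing whether the backing store is the in-memory map or a database. The archive stripped the generic type of Iterable above; the sketch below assumes Map.Entry<ByteBuffer, Long> (child name bytes mapped to an inode id) and is a hypothetical in-memory stand-in, not the MemDBChildrenView added by this patch.

    import java.io.Closeable;
    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.util.Iterator;
    import java.util.Map;
    import java.util.NavigableMap;
    import java.util.TreeMap;

    // Hypothetical in-memory children view; the element type is assumed.
    class SortedChildrenViewSketch
        implements Closeable, Iterable<Map.Entry<ByteBuffer, Long>> {
      private final NavigableMap<ByteBuffer, Long> children = new TreeMap<>();
      private ByteBuffer start;                 // null means iterate from the beginning

      int size() { return children.size(); }
      boolean isEmpty() { return size() == 0; }
      void seekTo(ByteBuffer s) { start = s; }  // resume at the first child >= s
      void put(ByteBuffer name, long inodeId) { children.put(name, inodeId); }

      @Override
      public Iterator<Map.Entry<ByteBuffer, Long>> iterator() {
        NavigableMap<ByteBuffer, Long> view =
            start == null ? children : children.tailMap(start, true);
        return view.entrySet().iterator();
      }

      @Override
      public void close() throws IOException {
        // Nothing to release for the in-memory case; a database-backed view
        // would close its iterator or snapshot here.
      }
    }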

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7972d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 74dcf46..af253ad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -26,7 +26,9 @@ import 
org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.util.ChunkedArrayList;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.List;
+import java.util.Map;
 
 import static org.apache.hadoop.util.Time.now;
 
@@ -165,7 +167,8 @@ class FSDirDeleteOp {
 
 long mtime = now();
 // Unlink the target directory from directory tree
-long filesRemoved = delete(tx, paths, collectedBlocks, removedUCFiles, mtime);
+long filesRemoved = delete(tx, paths, collectedBlocks, removedUCFiles,
+   mtime);
 if (filesRemoved < 0) {
   return null;
 }
@@ -216,7 +219,8 @@ class FSDirDeleteOp {
*/
   private static long unprotectedDelete(
   RWTransaction tx, Resolver.Result paths,
-  BlocksMapUpdateInfo collectedBlocks, List removedUCFiles, long mtime) {
+  BlocksMapUpdateInfo collectedBlocks, List removedUCFiles, long mtime)
+  throws IOException {
 // TODO: Update quota
 FlatINode parent = paths.inodesInPath().getLastINode(-2);
 FlatINode inode = paths.inodesInPath().getLastINode();
@@ -236,21 

[07/50] [abbrv] hadoop git commit: Update quota usage for snapshots.

2016-01-05 Thread wheat9
Update quota usage for snapshots.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d8590d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d8590d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d8590d9

Branch: refs/heads/feature-HDFS-8286
Commit: 1d8590d949e5ed30afadcb67d4781dc188115564
Parents: 2bace6f
Author: Haohui Mai 
Authored: Tue May 5 14:22:54 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:56 2015 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java   | 5 ++---
 .../org/apache/hadoop/hdfs/server/namenode/INodeFile.java | 7 +++
 .../server/namenode/snapshot/FileWithSnapshotFeature.java | 4 +---
 3 files changed, 6 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d8590d9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 950b5ce..5f87f11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1051,14 +1051,13 @@ public class FSEditLogLoader {
   // what about an old-version fsync() where fsync isn't called
   // until several blocks in?
   newBI = new BlockInfoContiguousUnderConstruction(
-  newBlock, file.getPreferredBlockReplication());
+  newBlock, file.getFileReplication());
 } else {
   // OP_CLOSE should add finalized blocks. This code path
   // is only executed when loading edits written by prior
   // versions of Hadoop. Current versions always log
   // OP_ADD operations as each block is allocated.
-  newBI = new BlockInfoContiguous(newBlock,
-  file.getPreferredBlockReplication());
+  newBI = new BlockInfoContiguous(newBlock, file.getFileReplication());
 }
 fsNamesys.getBlockManager().addBlockCollection(newBI, file);
 file.addBlock(newBI);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d8590d9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index b97ea90..fb25e9d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -595,12 +595,12 @@ public class INodeFile extends INodeWithAdditionalFields
 if (bsp != null) {
   List storageTypes = bsp.chooseStorageTypes(replication);
   for (StorageType t : storageTypes) {
-if (!t.supportTypeQuota()) {
-  continue;
+if (t.supportTypeQuota()) {
+  counts.addTypeSpace(t, ssDeltaNoReplication);
 }
-counts.addTypeSpace(t, ssDeltaNoReplication);
   }
 }
+
 return counts;
   }
 
@@ -727,7 +727,6 @@ public class INodeFile extends INodeWithAdditionalFields
   blocks = allBlocks;
 }
 
-final short replication = getPreferredBlockReplication();
 for (BlockInfoContiguous b : blocks) {
   long blockSize = b.isComplete() ? b.getNumBytes() :
   getPreferredBlockSize();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d8590d9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index 555a662..3ff4dbb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -150,9 +150,7 @@ public class FileWithSnapshotFeature implements 
INode.Feature {

[01/50] [abbrv] hadoop git commit: [partial-ns] Implement FlatINodeFileFeature.

2016-01-05 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/feature-HDFS-8286 [created] 2425034b6


[partial-ns] Implement FlatINodeFileFeature.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9335b2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9335b2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9335b2c

Branch: refs/heads/feature-HDFS-8286
Commit: c9335b2c6390694eeed189760b08d879b46f685a
Parents: 8cc95f6
Author: Haohui Mai 
Authored: Mon Nov 10 22:29:30 2014 -0800
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:54 2015 -0700

--
 .../hadoop/hdfs/server/namenode/Encoding.java   |  31 ++
 .../hadoop/hdfs/server/namenode/FlatINode.java  | 107 ++-
 .../server/namenode/FlatINodeFeatureId.java |  72 +
 .../server/namenode/FlatINodeFileFeature.java   | 301 +++
 4 files changed, 507 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9335b2c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Encoding.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Encoding.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Encoding.java
index e971176..afd0222 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Encoding.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Encoding.java
@@ -51,6 +51,14 @@ class Encoding {
 }
   }
 
+  static int computeRawVarint32Size(final int value) {
+if ((value & (0xffffffff <<  7)) == 0) return 1;
+if ((value & (0xffffffff << 14)) == 0) return 2;
+if ((value & (0xffffffff << 21)) == 0) return 3;
+if ((value & (0xffffffff << 28)) == 0) return 4;
+return 5;
+  }
+
   private static int computeRawVarint64Size(final long value) {
 if ((value & (0xffffffffffffffffL <<  7)) == 0) return 1;
 if ((value & (0xffffffffffffffffL << 14)) == 0) return 2;
@@ -63,4 +71,27 @@ class Encoding {
 if ((value & (0xffffffffffffffffL << 63)) == 0) return 9;
 return 10;
   }
+
+  static String readString(ByteBuffer buf) {
+int size = readRawVarint32(buf, buf.position());
+byte[] r  = new byte[size];
+ByteBuffer b = ((ByteBuffer) buf.slice().position(computeRawVarint32Size
+  (size)));
+b.get(r);
+return new String(r);
+  }
+
+  static int readRawVarint32(ByteBuffer buf, int off) {
+int r = 0;
+byte b = (byte) 0x80;
+for (int i = 0; i < 5 && (b & 0x80) != 0; ++i) {
+  b = buf.get(off + i);
+  r = (r << 7) | (b & 0x7f);
+}
+return r;
+  }
+
+  static int computeArraySize(int length) {
+return computeRawVarint32Size(length) + length;
+  }
 }
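The Encoding helpers use the protobuf varint layout: an int is stored in 7-bit groups, so the size is 1 byte below 2^7, 2 bytes below 2^14, and so on, and computeArraySize is that length prefix plus the payload. A small standalone re-statement of the size rule, for checking the arithmetic (it does not call into the patched class):

    public class VarintSizeCheck {
      // Same rule as Encoding.computeRawVarint32Size in the hunk above.
      static int computeRawVarint32Size(final int value) {
        if ((value & (0xffffffff <<  7)) == 0) return 1;
        if ((value & (0xffffffff << 14)) == 0) return 2;
        if ((value & (0xffffffff << 21)) == 0) return 3;
        if ((value & (0xffffffff << 28)) == 0) return 4;
        return 5;
      }

      public static void main(String[] args) {
        System.out.println(computeRawVarint32Size(1));        // 1 (fits in 7 bits)
        System.out.println(computeRawVarint32Size(300));      // 2 (needs 9 bits)
        System.out.println(computeRawVarint32Size(1 << 21));  // 4 (22 bits)
        // A length-prefixed byte array of 300 bytes costs 2 + 300 bytes,
        // which is what computeArraySize(300) returns.
        System.out.println(computeRawVarint32Size(300) + 300);
      }
    }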

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9335b2c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FlatINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FlatINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FlatINode.java
index ecc230a..f78c4f8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FlatINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FlatINode.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedOutputStream;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -25,11 +26,17 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.util.LongBitFormat;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Iterator;
 
 /**
  * In-memory representation of an INode.
  */
 public final class FlatINode extends FlatObject {
+  public static final int FEATURE_OFFSET = Encoding.SIZEOF_LONG * 5 +
+Encoding.SIZEOF_INT;
+
   private FlatINode(ByteString data) {
 super(data);
   }
@@ -132,18 +139,77 @@ public final class FlatINode extends FlatObject {
   @Override
   public String toString() {
 StringBuilder sb = new StringBuilder("INode[");
-sb.append(isFile() ? "file" : "dir")
-.append(", id=" + id())
-.append("]");
+sb.append(isFile() ? "file" : "dir").append(", id=" + id()).append("]");
 return sb.toString();
   }
 
+  abstract static class 

[14/50] [abbrv] hadoop git commit: [partial-ns] Implement fsync().

2016-01-05 Thread wheat9
[partial-ns] Implement fsync().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f09c483
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f09c483
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f09c483

Branch: refs/heads/feature-HDFS-8286
Commit: 7f09c483706c75b9207e008b7891add133e76da4
Parents: 5fe5b9a
Author: Haohui Mai 
Authored: Mon May 25 19:51:53 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:56:58 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 12 ++
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 28 +++--
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  6 +--
 .../hdfs/server/namenode/FSNamesystem.java  | 41 +---
 .../namenode/FileUnderConstructionFeature.java  | 18 -
 .../hdfs/server/namenode/RWTransaction.java | 10 +
 6 files changed, 68 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f09c483/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index afd8bb1..e6eb635 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -360,6 +360,18 @@ public class BlockManager {
 LOG.info("maxNumBlocksToLog  = " + maxNumBlocksToLog);
   }
 
+  public void updateLastBlockLength(Block block, long blockLength) {
+BlockInfoContiguous lastBlock = getStoredBlock(block);
+assert (lastBlock != null) : "The last block " + block + " is null when updating its length";
+assert (lastBlock instanceof BlockInfoContiguousUnderConstruction)
+: "The last block " + block
++ " is not a BlockInfoUnderConstruction when updating its length";
+assert !lastBlock.isComplete();
+BlockInfoContiguousUnderConstruction uc =
+(BlockInfoContiguousUnderConstruction) lastBlock;
+uc.setNumBytes(blockLength);
+  }
+
   private static BlockTokenSecretManager createBlockTokenSecretManager(
   final Configuration conf) throws IOException {
 final boolean isEnabled = conf.getBoolean(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f09c483/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 4cb7f2c..9e9aa93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -100,13 +100,18 @@ class FSDirWriteFileOp {
*/
   static void persistBlocks(
   FSDirectory fsd, String path, INodeFile file, boolean logRetryCache) {
-assert fsd.getFSNamesystem().hasWriteLock();
-Preconditions.checkArgument(file.isUnderConstruction());
-fsd.getEditLog().logUpdateBlocks(path, file, logRetryCache);
+throw new IllegalStateException("Unimplemented");
+  }
+
+  static void persistBlocks(
+  RWTransaction tx, String path, FlatINode inode) {
+FlatINodeFileFeature f = inode.feature(FlatINodeFileFeature.class);
+Preconditions.checkArgument(f != null && f.inConstruction());
+tx.logUpdateBlocks(path, f);
 if(NameNode.stateChangeLog.isDebugEnabled()) {
-  NameNode.stateChangeLog.debug("persistBlocks: " + path
-  + " with " + file.getBlocks().length + " blocks is persisted to" 
+
-  " the file system");
+  NameNode.stateChangeLog.debug(
+  "persistBlocks: " + path + " with " + f.numBlocks() + " " +
+  "blocks is persisted to the file system");
 }
   }
 
@@ -528,8 +533,7 @@ class FSDirWriteFileOp {
 final INodeFile newNode;
 assert fsd.hasWriteLock();
 if (underConstruction) {
-  newNode = newINodeFile(id, permissions, modificationTime,
-  modificationTime, replication,
+  newNode = newINodeFile(id, permissions, modificationTime, modificationTime, replication,

[45/50] [abbrv] hadoop git commit: [partial-ns] Import snappy in hdfsdb.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb5ba73b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/ChangeLog
--
diff --git a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/ChangeLog 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/ChangeLog
new file mode 100644
index 000..edd46dd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/snappy/ChangeLog
@@ -0,0 +1,1916 @@
+
+r83 | snappy.mirror...@gmail.com | 2014-02-19 11:31:49 +0100 (Wed, 19 Feb 
2014) | 9 lines
+
+Fix public issue 82: Stop distributing benchmark data files that have
+unclear or unsuitable licensing.
+
+In general, we replace the files we can with liberally licensed data,
+and remove all the others (in particular all the parts of the Canterbury
+corpus that are not clearly in the public domain). The replacements
+do not always have the exact same characteristics as the original ones,
+but they are more than good enough to be useful for benchmarking.
+
+
+r82 | snappy.mirror...@gmail.com | 2013-10-25 15:31:27 +0200 (Fri, 25 Oct 
2013) | 8 lines
+
+Add support for padding in the Snappy framed format.
+
+This is specifically motivated by DICOM's demands that embedded data
+must be of an even number of bytes, but could in principle be used for
+any sort of padding/alignment needed.
+
+R=sanjay
+
+
+r81 | snappy.mirror...@gmail.com | 2013-10-15 17:21:31 +0200 (Tue, 15 Oct 
2013) | 4 lines
+
+Release Snappy 1.1.1.
+
+R=jeff
+
+
+r80 | snappy.mirror...@gmail.com | 2013-08-13 14:55:00 +0200 (Tue, 13 Aug 
2013) | 6 lines
+
+Add autoconf tests for size_t and ssize_t. Sort-of resolves public issue 79;
+it would solve the problem if MSVC typically used autoconf. However, it gives
+a natural place (config.h) to put the typedef even for MSVC.
+
+R=jsbell
+
+
+r79 | snappy.mirror...@gmail.com | 2013-07-29 13:06:44 +0200 (Mon, 29 Jul 
2013) | 14 lines
+
+When we compare the number of bytes produced with the offset for a
+backreference, make the signedness of the bytes produced clear,
+by sticking it into a size_t. This avoids a signed/unsigned compare
+warning from MSVC (public issue 71), and also is slightly clearer.
+
+Since the line is now so long the explanatory comment about the -1u
+trick has to go somewhere else anyway, I used the opportunity to
+explain it in slightly more detail.
+
+This is a purely stylistic change; the emitted assembler from GCC
+is identical.
+
+R=jeff
+
+
+r78 | snappy.mirror...@gmail.com | 2013-06-30 21:24:03 +0200 (Sun, 30 Jun 
2013) | 111 lines
+
+In the fast path for decompressing literals, instead of checking
+whether there's 16 bytes free and then checking right afterwards
+(when having subtracted the literal size) that there are now 
+5 bytes free, just check once for 21 bytes. This skips a compare
+and a branch; although it is easily predictable, it is still
+a few cycles on a fast path that we would like to get rid of.
+
+Benchmarking this yields very confusing results. On open-source
+GCC 4.8.1 on Haswell, we get exactly the expected results; the
+benchmarks where we hit the fast path for literals (in particular
+the two HTML benchmarks and the protobuf benchmark) give very nice
+speedups, and the others are not really affected.
+
+However, benchmarks with Google's GCC branch on other hardware
+is much less clear. It seems that we have a weak loss in some cases
+(and the win for the “typical” win cases are not nearly as clear),
+but that it depends on microarchitecture and plain luck in how we run
+the benchmark. Looking at the generated assembler, it seems that
+the removal of the if causes other large-scale changes in how the
+function is laid out, which makes it likely that this is just bad luck.
+
+Thus, we should keep this change, even though its exact current impact is
+unclear; it's a sensible change per se, and dropping it on the basis of
+microoptimization for a given compiler (or even branch of a compiler)
+would seem like a bad strategy in the long run.
+
+Microbenchmark results (all in 64-bit, opt mode):
+
+  Nehalem, Google GCC:
+
+  Benchmark        Base (ns)   New (ns)                      Improvement
+  ----------------------------------------------------------------------
+  BM_UFlat/0           76747      75591   1.3GB/s    html        +1.5%
+  BM_UFlat/1          765756     757040   886.3MB/s  urls        +1.2%
+  BM_UFlat/2           10867      10893   10.9GB/s   jpg         -0.2%
+  BM_UFlat/3  

[48/50] [abbrv] hadoop git commit: [partial-ns] Implement LevelDB-based fsimage.

2016-01-05 Thread wheat9
[partial-ns] Implement LevelDB-based fsimage.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23dbb0f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23dbb0f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23dbb0f9

Branch: refs/heads/feature-HDFS-8286
Commit: 23dbb0f9f5046ac508ba09ca960f1466fb552c61
Parents: 2d7972d
Author: Haohui Mai 
Authored: Thu May 28 16:49:13 2015 -0700
Committer: Haohui Mai 
Committed: Fri Jun 12 13:57:01 2015 -0700

--
 .../hdfs/server/namenode/FSDirectory.java   |  32 -
 .../server/namenode/LevelDBChildrenView.java|  75 +++
 .../server/namenode/LevelDBROTransaction.java   | 129 +++
 .../server/namenode/LevelDBRWTransaction.java   |  84 
 .../namenode/LevelDBReplayTransaction.java  |  85 
 5 files changed, 399 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23dbb0f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 8744916..0e50d8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.hdfsdb.Options;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@@ -100,7 +101,6 @@ import static org.apache.hadoop.util.Time.now;
 @InterfaceAudience.Private
 public class FSDirectory implements Closeable {
   static final Logger LOG = LoggerFactory.getLogger(FSDirectory.class);
-
   private static INodeDirectory createRoot(FSNamesystem namesystem) {
 final INodeDirectory r = new INodeDirectory(
 ROOT_INODE_ID,
@@ -184,6 +184,12 @@ public class FSDirectory implements Closeable {
   private final FSEditLog editLog;
 
   private INodeAttributeProvider attributeProvider;
+  private final boolean enableLevelDb;
+  private final org.apache.hadoop.hdfs.hdfsdb.DB levelDb;
+
+  org.apache.hadoop.hdfs.hdfsdb.DB getLevelDb() {
+return levelDb;
+  }
 
   public void setINodeAttributeProvider(INodeAttributeProvider provider) {
 attributeProvider = provider;
@@ -244,11 +250,12 @@ public class FSDirectory implements Closeable {
   }
 
   RWTransaction newRWTransaction() {
-return new RWTransaction(this);
+return enableLevelDb ? new LevelDBRWTransaction(this) : new RWTransaction(this);
   }
 
   public ROTransaction newROTransaction() {
-return new ROTransaction(db());
+return enableLevelDb ? new LevelDBROTransaction(this, levelDb)
+: new ROTransaction(this);
   }
 
   public ReplayTransaction newReplayTransaction() {
@@ -338,9 +345,22 @@ public class FSDirectory implements Closeable {
 namesystem = ns;
 this.editLog = ns.getEditLog();
 ezManager = new EncryptionZoneManager(this, conf);
-this.db = new DB(dirLock);
-// TODO: Load fsimage
-db.addRoot(createRootForFlatNS(ns));
+this.enableLevelDb = conf.getBoolean("dfs.partialns", false);
+if (enableLevelDb) {
+  String dbPath = conf.get("dfs.partialns.path");
+  Options options = new Options().createIfMissing(true);
+  this.levelDb = org.apache.hadoop.hdfs.hdfsdb.DB.open(options, dbPath);
+  try (RWTransaction tx = newRWTransaction().begin()) {
+tx.putINode(ROOT_INODE_ID, createRootForFlatNS(ns));
+tx.commit();
+  }
+  this.db = null;
+} else {
+  this.db = new DB(dirLock);
+  // TODO: Load fsimage
+  db.addRoot(createRootForFlatNS(ns));
+  this.levelDb = null;
+}
   }
 
   FSNamesystem getFSNamesystem() {

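The constructor change keys the new code path off two configuration properties taken from the diff, dfs.partialns (boolean switch) and dfs.partialns.path (database directory). A minimal sketch of enabling it programmatically; the path value is only an example.

    import org.apache.hadoop.conf.Configuration;

    public class PartialNsConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Property names come from the FSDirectory change above; the path is an example.
        conf.setBoolean("dfs.partialns", true);
        conf.set("dfs.partialns.path", "/var/hdfs/partialns-db");
        // With these set, newROTransaction()/newRWTransaction() hand out the
        // LevelDB-backed transactions instead of the in-memory ones.
        System.out.println(conf.getBoolean("dfs.partialns", false));
      }
    }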
http://git-wip-us.apache.org/repos/asf/hadoop/blob/23dbb0f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBChildrenView.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBChildrenView.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LevelDBChildrenView.java
new file mode 100644
index 

[17/50] [abbrv] hadoop git commit: [partial-ns] Import HDFSDB.

2016-01-05 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a6419f4/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/histogram.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/histogram.cc 
b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/histogram.cc
new file mode 100644
index 000..bb95f58
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfsdb/src/main/native/hdfsdb/util/histogram.cc
@@ -0,0 +1,139 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include 
+#include 
+#include "port/port.h"
+#include "util/histogram.h"
+
+namespace leveldb {
+
+const double Histogram::kBucketLimit[kNumBuckets] = {
+  1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45,
+  50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450,
+  500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000,
+  3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000,
+  16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000,
+  70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000,
+  250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000,
+  900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000,
+  3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000,
+  9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000,
+  25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000,
+  70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000,
+  180000000, 200000000, 250000000, 300000000, 350000000, 400000000,
+  450000000, 500000000, 600000000, 700000000, 800000000, 900000000,
+  1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000,
+  2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0,
+  5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0,
+  1e200,
+};
+
+void Histogram::Clear() {
+  min_ = kBucketLimit[kNumBuckets-1];
+  max_ = 0;
+  num_ = 0;
+  sum_ = 0;
+  sum_squares_ = 0;
+  for (int i = 0; i < kNumBuckets; i++) {
+buckets_[i] = 0;
+  }
+}
+
+void Histogram::Add(double value) {
+  // Linear search is fast enough for our usage in db_bench
+  int b = 0;
+  while (b < kNumBuckets - 1 && kBucketLimit[b] <= value) {
+b++;
+  }
+  buckets_[b] += 1.0;
+  if (min_ > value) min_ = value;
+  if (max_ < value) max_ = value;
+  num_++;
+  sum_ += value;
+  sum_squares_ += (value * value);
+}
+
+void Histogram::Merge(const Histogram& other) {
+  if (other.min_ < min_) min_ = other.min_;
+  if (other.max_ > max_) max_ = other.max_;
+  num_ += other.num_;
+  sum_ += other.sum_;
+  sum_squares_ += other.sum_squares_;
+  for (int b = 0; b < kNumBuckets; b++) {
+buckets_[b] += other.buckets_[b];
+  }
+}
+
+double Histogram::Median() const {
+  return Percentile(50.0);
+}
+
+double Histogram::Percentile(double p) const {
+  double threshold = num_ * (p / 100.0);
+  double sum = 0;
+  for (int b = 0; b < kNumBuckets; b++) {
+sum += buckets_[b];
+if (sum >= threshold) {
+  // Scale linearly within this bucket
+  double left_point = (b == 0) ? 0 : kBucketLimit[b-1];
+  double right_point = kBucketLimit[b];
+  double left_sum = sum - buckets_[b];
+  double right_sum = sum;
+  double pos = (threshold - left_sum) / (right_sum - left_sum);
+  double r = left_point + (right_point - left_point) * pos;
+  if (r < min_) r = min_;
+  if (r > max_) r = max_;
+  return r;
+}
+  }
+  return max_;
+}
+
+double Histogram::Average() const {
+  if (num_ == 0.0) return 0;
+  return sum_ / num_;
+}
+
+double Histogram::StandardDeviation() const {
+  if (num_ == 0.0) return 0;
+  double variance = (sum_squares_ * num_ - sum_ * sum_) / (num_ * num_);
+  return sqrt(variance);
+}
+
+std::string Histogram::ToString() const {
+  std::string r;
+  char buf[200];
+  snprintf(buf, sizeof(buf),
+   "Count: %.0f  Average: %.4f  StdDev: %.2f\n",
+   num_, Average(), StandardDeviation());
+  r.append(buf);
+  snprintf(buf, sizeof(buf),
+   "Min: %.4f  Median: %.4f  Max: %.4f\n",
+   (num_ == 0.0 ? 0.0 : min_), Median(), max_);
+  r.append(buf);
+  r.append("--\n");
+  const double mult = 100.0 / num_;
+  double sum = 0;
+  for (int b = 0; b < kNumBuckets; b++) {
+if (buckets_[b] <= 0.0) continue;
+sum += buckets_[b];
+snprintf(buf, sizeof(buf),
+ "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
+ ((b == 0) ? 0.0 : kBucketLimit[b-1]),  // left
+ kBucketLimit[b],   // right
+ buckets_[b],   // count
+ mult * 

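Histogram::Percentile above walks the bucket counts until the running sum crosses num_ * p / 100 and then interpolates linearly inside the bucket that crossed the threshold. A small Java restatement with made-up bucket data, to show the interpolation step (the min_/max_ clamping is omitted):

    public class PercentileSketch {
      // Mirrors the interpolation in Histogram::Percentile above; clamping to
      // min_/max_ is omitted for brevity.
      static double percentile(double[] bucketLimit, double[] buckets,
                               double num, double p) {
        double threshold = num * (p / 100.0);
        double sum = 0;
        for (int b = 0; b < buckets.length; b++) {
          sum += buckets[b];
          if (sum >= threshold) {
            double left = (b == 0) ? 0 : bucketLimit[b - 1];
            double right = bucketLimit[b];
            double pos = (threshold - (sum - buckets[b])) / buckets[b];
            return left + (right - left) * pos;
          }
        }
        return bucketLimit[bucketLimit.length - 1];
      }

      public static void main(String[] args) {
        // Made-up buckets with limits 1, 2, 5 holding 2, 6 and 2 samples.
        double[] limits = {1, 2, 5};
        double[] counts = {2, 6, 2};
        // Median threshold is 5; the middle bucket is crossed at (5 - 2) / 6 = 0.5,
        // so the result is 1 + (2 - 1) * 0.5 = 1.5.
        System.out.println(percentile(limits, counts, 10, 50.0));
      }
    }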
hadoop git commit: HADOOP-10729. Add tests for PB RPC in case version mismatch of client and server. Contributed by Junping Du.

2015-12-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 832b3cbde -> c4084d9bc


HADOOP-10729. Add tests for PB RPC in case version mismatch of client and 
server. Contributed by Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4084d9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4084d9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4084d9b

Branch: refs/heads/trunk
Commit: c4084d9bc3b5c20405d9da6623b330d5720b64a1
Parents: 832b3cb
Author: Haohui Mai <whe...@apache.org>
Authored: Wed Dec 9 05:41:44 2015 +0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Wed Dec 9 05:41:44 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../ipc/TestProtoBufRPCCompatibility.java   | 178 +++
 .../hadoop-common/src/test/proto/test.proto |   8 +
 .../src/test/proto/test_rpc_service.proto   |  15 ++
 4 files changed, 204 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4084d9b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e7da77b..4cd295e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1497,6 +1497,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12609. Fix intermittent failure of TestDecayRpcScheduler.
 (Masatake Iwasaki via Arpit Agarwal)
 
+HADOOP-10729. Add tests for PB RPC in case version mismatch of client and
+server. (Junping Du via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4084d9b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
new file mode 100644
index 000..76a93cf
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
@@ -0,0 +1,178 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
+import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.OptRequestProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.OptResponseProto;
+
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.OldProtobufRpcProto;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.NewProtobufRpcProto;
+import 
org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.NewerProtobufRpcProto;
+import org.apache.hadoop.net.NetUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+public class TestProtoBufRPCCompatibility {
+
+  private static final String ADDRESS = "0.0.0.0";
+  public final static int PORT = 0;
+  private static InetSocketAddress addr;
+  private static RPC.Server server;
+  private static Configuration conf;
+
+  @ProtocolInfo(protocolName = "testProto", protocolVersion = 1)
+  public interface OldRpcService extends
+  OldProtobufRpcProto.BlockingInterface {
+  }
+
+  @Proto

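The new test declares several blocking interfaces that share the protocol name "testProto" but are generated from different .proto revisions, then exercises client/server pairs whose versions do not match. A compressed sketch of that shape; the method names below are illustrative, and the real interfaces extend the protobuf-generated BlockingInterface types shown above.

    import org.apache.hadoop.ipc.ProtocolInfo;

    // Two client-side views of the same wire protocol: same name and version, but
    // generated from different .proto revisions, so the newer one carries calls
    // the older server does not implement.
    public class VersionMismatchSketch {
      @ProtocolInfo(protocolName = "testProto", protocolVersion = 1)
      interface OldRpcService {
        String ping();
      }

      @ProtocolInfo(protocolName = "testProto", protocolVersion = 1)
      interface NewRpcService {
        String ping();
        String echo2(String msg);   // only the newer .proto knows this call
      }
      // The test serves one interface with RPC.Builder, obtains a proxy for the
      // other, and asserts that calls unknown to the server fail cleanly instead
      // of corrupting the connection.
    }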
hadoop git commit: HADOOP-10729. Add tests for PB RPC in case version mismatch of client and server. Contributed by Junping Du.

2015-12-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bfe479680 -> 417fbf2dc


HADOOP-10729. Add tests for PB RPC in case version mismatch of client and 
server. Contributed by Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/417fbf2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/417fbf2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/417fbf2d

Branch: refs/heads/branch-2
Commit: 417fbf2dca8a65fb3ff5f5032f982b6509f31db9
Parents: bfe4796
Author: Haohui Mai <whe...@apache.org>
Authored: Wed Dec 9 05:41:44 2015 +0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Wed Dec 9 05:42:24 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../ipc/TestProtoBufRPCCompatibility.java   | 178 +++
 .../hadoop-common/src/test/proto/test.proto |   8 +
 .../src/test/proto/test_rpc_service.proto   |  15 ++
 4 files changed, 204 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/417fbf2d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1f769db..1b23b4e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -878,6 +878,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12609. Fix intermittent failure of TestDecayRpcScheduler.
 (Masatake Iwasaki via Arpit Agarwal)
 
+HADOOP-10729. Add tests for PB RPC in case version mismatch of client and
+server. (Junping Du via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/417fbf2d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
new file mode 100644
index 000..76a93cf
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
@@ -0,0 +1,178 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
+import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.OptRequestProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.OptResponseProto;
+
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.OldProtobufRpcProto;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.NewProtobufRpcProto;
+import 
org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.NewerProtobufRpcProto;
+import org.apache.hadoop.net.NetUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+public class TestProtoBufRPCCompatibility {
+
+  private static final String ADDRESS = "0.0.0.0";
+  public final static int PORT = 0;
+  private static InetSocketAddress addr;
+  private static RPC.Server server;
+  private static Configuration conf;
+
+  @ProtocolInfo(protocolName = "testProto", protocolVersion = 1)
+  public interface OldRpcService extends
+  OldProtobufRpcProto.BlockingInterface {
+  }
+
+  @Proto

hadoop git commit: HADOOP-10729. Add tests for PB RPC in case version mismatch of client and server. Contributed by Junping Du.

2015-12-08 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 3d100163d -> 274af9b57


HADOOP-10729. Add tests for PB RPC in case version mismatch of client and 
server. Contributed by Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/274af9b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/274af9b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/274af9b5

Branch: refs/heads/branch-2.8
Commit: 274af9b5798b051c4d0e2912af553a02b71afe0c
Parents: 3d10016
Author: Haohui Mai <whe...@apache.org>
Authored: Wed Dec 9 05:41:44 2015 +0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Wed Dec 9 05:44:36 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../ipc/TestProtoBufRPCCompatibility.java   | 178 +++
 .../hadoop-common/src/test/proto/test.proto |   8 +
 .../src/test/proto/test_rpc_service.proto   |  15 ++
 4 files changed, 204 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/274af9b5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6c6ba75..04b16b9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -863,6 +863,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12609. Fix intermittent failure of TestDecayRpcScheduler.
 (Masatake Iwasaki via Arpit Agarwal)
 
+HADOOP-10729. Add tests for PB RPC in case version mismatch of client and
+server. (Junping Du via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/274af9b5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
new file mode 100644
index 000..76a93cf
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRPCCompatibility.java
@@ -0,0 +1,178 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
+import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.OptRequestProto;
+import org.apache.hadoop.ipc.protobuf.TestProtos.OptResponseProto;
+
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.OldProtobufRpcProto;
+import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.NewProtobufRpcProto;
+import 
org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.NewerProtobufRpcProto;
+import org.apache.hadoop.net.NetUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+public class TestProtoBufRPCCompatibility {
+
+  private static final String ADDRESS = "0.0.0.0";
+  public final static int PORT = 0;
+  private static InetSocketAddress addr;
+  private static RPC.Server server;
+  private static Configuration conf;
+
+  @ProtocolInfo(protocolName = "testProto", protocolVersion = 1)
+  public interface OldRpcService extends
+  OldProtobufRpcProto.BlockingInterface {
+  }
+
+  @P

hadoop git commit: HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after transition to ctest. Contributed by Chris Nauroth.

2015-11-25 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 628c78287 -> 15caaa7d0


HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after 
transition to ctest. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15caaa7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15caaa7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15caaa7d

Branch: refs/heads/branch-2
Commit: 15caaa7d0a13044f2908594ace9196e00c9b
Parents: 628c782
Author: Haohui Mai <whe...@apache.org>
Authored: Wed Nov 25 10:31:39 2015 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Wed Nov 25 10:32:13 2015 -0800

--
 .../hadoop-hdfs-native-client/pom.xml   | 16 +++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 2 files changed, 10 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15caaa7d/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index cc59a28..85c0fe4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -162,15 +162,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 
 
-
-  
-  
-  
-
-
-
-  
-
+
+  
+  
+  
+  
+  
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15caaa7d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8387571..7564a38 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1529,6 +1529,9 @@ Release 2.8.0 - UNRELEASED
 intermittently with various symptoms - debugging patch. (Yongjun Zhang via
 Arpit Agarwal)
 
+HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after
+transition to ctest. (Chris Nauroth via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after transition to ctest. Contributed by Chris Nauroth.

2015-11-25 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 15d577bfb -> 95d5227c7


HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after 
transition to ctest. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95d5227c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95d5227c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95d5227c

Branch: refs/heads/trunk
Commit: 95d5227c75f430da7c77847f31734b34b36157d2
Parents: 15d577b
Author: Haohui Mai <whe...@apache.org>
Authored: Wed Nov 25 10:31:39 2015 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Wed Nov 25 10:32:48 2015 -0800

--
 .../hadoop-hdfs-native-client/pom.xml   | 16 +++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 2 files changed, 10 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95d5227c/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index a966a28..9fa5fbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -162,15 +162,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   
 
 
-
-  
-  
-  
-
-
-
-  
-
+
+  
+  
+  
+  
+  
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95d5227c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8100c4d..707684b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2388,6 +2388,9 @@ Release 2.8.0 - UNRELEASED
 intermittently with various symptoms - debugging patch. (Yongjun Zhang via
 Arpit Agarwal)
 
+HDFS-9459. hadoop-hdfs-native-client fails test build on Windows after
+transition to ctest. (Chris Nauroth via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-9451. Clean up depreated umasks and related unit tests. Contributed by Wei-Chiu Chuang.

2015-11-25 Thread wheat9
ject/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 707684b..957087e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -868,6 +868,9 @@ Trunk (Unreleased)
   HDFS-9348. Erasure Coding: DFS GetErasureCodingPolicy API on a 
non-existent 
   file should be handled properly. (Rakesh R via umamahesh)
 
+  HDFS-9451. Clean up depreated umasks and related unit tests.
+  (Wei-Chiu Chuang via wheat9)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b21dffb1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 425c82e..27af49c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -83,31 +83,18 @@ public class TestPermission {
 Configuration conf = new Configuration();
 FsPermission.setUMask(conf, perm);
 assertEquals(18, FsPermission.getUMask(conf).toShort());
-
-// Test 2 - old configuration key set with decimal 
-// umask value should be handled
-perm = new FsPermission((short)18);
-conf = new Configuration();
-conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18");
-assertEquals(18, FsPermission.getUMask(conf).toShort());
-
-// Test 3 - old configuration key overrides the new one
-conf = new Configuration();
-conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18");
-conf.set(FsPermission.UMASK_LABEL, "000");
-assertEquals(18, FsPermission.getUMask(conf).toShort());
-
-// Test 4 - new configuration key is handled
+
+// Test 2 - new configuration key is handled
 conf = new Configuration();
 conf.set(FsPermission.UMASK_LABEL, "022");
 assertEquals(18, FsPermission.getUMask(conf).toShort());
 
-// Test 5 - equivalent valid umask
+// Test 3 - equivalent valid umask
 conf = new Configuration();
 conf.set(FsPermission.UMASK_LABEL, "0022");
 assertEquals(18, FsPermission.getUMask(conf).toShort());
 
-// Test 6 - invalid umask
+// Test 4 - invalid umask
 conf = new Configuration();
 conf.set(FsPermission.UMASK_LABEL, "1222");
 try {
@@ -117,7 +104,7 @@ public class TestPermission {
  //pass, exception successfully trigger
 }
 
-// Test 7 - invalid umask
+// Test 5 - invalid umask
 conf = new Configuration();
 conf.set(FsPermission.UMASK_LABEL, "01222");
 try {



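With the deprecated decimal key removed, only FsPermission.UMASK_LABEL ("fs.permissions.umask-mode") is honoured, in octal form as in the kept tests. A two-line reminder of the supported usage:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class UmaskConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(FsPermission.UMASK_LABEL, "022");                   // octal, as in the kept tests
        System.out.println(FsPermission.getUMask(conf).toShort());   // prints 18 (= 022 octal)
      }
    }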
hadoop git commit: HDFS-5165. Remove the TotalFiles metrics. Contributed by Akira AJISAKA.

2015-11-24 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 17b1a5482 -> db4cab21f


HDFS-5165. Remove the TotalFiles metrics. Contributed by Akira AJISAKA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db4cab21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db4cab21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db4cab21

Branch: refs/heads/trunk
Commit: db4cab21f4c661f68d6a6dec50aae00d75168486
Parents: 17b1a54
Author: Haohui Mai <whe...@apache.org>
Authored: Tue Nov 24 11:41:57 2015 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Tue Nov 24 11:41:57 2015 -0800

--
 .../hadoop-common/src/site/markdown/Metrics.md|  1 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 ++
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java  |  8 
 .../hadoop/hdfs/server/namenode/NameNodeMXBean.java   | 10 --
 4 files changed, 2 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db4cab21/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 9e02ffa..a91bbad 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -231,7 +231,6 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `MillisSinceLastLoadedEdits` | (HA-only) Time in milliseconds since the last 
time standby NameNode load edit log. In active NameNode, set to 0 |
 | `BlockCapacity` | Current number of block capacity |
 | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed 
heartbeat |
-| `TotalFiles` | Deprecated: Use FilesTotal instead |
 | `MissingReplOneBlocks` | Current number of missing blocks with replication 
factor 1 |
 | `NumFilesUnderConstruction` | Current number of files under construction |
 | `NumActiveClients` | Current number of active clients holding lease |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db4cab21/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b441b35..639b8f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -44,6 +44,8 @@ Trunk (Unreleased)
 
 HDFS-9278. Fix preferredBlockSize typo in OIV XML output. (Nicole Pazmany 
via wang)
 
+HDFS-5165. Remove the TotalFiles metrics. (Akira Ajisaka via wheat9)
+
   NEW FEATURES
 
 HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db4cab21/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8d77630..0559288 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6001,14 +6001,6 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 return getBlocksTotal();
   }
 
-  /** @deprecated Use {@link #getFilesTotal()} instead. */
-  @Deprecated
-  @Override // NameNodeMXBean
-  @Metric
-  public long getTotalFiles() {
-return getFilesTotal();
-  }
-
   @Override // NameNodeMXBean
   public long getNumberOfMissingBlocks() {
 return getMissingBlocksCount();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db4cab21/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
index 3f78155..c0d256a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
@@ -139,16 +139,6 @@ public interface NameNodeMXBean {
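
For anyone still tracking this value after the removal, FilesTotal remains
readable from the NameNode's /jmx servlet. A rough sketch follows; the host,
port and bean name (FSNamesystemState) are assumptions and placeholders, not
something this commit defines:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class FilesTotalProbe {
  public static void main(String[] args) throws Exception {
    // Placeholder NameNode HTTP address; the qry bean name is assumed to be
    // the one that carries the FilesTotal attribute.
    URL url = new URL("http://namenode.example.com:50070/jmx"
        + "?qry=Hadoop:service=NameNode,name=FSNamesystemState");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        if (line.contains("FilesTotal")) {
          System.out.println(line.trim());   // e.g. "FilesTotal" : 42
        }
      }
    } finally {
      conn.disconnect();
    }
  }
}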

hadoop git commit: HDFS-8335. FSNamesystem should construct FSPermissionChecker only if permission is enabled. Contributed by Gabor Liptak.

2015-11-24 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk fe5624b85 -> 977e0b3c4


HDFS-8335. FSNamesystem should construct FSPermissionChecker only if permission 
is enabled. Contributed by Gabor Liptak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/977e0b3c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/977e0b3c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/977e0b3c

Branch: refs/heads/trunk
Commit: 977e0b3c4ce76746a3d8590d2d790fdc96c86ca5
Parents: fe5624b
Author: Haohui Mai <whe...@apache.org>
Authored: Tue Nov 24 13:07:26 2015 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Tue Nov 24 13:14:49 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  6 +
 .../server/namenode/FSDirStatAndListingOp.java  | 25 +---
 2 files changed, 23 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/977e0b3c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7d9df2e..92897b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1734,6 +1734,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-9318. considerLoad factor can be improved. (Kuhu Shukla via kihwal)
 
+HDFS-8335. FSNamesystem should construct FSPermissionChecker only if
+permission is enabled. (Gabor Liptak via wheat9)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
@@ -2376,6 +2379,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8855. Webhdfs client leaks active NameNode connections.
 (Xiaobing Zhou via xyao)
 
+HDFS-8335. FSNamesystem should construct FSPermissionChecker only if
+permission is enabled. (Gabor Liptak via wheat9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/977e0b3c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index a1ac1a7..d8baa6b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -52,12 +52,17 @@ import static org.apache.hadoop.util.Time.now;
 class FSDirStatAndListingOp {
   static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
   byte[] startAfter, boolean needLocation) throws IOException {
-FSPermissionChecker pc = fsd.getPermissionChecker();
 byte[][] pathComponents = FSDirectory
 .getPathComponentsForReservedPath(srcArg);
 final String startAfterString = new String(startAfter, Charsets.UTF_8);
-final String src = fsd.resolvePath(pc, srcArg, pathComponents);
-final INodesInPath iip = fsd.getINodesInPath(src, true);
+String src = null;
+
+if (fsd.isPermissionEnabled()) {
+  FSPermissionChecker pc = fsd.getPermissionChecker();
+  src = fsd.resolvePath(pc, srcArg, pathComponents);
+} else {
+  src = FSDirectory.resolvePath(srcArg, pathComponents, fsd);
+}
 
 // Get file name when startAfter is an INodePath
 if (FSDirectory.isReservedName(startAfterString)) {
@@ -74,8 +79,10 @@ class FSDirStatAndListingOp {
   }
 }
 
+final INodesInPath iip = fsd.getINodesInPath(src, true);
 boolean isSuperUser = true;
 if (fsd.isPermissionEnabled()) {
+  FSPermissionChecker pc = fsd.getPermissionChecker();
   if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
 fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
   } else {
@@ -103,15 +110,17 @@ class FSDirStatAndListingOp {
 if (!DFSUtil.isValidName(src)) {
   throw new InvalidPathException("Invalid file name: " + src);
 }
-FSPermissionChecker pc = fsd.getPermissionChecker();
 byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
-final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
 if (fsd.isPermissionEnabled()) {
+  FSPermissionChecker pc = fsd.getPermissionChecker();
+  src = fsd.resolvePath(pc, src, pathComponents);
+  final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
   f
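
A hedged, illustrative-only distillation of the guard above (none of these
names are the real FSDirectory/FSDirStatAndListingOp internals): the
permission checker is constructed only on the branch where permissions are
enforced, so clusters running with permission checking disabled skip that
work entirely.

import java.util.function.Supplier;

public class LazyCheckerSketch {
  interface Checker { void checkRead(String path); }

  static String list(String src, boolean permissionEnabled,
                     Supplier<Checker> checkerFactory) {
    if (permissionEnabled) {
      // Only this branch pays for building the checker.
      Checker pc = checkerFactory.get();
      pc.checkRead(src);
    }
    return "listing of " + src;   // stands in for the real directory listing
  }

  public static void main(String[] args) {
    Supplier<Checker> factory = () -> path ->
        System.out.println("checking READ_EXECUTE on " + path);
    System.out.println(list("/tmp", true, factory));
    System.out.println(list("/tmp", false, factory));   // factory never used
  }
}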

[1/2] hadoop git commit: Revert "HADOOP-12572. Update Hadoop's lz4 to r131. Contributed by Kevin Bowling."

2015-11-23 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d13b646c6 -> 75b007260


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75b00726/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h
index 3e74002..44ada14 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h
@@ -1,8 +1,7 @@
 /*
LZ4 - Fast LZ compression algorithm
Header File
-   Copyright (C) 2011-2015, Yann Collet.
-
+   Copyright (C) 2011-2014, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
Redistribution and use in source and binary forms, with or without
@@ -29,7 +28,7 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
You can contact the author at :
-   - LZ4 source repository : https://github.com/Cyan4973/lz4
+   - LZ4 source repository : http://code.google.com/p/lz4/
- LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
 */
 #pragma once
@@ -39,22 +38,22 @@ extern "C" {
 #endif
 
 /*
- * lz4.h provides block compression functions, and gives full buffer control 
to programmer.
- * If you need to generate inter-operable compressed data (respecting LZ4 
frame specification),
- * and can let the library handle its own memory, please use lz4frame.h 
instead.
+ * lz4.h provides raw compression format functions, for optimal performance 
and integration into programs.
+ * If you need to generate data using an inter-operable format (respecting the 
framing specification),
+ * please use lz4frame.h instead.
 */
 
 /**
-*  Version
+   Version
 **/
-#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes  */
-#define LZ4_VERSION_MINOR 7 /* for new (non-breaking) interface 
capabilities */
+#define LZ4_VERSION_MAJOR 1 /* for major interface/format changes  */
+#define LZ4_VERSION_MINOR 3 /* for minor interface/format changes  */
 #define LZ4_VERSION_RELEASE  1 /* for tweaks, bug-fixes, or development */
 #define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR 
*100 + LZ4_VERSION_RELEASE)
 int LZ4_versionNumber (void);
 
 /**
-*  Tuning parameter
+   Tuning parameter
 **/
 /*
  * LZ4_MEMORY_USAGE :
@@ -67,90 +66,77 @@ int LZ4_versionNumber (void);
 
 
 /**
-*  Simple Functions
+   Simple Functions
 **/
 
-int LZ4_compress_default(const char* source, char* dest, int sourceSize, int 
maxDestSize);
+int LZ4_compress(const char* source, char* dest, int sourceSize);
 int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, 
int maxDecompressedSize);
 
 /*
-LZ4_compress_default() :
-Compresses 'sourceSize' bytes from buffer 'source'
-into already allocated 'dest' buffer of size 'maxDestSize'.
-Compression is guaranteed to succeed if 'maxDestSize' >= 
LZ4_compressBound(sourceSize).
-It also runs faster, so it's a recommended setting.
-If the function cannot compress 'source' into a more limited 'dest' budget,
-compression stops *immediately*, and the function result is zero.
-As a consequence, 'dest' content is not valid.
-This function never writes outside 'dest' buffer, nor read outside 
'source' buffer.
-sourceSize  : Max supported value is LZ4_MAX_INPUT_VALUE
-maxDestSize : full or partial size of buffer 'dest' (which must be 
already allocated)
-return : the number of bytes written into buffer 'dest' (necessarily 
<= maxOutputSize)
-  or 0 if compression fails
+LZ4_compress() :
+Compresses 'sourceSize' bytes from 'source' into 'dest'.
+Destination buffer must be already allocated,
+and must be sized to handle worst cases situations (input data not 
compressible)
+Worst case size evaluation is provided by function LZ4_compressBound()
+inputSize : Max supported value is LZ4_MAX_INPUT_SIZE
+return : the number of bytes written in buffer dest
+ or 0 if the compression fails
 
 LZ4_decompress_safe() :
-compressedSize : is the precise full size of the compressed block.
-maxDecompressedSize : is the size of destination buffer, which must be 
already allocated.
-return : the number of bytes decompressed into destination buffer 
(necessarily <= maxDecompressedSize)
- If destination buffer is not large enough, decoding will stop and 
output an error code (<0).
+
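
The header comments being restored above lean on LZ4_compressBound() for
worst-case destination sizing. As a hedged aside, the bound mirrors the
LZ4_COMPRESSBOUND macro in these headers (isize + isize/255 + 16); the Java
translation below is only for reasoning about buffer sizes, and the native
LZ4_compressBound() stays authoritative:

public class Lz4BoundSketch {
  // Value taken from lz4.h; inputs above this size cannot be compressed.
  static final int LZ4_MAX_INPUT_SIZE = 0x7E000000;

  static int compressBound(int isize) {
    return (isize < 0 || isize > LZ4_MAX_INPUT_SIZE)
        ? 0
        : isize + isize / 255 + 16;
  }

  public static void main(String[] args) {
    // A 64 KB block needs at most 65,809 bytes of destination space.
    System.out.println(compressBound(64 * 1024));
  }
}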

[2/2] hadoop git commit: Revert "HADOOP-12572. Update Hadoop's lz4 to r131. Contributed by Kevin Bowling."

2015-11-23 Thread wheat9
Revert "HADOOP-12572. Update Hadoop's lz4 to r131. Contributed by Kevin 
Bowling."

This reverts commit 74d67b2250ef5d7be5ea44a0c2acdf769f5ed8cc.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75b00726
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75b00726
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75b00726

Branch: refs/heads/branch-2
Commit: 75b007260bbc352a18de18fbdd41681b62a51734
Parents: d13b646
Author: Haohui Mai <whe...@apache.org>
Authored: Mon Nov 23 14:59:55 2015 -0800
Committer: Haohui Mai <whe...@apache.org>
Committed: Mon Nov 23 14:59:55 2015 -0800

--
 LICENSE.txt |5 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |2 -
 .../hadoop/io/compress/lz4/Lz4Compressor.c  |4 +-
 .../hadoop/io/compress/lz4/Lz4Decompressor.c|2 +-
 .../src/org/apache/hadoop/io/compress/lz4/lz4.c | 1184 +++---
 .../src/org/apache/hadoop/io/compress/lz4/lz4.h |  281 ++---
 .../org/apache/hadoop/io/compress/lz4/lz4hc.c   |  906 --
 .../org/apache/hadoop/io/compress/lz4/lz4hc.h   |  220 ++--
 8 files changed, 1233 insertions(+), 1371 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75b00726/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 9b0c034..23c6f37 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -257,8 +257,7 @@ For 
src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,lz4hc.h,l
 /*
LZ4 - Fast LZ compression algorithm
Header File
-   Copyright (C) 2011-2015, Yann Collet.
-
+   Copyright (C) 2011-2014, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
Redistribution and use in source and binary forms, with or without
@@ -285,7 +284,7 @@ For 
src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,lz4hc.h,l
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
You can contact the author at :
-   - LZ4 source repository : https://github.com/Cyan4973/lz4
+   - LZ4 source repository : http://code.google.com/p/lz4/
- LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
 */
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75b00726/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 40c4170..a300c50 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -341,8 +341,6 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12575. Add build instruction for docker toolbox instead of
 boot2docker(Kai Sasaki via ozawa)
 
-HADOOP-12572. Update Hadoop's lz4 to r131. (Kevin Bowling via wheat9)
-
 HADOOP-10035. Cleanup TestFilterFileSystem. (Suresh Srinivas via wheat9)
 
 HADOOP-10555. Add offset support to MurmurHash.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75b00726/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
index 26b43d6..f742384 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
@@ -73,7 +73,7 @@ JNIEXPORT jint JNICALL 
Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_comp
 return (jint)0;
   }
 
-  compressed_direct_buf_len = LZ4_compress_default(uncompressed_bytes, 
compressed_bytes, uncompressed_direct_buf_len, 
LZ4_compressBound(uncompressed_direct_buf_len));
+  compressed_direct_buf_len = LZ4_compress(uncompressed_bytes, 
compressed_bytes, uncompressed_direct_buf_len);
   if (compressed_direct_buf_len < 0){
 THROW(env, "java/lang/InternalError", "LZ4_compress failed");
   }
@@ -115,7 +115,7 @@ JNIEXPORT jint JNICALL 
Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_comp
 return (jint)0;
   }
 
-  compressed_direct_buf_len = LZ4_compress_HC(uncompressed_bytes, 
compressed_bytes, uncompressed_direct_buf_len, 
LZ4_compressBound(uncompressed_direct_buf_len), 0);
+  compressed_direct_buf_len = LZ4_compressHC(uncompressed_bytes, 
compressed_bytes, uncompressed_direct_buf_len);
   if (compressed_direct_buf_len < 0){
 THROW(env, &
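
Since the revert only swaps the bundled native sources (LZ4_compress_default
and LZ4_compress_HC back to LZ4_compress and LZ4_compressHC), Java callers
that go through the codec wrapper are unaffected. A minimal usage sketch,
assuming libhadoop with lz4 support is loadable and using a placeholder
output path:

import java.io.FileOutputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Lz4Codec;
import org.apache.hadoop.util.ReflectionUtils;

public class Lz4CodecSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Lz4Codec codec = ReflectionUtils.newInstance(Lz4Codec.class, conf);
    try (OutputStream raw = new FileOutputStream("/tmp/example.lz4");
         CompressionOutputStream out = codec.createOutputStream(raw)) {
      out.write("hello lz4".getBytes(StandardCharsets.UTF_8));
      out.finish();   // flush the final compressed block before close
    }
  }
}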
