hadoop git commit: HADOOP-15450. Avoid fsync storm triggered by DiskChecker and handle disk full situation. Contributed by Arpit Agarwal.

2018-05-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 43beb -> 5dcd57cbe


HADOOP-15450. Avoid fsync storm triggered by DiskChecker and handle disk full 
situation. Contributed by Arpit Agarwal.

(cherry picked from commit bcc8e76badc1341a6cf995c8e44fa5e422158de8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5dcd57cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5dcd57cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5dcd57cb

Branch: refs/heads/branch-2
Commit: 5dcd57cbeada2915678edbea5c5f0cc20486ddd9
Parents: 43b
Author: Kihwal Lee 
Authored: Tue May 22 11:29:54 2018 -0500
Committer: Kihwal Lee 
Committed: Tue May 22 11:29:54 2018 -0500

--
 .../org/apache/hadoop/util/DiskChecker.java |  46 -
 .../org/apache/hadoop/util/TestDiskChecker.java | 102 ---
 .../hadoop/util/TestDiskCheckerWithDiskIo.java  | 173 +++
 3 files changed, 217 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5dcd57cb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
index 8563232..c47de21 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
@@ -74,12 +74,30 @@ public class DiskChecker {
    * @throws DiskErrorException
    */
   public static void checkDir(File dir) throws DiskErrorException {
+    checkDirInternal(dir);
+  }
+
+  /**
+   * Create the directory if it doesn't exist and check that dir is
+   * readable, writable and executable. Perform some disk IO to
+   * ensure that the disk is usable for writes.
+   *
+   * @param dir
+   * @throws DiskErrorException
+   */
+  public static void checkDirWithDiskIo(File dir)
+      throws DiskErrorException {
+    checkDirInternal(dir);
+    doDiskIo(dir);
+  }
+
+  private static void checkDirInternal(File dir)
+      throws DiskErrorException {
     if (!mkdirsWithExistsCheck(dir)) {
       throw new DiskErrorException("Cannot create directory: "
                                    + dir.toString());
     }
     checkAccessByFileMethods(dir);
-    doDiskIo(dir);
   }
 
   /**
@@ -94,10 +112,34 @@ public class DiskChecker {
    */
   public static void checkDir(LocalFileSystem localFS, Path dir,
                               FsPermission expected)
+      throws DiskErrorException, IOException {
+    checkDirInternal(localFS, dir, expected);
+  }
+
+
+  /**
+   * Create the local directory if necessary, also ensure permissions
+   * allow it to be read from and written into. Perform some diskIO
+   * to ensure that the disk is usable for writes.
+   *
+   * @param localFS local filesystem
+   * @param dir directory
+   * @param expected permission
+   * @throws DiskErrorException
+   * @throws IOException
+   */
+  public static void checkDirWithDiskIo(LocalFileSystem localFS, Path dir,
+                                        FsPermission expected)
+      throws DiskErrorException, IOException {
+    checkDirInternal(localFS, dir, expected);
+    doDiskIo(localFS.pathToFile(dir));
+  }
+
+  private static void checkDirInternal(LocalFileSystem localFS, Path dir,
+                                       FsPermission expected)
       throws DiskErrorException, IOException {
     mkdirsWithExistsAndPermissionCheck(localFS, dir, expected);
     checkAccessByFileMethods(localFS.pathToFile(dir));
-    doDiskIo(localFS.pathToFile(dir));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5dcd57cb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
index bd8e1dd..6b6c6c8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.util;
 
 import java.io.*;
 import java.nio.file.Files;
-import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.util.DiskChecker.FileIoProvider;
 import org.junit.After;
@@ -214,105 +213,4 @@ public class TestDiskChecker {
 }
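
For readers following this change: the patch splits the old checkDir() into a cheap metadata check (mkdir plus read/write/execute access) and an opt-in checkDirWithDiskIo() that still performs the disk IO probe, so routine health checks no longer trigger an fsync on every call. A minimal usage sketch follows; the directory path and the wrapper class are illustrative only and not part of the patch.

    import java.io.File;

    import org.apache.hadoop.util.DiskChecker;
    import org.apache.hadoop.util.DiskChecker.DiskErrorException;

    public class DiskCheckExample {
      public static void main(String[] args) {
        // Hypothetical storage directory; substitute a real data directory.
        File dir = new File("/data/1/dfs");
        try {
          // Cheap check: creates the directory if needed and verifies rwx access.
          DiskChecker.checkDir(dir);
          // Opt-in heavier check: also performs some disk IO so a bad or full
          // disk surfaces as a DiskErrorException.
          DiskChecker.checkDirWithDiskIo(dir);
          System.out.println(dir + " looks usable");
        } catch (DiskErrorException e) {
          System.err.println("Disk check failed for " + dir + ": " + e.getMessage());
        }
      }
    }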
  

hadoop git commit: HADOOP-15450. Avoid fsync storm triggered by DiskChecker and handle disk full situation. Contributed by Arpit Agarwal.

2018-05-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8e1107d63 -> 1c407d327


HADOOP-15450. Avoid fsync storm triggered by DiskChecker and handle disk full 
situation. Contributed by Arpit Agarwal.

(cherry picked from commit bcc8e76badc1341a6cf995c8e44fa5e422158de8)

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c407d32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c407d32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c407d32

Branch: refs/heads/branch-2.8
Commit: 1c407d327a655576d2d0d12fd408db5ba0e85de8
Parents: 8e1107d
Author: Kihwal Lee 
Authored: Tue May 22 11:36:25 2018 -0500
Committer: Kihwal Lee 
Committed: Tue May 22 11:36:25 2018 -0500

--
 .../org/apache/hadoop/util/DiskChecker.java |  46 -
 .../org/apache/hadoop/util/TestDiskChecker.java | 101 ---
 .../hadoop/util/TestDiskCheckerWithDiskIo.java  | 173 +++
 3 files changed, 217 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c407d32/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
index 8563232..c47de21 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
@@ -74,12 +74,30 @@ public class DiskChecker {
    * @throws DiskErrorException
    */
   public static void checkDir(File dir) throws DiskErrorException {
+    checkDirInternal(dir);
+  }
+
+  /**
+   * Create the directory if it doesn't exist and check that dir is
+   * readable, writable and executable. Perform some disk IO to
+   * ensure that the disk is usable for writes.
+   *
+   * @param dir
+   * @throws DiskErrorException
+   */
+  public static void checkDirWithDiskIo(File dir)
+      throws DiskErrorException {
+    checkDirInternal(dir);
+    doDiskIo(dir);
+  }
+
+  private static void checkDirInternal(File dir)
+      throws DiskErrorException {
     if (!mkdirsWithExistsCheck(dir)) {
       throw new DiskErrorException("Cannot create directory: "
                                    + dir.toString());
     }
     checkAccessByFileMethods(dir);
-    doDiskIo(dir);
   }
 
   /**
@@ -94,10 +112,34 @@ public class DiskChecker {
    */
   public static void checkDir(LocalFileSystem localFS, Path dir,
                               FsPermission expected)
+      throws DiskErrorException, IOException {
+    checkDirInternal(localFS, dir, expected);
+  }
+
+
+  /**
+   * Create the local directory if necessary, also ensure permissions
+   * allow it to be read from and written into. Perform some diskIO
+   * to ensure that the disk is usable for writes.
+   *
+   * @param localFS local filesystem
+   * @param dir directory
+   * @param expected permission
+   * @throws DiskErrorException
+   * @throws IOException
+   */
+  public static void checkDirWithDiskIo(LocalFileSystem localFS, Path dir,
+                                        FsPermission expected)
+      throws DiskErrorException, IOException {
+    checkDirInternal(localFS, dir, expected);
+    doDiskIo(localFS.pathToFile(dir));
+  }
+
+  private static void checkDirInternal(LocalFileSystem localFS, Path dir,
+                                       FsPermission expected)
       throws DiskErrorException, IOException {
     mkdirsWithExistsAndPermissionCheck(localFS, dir, expected);
     checkAccessByFileMethods(localFS.pathToFile(dir));
-    doDiskIo(localFS.pathToFile(dir));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c407d32/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
index 43bd183..ffeee4d 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
@@ -203,105 +203,4 @@ public class TestDiskChecker {
 }
 localDir.delete();
   }
-
-  /**
-   * Verify DiskChecker ignores at least 2 transient file creation errors.
-   */
-  @Te

hadoop git commit: Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

2018-05-29 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9502b47bd -> e3236a968


Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3236a96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3236a96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3236a96

Branch: refs/heads/trunk
Commit: e3236a9680709de7a95ffbc11b20e1bdc95a8605
Parents: 9502b47
Author: Kihwal Lee 
Authored: Tue May 29 14:15:12 2018 -0500
Committer: Kihwal Lee 
Committed: Tue May 29 14:15:12 2018 -0500

--
 .../java/org/apache/hadoop/util/RunJar.java | 10 +
 .../java/org/apache/hadoop/util/TestRunJar.java | 42 
 2 files changed, 52 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3236a96/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index f1b643c..4c94dbc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -117,12 +117,17 @@ public class RunJar {
       throws IOException {
     try (JarInputStream jar = new JarInputStream(inputStream)) {
       int numOfFailedLastModifiedSet = 0;
+      String targetDirPath = toDir.getCanonicalPath() + File.separator;
       for (JarEntry entry = jar.getNextJarEntry();
            entry != null;
            entry = jar.getNextJarEntry()) {
         if (!entry.isDirectory() &&
             unpackRegex.matcher(entry.getName()).matches()) {
           File file = new File(toDir, entry.getName());
+          if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+            throw new IOException("expanding " + entry.getName()
+                + " would create file outside of " + toDir);
+          }
           ensureDirectory(file.getParentFile());
           try (OutputStream out = new FileOutputStream(file)) {
             IOUtils.copyBytes(jar, out, BUFFER_SIZE);
@@ -182,6 +187,7 @@ public class RunJar {
       throws IOException {
     try (JarFile jar = new JarFile(jarFile)) {
       int numOfFailedLastModifiedSet = 0;
+      String targetDirPath = toDir.getCanonicalPath() + File.separator;
       Enumeration<JarEntry> entries = jar.entries();
       while (entries.hasMoreElements()) {
         final JarEntry entry = entries.nextElement();
@@ -189,6 +195,10 @@ public class RunJar {
             unpackRegex.matcher(entry.getName()).matches()) {
           try (InputStream in = jar.getInputStream(entry)) {
             File file = new File(toDir, entry.getName());
+            if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+              throw new IOException("expanding " + entry.getName()
+                  + " would create file outside of " + toDir);
+            }
             ensureDirectory(file.getParentFile());
             try (OutputStream out = new FileOutputStream(file)) {
               IOUtils.copyBytes(in, out, BUFFER_SIZE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3236a96/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index ea07b97..a8c27d4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.util.RunJar.MATCH_ANY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
@@ -32,6 +33,7 @@ import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
 import java.util.Random;
 import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
@@ -255,4 +257,44 @@ public class TestRunJar {
 // it should not throw an exception
 verify(runJar, times(0)).unJar(any(File.class), any(File.class));
   }
+
+  @
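
The check added above is the standard guard against the zip slip path traversal pattern: canonicalize the destination of every archive entry and refuse it if it escapes the extraction directory. A self-contained sketch of the same idea outside of RunJar is below; the class name, the mkdirs handling and the copy loop are illustrative and not taken from the Hadoop code.

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.util.Enumeration;
    import java.util.jar.JarEntry;
    import java.util.jar.JarFile;

    public class SafeUnjarSketch {
      public static void unpack(File jarFile, File toDir) throws IOException {
        String targetDirPath = toDir.getCanonicalPath() + File.separator;
        try (JarFile jar = new JarFile(jarFile)) {
          Enumeration<JarEntry> entries = jar.entries();
          while (entries.hasMoreElements()) {
            JarEntry entry = entries.nextElement();
            if (entry.isDirectory()) {
              continue;
            }
            File file = new File(toDir, entry.getName());
            // The guard: an entry named "../outside.path" canonicalizes to a
            // location outside toDir and is rejected before anything is written.
            if (!file.getCanonicalPath().startsWith(targetDirPath)) {
              throw new IOException("expanding " + entry.getName()
                  + " would create file outside of " + toDir);
            }
            File parent = file.getParentFile();
            if (!parent.isDirectory() && !parent.mkdirs()) {
              throw new IOException("Mkdirs failed to create " + parent);
            }
            try (InputStream in = jar.getInputStream(entry);
                 OutputStream out = new FileOutputStream(file)) {
              byte[] buffer = new byte[8192];
              for (int n = in.read(buffer); n != -1; n = in.read(buffer)) {
                out.write(buffer, 0, n);
              }
            }
          }
        }
      }
    }

The added test (testUnJar2) exercises exactly this case by packing an entry named "../outside.path" and asserting that unJar throws instead of writing the file.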

hadoop git commit: Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

2018-05-29 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 d5708bbcd -> 65e55097d


Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65e55097
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65e55097
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65e55097

Branch: refs/heads/branch-3.0
Commit: 65e55097da2bb3f2fbdf9ba1946da25fe58bec98
Parents: d5708bb
Author: Kihwal Lee 
Authored: Tue May 29 14:30:29 2018 -0500
Committer: Kihwal Lee 
Committed: Tue May 29 14:30:29 2018 -0500

--
 .../java/org/apache/hadoop/util/RunJar.java |  5 +++
 .../java/org/apache/hadoop/util/TestRunJar.java | 36 
 2 files changed, 41 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65e55097/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 19b51ad..678e459 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -109,6 +109,7 @@ public class RunJar {
   throws IOException {
 try (JarFile jar = new JarFile(jarFile)) {
   int numOfFailedLastModifiedSet = 0;
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   Enumeration entries = jar.entries();
   while (entries.hasMoreElements()) {
 final JarEntry entry = entries.nextElement();
@@ -117,6 +118,10 @@ public class RunJar {
   try (InputStream in = jar.getInputStream(entry)) {
 File file = new File(toDir, entry.getName());
 ensureDirectory(file.getParentFile());
+if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create file outside of " + toDir);
+}
 try (OutputStream out = new FileOutputStream(file)) {
   IOUtils.copyBytes(in, out, BUFFER_SIZE);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65e55097/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index 7b61b32..cb2cfa8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -20,12 +20,15 @@ package org.apache.hadoop.util;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
@@ -165,4 +168,37 @@ public class TestRunJar {
 runJar.run(args);
 // it should not throw an exception
   }
+
+  @Test
+  public void testUnJar2() throws IOException {
+// make a simple zip
+File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_NAME);
+JarOutputStream jstream =
+new JarOutputStream(new FileOutputStream(jarFile));
+JarEntry je = new JarEntry("META-INF/MANIFEST.MF");
+byte[] data = "Manifest-Version: 1.0\nCreated-By: 1.8.0_1 (Manual)"
+.getBytes(StandardCharsets.UTF_8);
+je.setSize(data.length);
+jstream.putNextEntry(je);
+jstream.write(data);
+jstream.closeEntry();
+je = new JarEntry("../outside.path");
+data = "any data here".getBytes(StandardCharsets.UTF_8);
+je.setSize(data.length);
+jstream.putNextEntry(je);
+jstream.write(data);
+jstream.closeEntry();
+jstream.close();
+
+File unjarDir = getUnjarDir("unjar-path");
+
+// Unjar everything
+try {
+  RunJar.unJar(jarFile, unjarDir);
+  fail("unJar should throw IOException.");
+} catch (IOException e) {
+  GenericTestUtils.assertExceptionContains(
+ 

hadoop git commit: Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

2018-05-29 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 09fbbff69 -> 6d7d192e4


Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

(cherry picked from commit 65e55097da2bb3f2fbdf9ba1946da25fe58bec98)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d7d192e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d7d192e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d7d192e

Branch: refs/heads/branch-2
Commit: 6d7d192e4799b51931e55217e02baec14d49607b
Parents: 09fbbff
Author: Kihwal Lee 
Authored: Tue May 29 14:32:58 2018 -0500
Committer: Kihwal Lee 
Committed: Tue May 29 14:33:31 2018 -0500

--
 .../java/org/apache/hadoop/util/RunJar.java |  5 +++
 .../java/org/apache/hadoop/util/TestRunJar.java | 36 
 2 files changed, 41 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d7d192e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 19b51ad..678e459 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -109,6 +109,7 @@ public class RunJar {
   throws IOException {
 try (JarFile jar = new JarFile(jarFile)) {
   int numOfFailedLastModifiedSet = 0;
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   Enumeration entries = jar.entries();
   while (entries.hasMoreElements()) {
 final JarEntry entry = entries.nextElement();
@@ -117,6 +118,10 @@ public class RunJar {
   try (InputStream in = jar.getInputStream(entry)) {
 File file = new File(toDir, entry.getName());
 ensureDirectory(file.getParentFile());
+if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create file outside of " + toDir);
+}
 try (OutputStream out = new FileOutputStream(file)) {
   IOUtils.copyBytes(in, out, BUFFER_SIZE);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d7d192e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index 7b61b32..cb2cfa8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -20,12 +20,15 @@ package org.apache.hadoop.util;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
@@ -165,4 +168,37 @@ public class TestRunJar {
 runJar.run(args);
 // it should not throw an exception
   }
+
+  @Test
+  public void testUnJar2() throws IOException {
+// make a simple zip
+File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_NAME);
+JarOutputStream jstream =
+new JarOutputStream(new FileOutputStream(jarFile));
+JarEntry je = new JarEntry("META-INF/MANIFEST.MF");
+byte[] data = "Manifest-Version: 1.0\nCreated-By: 1.8.0_1 (Manual)"
+.getBytes(StandardCharsets.UTF_8);
+je.setSize(data.length);
+jstream.putNextEntry(je);
+jstream.write(data);
+jstream.closeEntry();
+je = new JarEntry("../outside.path");
+data = "any data here".getBytes(StandardCharsets.UTF_8);
+je.setSize(data.length);
+jstream.putNextEntry(je);
+jstream.write(data);
+jstream.closeEntry();
+jstream.close();
+
+File unjarDir = getUnjarDir("unjar-path");
+
+// Unjar everything
+try {
+  RunJar.unJar(jarFile, unjarDir);
+  fail("unJar should throw IOException.");
+} catch (IOException e) {
+  Gen

hadoop git commit: Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

2018-05-29 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 c3dce2620 -> 6a4ae6f6e


Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

(cherry picked from commit 65e55097da2bb3f2fbdf9ba1946da25fe58bec98)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a4ae6f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a4ae6f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a4ae6f6

Branch: refs/heads/branch-2.9
Commit: 6a4ae6f6eeed1392a4828a5721fa1499f65bdde4
Parents: c3dce26
Author: Kihwal Lee 
Authored: Tue May 29 14:35:28 2018 -0500
Committer: Kihwal Lee 
Committed: Tue May 29 14:35:28 2018 -0500

--
 .../java/org/apache/hadoop/util/RunJar.java |  5 +++
 .../java/org/apache/hadoop/util/TestRunJar.java | 36 
 2 files changed, 41 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a4ae6f6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 19b51ad..678e459 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -109,6 +109,7 @@ public class RunJar {
   throws IOException {
 try (JarFile jar = new JarFile(jarFile)) {
   int numOfFailedLastModifiedSet = 0;
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   Enumeration entries = jar.entries();
   while (entries.hasMoreElements()) {
 final JarEntry entry = entries.nextElement();
@@ -117,6 +118,10 @@ public class RunJar {
   try (InputStream in = jar.getInputStream(entry)) {
 File file = new File(toDir, entry.getName());
 ensureDirectory(file.getParentFile());
+if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create file outside of " + toDir);
+}
 try (OutputStream out = new FileOutputStream(file)) {
   IOUtils.copyBytes(in, out, BUFFER_SIZE);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a4ae6f6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index 7b61b32..cb2cfa8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -20,12 +20,15 @@ package org.apache.hadoop.util;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
@@ -165,4 +168,37 @@ public class TestRunJar {
 runJar.run(args);
 // it should not throw an exception
   }
+
+  @Test
+  public void testUnJar2() throws IOException {
+// make a simple zip
+File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_NAME);
+JarOutputStream jstream =
+new JarOutputStream(new FileOutputStream(jarFile));
+JarEntry je = new JarEntry("META-INF/MANIFEST.MF");
+byte[] data = "Manifest-Version: 1.0\nCreated-By: 1.8.0_1 (Manual)"
+.getBytes(StandardCharsets.UTF_8);
+je.setSize(data.length);
+jstream.putNextEntry(je);
+jstream.write(data);
+jstream.closeEntry();
+je = new JarEntry("../outside.path");
+data = "any data here".getBytes(StandardCharsets.UTF_8);
+je.setSize(data.length);
+jstream.putNextEntry(je);
+jstream.write(data);
+jstream.closeEntry();
+jstream.close();
+
+File unjarDir = getUnjarDir("unjar-path");
+
+// Unjar everything
+try {
+  RunJar.unJar(jarFile, unjarDir);
+  fail("unJar should throw IOException.");
+} catch (IOException e) {
+  Gen

hadoop git commit: Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

2018-05-29 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 74c7024cc -> 3808e5d62


Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3808e5d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3808e5d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3808e5d6

Branch: refs/heads/branch-2.8
Commit: 3808e5d62aa21d7393d98fbc9d54b9ad1e79ab99
Parents: 74c7024
Author: Kihwal Lee 
Authored: Tue May 29 14:39:53 2018 -0500
Committer: Kihwal Lee 
Committed: Tue May 29 14:39:53 2018 -0500

--
 .../java/org/apache/hadoop/util/RunJar.java |  5 +++
 .../java/org/apache/hadoop/util/TestRunJar.java | 38 +++-
 2 files changed, 42 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3808e5d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 52cf05c..a56f6ea 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -98,6 +98,7 @@ public class RunJar {
 JarFile jar = new JarFile(jarFile);
 try {
   int numOfFailedLastModifiedSet = 0;
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   Enumeration entries = jar.entries();
   while (entries.hasMoreElements()) {
 final JarEntry entry = entries.nextElement();
@@ -107,6 +108,10 @@ public class RunJar {
   try {
 File file = new File(toDir, entry.getName());
 ensureDirectory(file.getParentFile());
+if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create file outside of " + toDir);
+}
 OutputStream out = new FileOutputStream(file);
 try {
   IOUtils.copyBytes(in, out, 8192);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3808e5d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index 7262534..20650c0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.util;
 
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
@@ -25,6 +26,8 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
@@ -186,4 +189,37 @@ public class TestRunJar extends TestCase {
 
 return jarFile;
   }
-}
\ No newline at end of file
+
+  @Test
+  public void testUnJar2() throws IOException {
+// make a simple zip
+File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_NAME);
+JarOutputStream jstream =
+new JarOutputStream(new FileOutputStream(jarFile));
+JarEntry je = new JarEntry("META-INF/MANIFEST.MF");
+byte[] data = "Manifest-Version: 1.0\nCreated-By: 1.8.0_1 (Manual)"
+.getBytes(StandardCharsets.UTF_8);
+je.setSize(data.length);
+jstream.putNextEntry(je);
+jstream.write(data);
+jstream.closeEntry();
+je = new JarEntry("../outside.path");
+data = "any data here".getBytes(StandardCharsets.UTF_8);
+je.setSize(data.length);
+jstream.putNextEntry(je);
+jstream.write(data);
+jstream.closeEntry();
+jstream.close();
+
+File unjarDir = new File(TEST_ROOT_DIR, "unjar-path");
+
+// Unjar everything
+try {
+  RunJar.unJar(jarFile, unjarDir);
+  fail("unJar should throw IOException.");
+} catch (IOException e) {
+  GenericTestUtils.assertExceptionContains(
+  "would create file outside of", e);
+}
+  }
+}



hadoop git commit: Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

2018-05-29 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 5b57f9cae -> eaa2b8035


Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eaa2b803
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eaa2b803
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eaa2b803

Branch: refs/heads/branch-2.7
Commit: eaa2b8035b584dfcf7c79a33484eb2dffd3fdb11
Parents: 5b57f9c
Author: Kihwal Lee 
Authored: Tue May 29 14:47:55 2018 -0500
Committer: Kihwal Lee 
Committed: Tue May 29 14:48:46 2018 -0500

--
 .../java/org/apache/hadoop/util/RunJar.java |  5 +++
 .../java/org/apache/hadoop/util/TestRunJar.java | 39 +++-
 2 files changed, 43 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eaa2b803/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 4b26b76..a3b5b0b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -93,6 +93,7 @@ public class RunJar {
 throws IOException {
 JarFile jar = new JarFile(jarFile);
 try {
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   Enumeration entries = jar.entries();
   while (entries.hasMoreElements()) {
 final JarEntry entry = entries.nextElement();
@@ -102,6 +103,10 @@ public class RunJar {
   try {
 File file = new File(toDir, entry.getName());
 ensureDirectory(file.getParentFile());
+if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create file outside of " + toDir);
+}
 OutputStream out = new FileOutputStream(file);
 try {
   IOUtils.copyBytes(in, out, 8192);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eaa2b803/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index f592d04..b2a6537 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.util;
 
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
@@ -25,6 +26,8 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
@@ -32,6 +35,7 @@ import java.util.zip.ZipEntry;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -169,4 +173,37 @@ public class TestRunJar extends TestCase {
 
 return jarFile;
   }
-}
\ No newline at end of file
+
+  @Test
+  public void testUnJar2() throws IOException {
+// make a simple zip
+File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_NAME);
+JarOutputStream jstream =
+new JarOutputStream(new FileOutputStream(jarFile));
+JarEntry je = new JarEntry("META-INF/MANIFEST.MF");
+byte[] data = "Manifest-Version: 1.0\nCreated-By: 1.8.0_1 (Manual)"
+.getBytes(StandardCharsets.UTF_8);
+je.setSize(data.length);
+jstream.putNextEntry(je);
+jstream.write(data);
+jstream.closeEntry();
+je = new JarEntry("../outside.path");
+data = "any data here".getBytes(StandardCharsets.UTF_8);
+je.setSize(data.length);
+jstream.putNextEntry(je);
+jstream.write(data);
+jstream.closeEntry();
+jstream.close();
+
+File unjarDir = new File(TEST_ROOT_DIR, "unjar-path");
+
+// Unjar everything
+try {
+  RunJar.unJar(jarFile, unjarDir);
+  fail("unJar should throw IOException.");

hadoop git commit: Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

2018-05-29 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 a1fd04c4f -> 11a425d11


Additional check when unpacking archives. Contributed by Wilfred Spiegelenburg.

(cherry picked from commit e3236a9680709de7a95ffbc11b20e1bdc95a8605)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11a425d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11a425d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11a425d1

Branch: refs/heads/branch-3.1
Commit: 11a425d11a329010d0ff8255ecbcd1eb51b642e3
Parents: a1fd04c
Author: Kihwal Lee 
Authored: Tue May 29 15:02:33 2018 -0500
Committer: Kihwal Lee 
Committed: Tue May 29 15:02:33 2018 -0500

--
 .../java/org/apache/hadoop/util/RunJar.java | 10 +
 .../java/org/apache/hadoop/util/TestRunJar.java | 46 +++-
 2 files changed, 55 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11a425d1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 9dd770c..239d464 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -113,12 +113,17 @@ public class RunJar {
   throws IOException {
 try (JarInputStream jar = new JarInputStream(inputStream)) {
   int numOfFailedLastModifiedSet = 0;
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   for (JarEntry entry = jar.getNextJarEntry();
entry != null;
entry = jar.getNextJarEntry()) {
 if (!entry.isDirectory() &&
 unpackRegex.matcher(entry.getName()).matches()) {
   File file = new File(toDir, entry.getName());
+  if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+throw new IOException("expanding " + entry.getName()
++ " would create file outside of " + toDir);
+  }
   ensureDirectory(file.getParentFile());
   try (OutputStream out = new FileOutputStream(file)) {
 IOUtils.copyBytes(jar, out, BUFFER_SIZE);
@@ -178,6 +183,7 @@ public class RunJar {
   throws IOException {
 try (JarFile jar = new JarFile(jarFile)) {
   int numOfFailedLastModifiedSet = 0;
+  String targetDirPath = toDir.getCanonicalPath() + File.separator;
   Enumeration entries = jar.entries();
   while (entries.hasMoreElements()) {
 final JarEntry entry = entries.nextElement();
@@ -185,6 +191,10 @@ public class RunJar {
 unpackRegex.matcher(entry.getName()).matches()) {
   try (InputStream in = jar.getInputStream(entry)) {
 File file = new File(toDir, entry.getName());
+if (!file.getCanonicalPath().startsWith(targetDirPath)) {
+  throw new IOException("expanding " + entry.getName()
+  + " would create file outside of " + toDir);
+}
 ensureDirectory(file.getParentFile());
 try (OutputStream out = new FileOutputStream(file)) {
   IOUtils.copyBytes(in, out, BUFFER_SIZE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/11a425d1/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index 19485d6..237751c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.util;
 
+import static org.apache.hadoop.util.RunJar.MATCH_ANY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
@@ -28,6 +31,7 @@ import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
 import java.util.Random;
 import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
@@ -222,4 +226,44 @@ public c

hadoop git commit: HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails. Contributed by Weiwei Yang.

2018-07-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk ba1ab08fd -> ccf2db7fc


HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled 
reliably fails. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ccf2db7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ccf2db7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ccf2db7f

Branch: refs/heads/trunk
Commit: ccf2db7fc2688d262df3309007cb12a4dfedc179
Parents: ba1ab08
Author: Kihwal Lee 
Authored: Thu Jul 19 11:13:37 2018 -0500
Committer: Kihwal Lee 
Committed: Thu Jul 19 11:13:37 2018 -0500

--
 .../apache/hadoop/security/TestGroupsCaching.java  | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccf2db7f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index 46e36b3..bba8152 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -561,23 +561,28 @@ public class TestGroupsCaching {
 
     // Then expire that entry
     timer.advance(4 * 1000);
+    // Pause the getGroups operation and this will delay the cache refresh
+    FakeGroupMapping.pause();
 
     // Now get the cache entry - it should return immediately
     // with the old value and the cache will not have completed
     // a request to getGroups yet.
     assertEquals(groups.getGroups("me").size(), 2);
     assertEquals(startingRequestCount, FakeGroupMapping.getRequestCount());
+    // Resume the getGroups operation and the cache can get refreshed
+    FakeGroupMapping.resume();
 
-    // Now sleep for a short time and re-check the request count. It should have
-    // increased, but the exception means the cache will not have updated
-    Thread.sleep(50);
+    // Now wait for the refresh done, because of the exception, we expect
+    // a onFailure callback gets called and the counter for failure is 1
+    waitForGroupCounters(groups, 0, 0, 0, 1);
     FakeGroupMapping.setThrowException(false);
     assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
     assertEquals(groups.getGroups("me").size(), 2);
 
-    // Now sleep another short time - the 3rd call to getGroups above
-    // will have kicked off another refresh that updates the cache
-    Thread.sleep(50);
+    // Now the 3rd call to getGroups above will have kicked off
+    // another refresh that updates the cache, since it no longer gives
+    // exception, we now expect the counter for success is 1.
+    waitForGroupCounters(groups, 0, 0, 1, 1);
     assertEquals(startingRequestCount + 2, FakeGroupMapping.getRequestCount());
     assertEquals(groups.getGroups("me").size(), 3);
   }
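
A note on why this fixes the flakiness: the background refresh runs on a separate thread, so a fixed Thread.sleep(50) races it and the request-count assertions intermittently run too early. The patch instead pauses the fake mapping, then polls the group cache counters until the refresh outcome is visible. waitForGroupCounters() is presumably built on a polling helper along these lines; the sketch below is illustrative and not the Hadoop implementation.

    import java.util.concurrent.TimeoutException;
    import java.util.function.BooleanSupplier;

    public final class WaitUtil {
      private WaitUtil() {
      }

      // Poll a condition every checkEveryMillis until it holds, or give up
      // after waitForMillis and fail loudly instead of asserting on a race.
      public static void waitFor(BooleanSupplier condition, long checkEveryMillis,
          long waitForMillis) throws TimeoutException, InterruptedException {
        long deadline = System.currentTimeMillis() + waitForMillis;
        while (!condition.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException(
                "Condition not met within " + waitForMillis + " ms");
          }
          Thread.sleep(checkEveryMillis);
        }
      }
    }

A test would call it as, for example, WaitUtil.waitFor(() -> FakeGroupMapping.getRequestCount() >= startingRequestCount + 1, 20, 10000) before making the assertions that previously followed the sleep.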





hadoop git commit: HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails. Contributed by Weiwei Yang.

2018-07-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 a607c02f1 -> a147098c4


HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled 
reliably fails. Contributed by Weiwei Yang.

(cherry picked from commit ccf2db7fc2688d262df3309007cb12a4dfedc179)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a147098c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a147098c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a147098c

Branch: refs/heads/branch-3.1
Commit: a147098c4fab435f1c8962e1fa5b22bf6a3b84f0
Parents: a607c02
Author: Kihwal Lee 
Authored: Thu Jul 19 11:19:19 2018 -0500
Committer: Kihwal Lee 
Committed: Thu Jul 19 11:19:19 2018 -0500

--
 .../apache/hadoop/security/TestGroupsCaching.java  | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a147098c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index 46e36b3..bba8152 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -561,23 +561,28 @@ public class TestGroupsCaching {
 
 // Then expire that entry
 timer.advance(4 * 1000);
+// Pause the getGroups operation and this will delay the cache refresh
+FakeGroupMapping.pause();
 
 // Now get the cache entry - it should return immediately
 // with the old value and the cache will not have completed
 // a request to getGroups yet.
 assertEquals(groups.getGroups("me").size(), 2);
 assertEquals(startingRequestCount, FakeGroupMapping.getRequestCount());
+// Resume the getGroups operation and the cache can get refreshed
+FakeGroupMapping.resume();
 
-// Now sleep for a short time and re-check the request count. It should 
have
-// increased, but the exception means the cache will not have updated
-Thread.sleep(50);
+// Now wait for the refresh done, because of the exception, we expect
+// a onFailure callback gets called and the counter for failure is 1
+waitForGroupCounters(groups, 0, 0, 0, 1);
 FakeGroupMapping.setThrowException(false);
 assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 2);
 
-// Now sleep another short time - the 3rd call to getGroups above
-// will have kicked off another refresh that updates the cache
-Thread.sleep(50);
+// Now the 3rd call to getGroups above will have kicked off
+// another refresh that updates the cache, since it no longer gives
+// exception, we now expect the counter for success is 1.
+waitForGroupCounters(groups, 0, 0, 1, 1);
 assertEquals(startingRequestCount + 2, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 3);
   }





hadoop git commit: HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails. Contributed by Weiwei Yang.

2018-07-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 177620897 -> 00ac5ba90


HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled 
reliably fails. Contributed by Weiwei Yang.

(cherry picked from commit ccf2db7fc2688d262df3309007cb12a4dfedc179)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00ac5ba9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00ac5ba9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00ac5ba9

Branch: refs/heads/branch-3.0
Commit: 00ac5ba90b66557447c24c20d7c379ef995662ad
Parents: 1776208
Author: Kihwal Lee 
Authored: Thu Jul 19 11:27:38 2018 -0500
Committer: Kihwal Lee 
Committed: Thu Jul 19 11:27:38 2018 -0500

--
 .../apache/hadoop/security/TestGroupsCaching.java  | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00ac5ba9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index 46e36b3..bba8152 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -561,23 +561,28 @@ public class TestGroupsCaching {
 
 // Then expire that entry
 timer.advance(4 * 1000);
+// Pause the getGroups operation and this will delay the cache refresh
+FakeGroupMapping.pause();
 
 // Now get the cache entry - it should return immediately
 // with the old value and the cache will not have completed
 // a request to getGroups yet.
 assertEquals(groups.getGroups("me").size(), 2);
 assertEquals(startingRequestCount, FakeGroupMapping.getRequestCount());
+// Resume the getGroups operation and the cache can get refreshed
+FakeGroupMapping.resume();
 
-// Now sleep for a short time and re-check the request count. It should 
have
-// increased, but the exception means the cache will not have updated
-Thread.sleep(50);
+// Now wait for the refresh done, because of the exception, we expect
+// a onFailure callback gets called and the counter for failure is 1
+waitForGroupCounters(groups, 0, 0, 0, 1);
 FakeGroupMapping.setThrowException(false);
 assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 2);
 
-// Now sleep another short time - the 3rd call to getGroups above
-// will have kicked off another refresh that updates the cache
-Thread.sleep(50);
+// Now the 3rd call to getGroups above will have kicked off
+// another refresh that updates the cache, since it no longer gives
+// exception, we now expect the counter for success is 1.
+waitForGroupCounters(groups, 0, 0, 1, 1);
 assertEquals(startingRequestCount + 2, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 3);
   }





hadoop git commit: HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails. Contributed by Weiwei Yang.

2018-07-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4c2ab2213 -> cf255dc01


HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled 
reliably fails. Contributed by Weiwei Yang.

(cherry picked from commit ccf2db7fc2688d262df3309007cb12a4dfedc179)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf255dc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf255dc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf255dc0

Branch: refs/heads/branch-2
Commit: cf255dc01ff302932f75143d002a9f104f72e1c0
Parents: 4c2ab22
Author: Kihwal Lee 
Authored: Thu Jul 19 11:29:58 2018 -0500
Committer: Kihwal Lee 
Committed: Thu Jul 19 11:29:58 2018 -0500

--
 .../apache/hadoop/security/TestGroupsCaching.java  | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf255dc0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index f015021..7b387ea 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -561,23 +561,28 @@ public class TestGroupsCaching {
 
 // Then expire that entry
 timer.advance(4 * 1000);
+// Pause the getGroups operation and this will delay the cache refresh
+FakeGroupMapping.pause();
 
 // Now get the cache entry - it should return immediately
 // with the old value and the cache will not have completed
 // a request to getGroups yet.
 assertEquals(groups.getGroups("me").size(), 2);
 assertEquals(startingRequestCount, FakeGroupMapping.getRequestCount());
+// Resume the getGroups operation and the cache can get refreshed
+FakeGroupMapping.resume();
 
-// Now sleep for a short time and re-check the request count. It should 
have
-// increased, but the exception means the cache will not have updated
-Thread.sleep(50);
+// Now wait for the refresh done, because of the exception, we expect
+// a onFailure callback gets called and the counter for failure is 1
+waitForGroupCounters(groups, 0, 0, 0, 1);
 FakeGroupMapping.setThrowException(false);
 assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 2);
 
-// Now sleep another short time - the 3rd call to getGroups above
-// will have kicked off another refresh that updates the cache
-Thread.sleep(50);
+// Now the 3rd call to getGroups above will have kicked off
+// another refresh that updates the cache, since it no longer gives
+// exception, we now expect the counter for success is 1.
+waitForGroupCounters(groups, 0, 0, 1, 1);
 assertEquals(startingRequestCount + 2, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 3);
   }





hadoop git commit: HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails. Contributed by Weiwei Yang.

2018-07-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 cf6eeea58 -> 510287fa2


HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled 
reliably fails. Contributed by Weiwei Yang.

(cherry picked from commit ccf2db7fc2688d262df3309007cb12a4dfedc179)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/510287fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/510287fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/510287fa

Branch: refs/heads/branch-2.9
Commit: 510287fa2942a26f9a810949582419edc74cc9d5
Parents: cf6eeea
Author: Kihwal Lee 
Authored: Thu Jul 19 11:32:16 2018 -0500
Committer: Kihwal Lee 
Committed: Thu Jul 19 11:32:16 2018 -0500

--
 .../apache/hadoop/security/TestGroupsCaching.java  | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/510287fa/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index f015021..7b387ea 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -561,23 +561,28 @@ public class TestGroupsCaching {
 
 // Then expire that entry
 timer.advance(4 * 1000);
+// Pause the getGroups operation and this will delay the cache refresh
+FakeGroupMapping.pause();
 
 // Now get the cache entry - it should return immediately
 // with the old value and the cache will not have completed
 // a request to getGroups yet.
 assertEquals(groups.getGroups("me").size(), 2);
 assertEquals(startingRequestCount, FakeGroupMapping.getRequestCount());
+// Resume the getGroups operation and the cache can get refreshed
+FakeGroupMapping.resume();
 
-// Now sleep for a short time and re-check the request count. It should 
have
-// increased, but the exception means the cache will not have updated
-Thread.sleep(50);
+// Now wait for the refresh done, because of the exception, we expect
+// a onFailure callback gets called and the counter for failure is 1
+waitForGroupCounters(groups, 0, 0, 0, 1);
 FakeGroupMapping.setThrowException(false);
 assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 2);
 
-// Now sleep another short time - the 3rd call to getGroups above
-// will have kicked off another refresh that updates the cache
-Thread.sleep(50);
+// Now the 3rd call to getGroups above will have kicked off another
+// refresh that updates the cache. Since it no longer throws an
+// exception, we now expect the success counter to be 1.
+waitForGroupCounters(groups, 0, 0, 1, 1);
 assertEquals(startingRequestCount + 2, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 3);
   }
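
The essence of this fix is replacing the fixed Thread.sleep(50) calls with an
explicit wait on the background-refresh counters, so the assertions only run
once the refresh thread has actually reported a success or a failure. Below is
a minimal sketch of such a polling wait helper; the counter supplier and the
10 ms poll interval are illustrative assumptions, not the actual
waitForGroupCounters() implementation from the patch.

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

final class CounterWaiter {
  // Poll a counter until it reaches the expected value, or fail after a timeout.
  static void waitForCounter(Supplier<Long> counter, long expected, long timeoutMs)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (counter.get() != expected) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("counter did not reach " + expected);
      }
      Thread.sleep(10); // short poll interval instead of a single fixed sleep
    }
  }
}

Waiting on the counters makes the test deterministic regardless of how long the
refresh thread takes to run, which is exactly why the fixed 50 ms sleeps were
flaky.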





hadoop git commit: HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails. Contributed by Weiwei Yang.

2018-07-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 21aa980ee -> 4b1ee3685


HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled 
reliably fails. Contributed by Weiwei Yang.

(cherry picked from commit ccf2db7fc2688d262df3309007cb12a4dfedc179)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b1ee368
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b1ee368
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b1ee368

Branch: refs/heads/branch-2.8
Commit: 4b1ee36857c478971153f6151c0902623fb6da7d
Parents: 21aa980
Author: Kihwal Lee 
Authored: Thu Jul 19 11:35:28 2018 -0500
Committer: Kihwal Lee 
Committed: Thu Jul 19 11:35:28 2018 -0500

--
 .../apache/hadoop/security/TestGroupsCaching.java  | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b1ee368/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index 2b47d41..97d0426 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -560,23 +560,28 @@ public class TestGroupsCaching {
 
 // Then expire that entry
 timer.advance(4 * 1000);
+// Pause the getGroups operation; this delays the cache refresh
+FakeGroupMapping.pause();
 
 // Now get the cache entry - it should return immediately
 // with the old value and the cache will not have completed
 // a request to getGroups yet.
 assertEquals(groups.getGroups("me").size(), 2);
 assertEquals(startingRequestCount, FakeGroupMapping.getRequestCount());
+// Resume the getGroups operation so the cache can be refreshed
+FakeGroupMapping.resume();
 
-// Now sleep for a short time and re-check the request count. It should have
-// increased, but the exception means the cache will not have updated
-Thread.sleep(50);
+// Now wait for the refresh to finish. Because of the exception, we expect
+// the onFailure callback to be invoked and the failure counter to be 1.
+waitForGroupCounters(groups, 0, 0, 0, 1);
 FakeGroupMapping.setThrowException(false);
 assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 2);
 
-// Now sleep another short time - the 3rd call to getGroups above
-// will have kicked off another refresh that updates the cache
-Thread.sleep(50);
+// Now the 3rd call to getGroups above will have kicked off another
+// refresh that updates the cache. Since it no longer throws an
+// exception, we now expect the success counter to be 1.
+waitForGroupCounters(groups, 0, 0, 1, 1);
 assertEquals(startingRequestCount + 2, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 3);
   }





hadoop git commit: HDFS-9904. testCheckpointCancellationDuringUpload occasionally fails. Contributed by Lin Yiqun.

2016-03-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5de848cd5 -> d45740178


HDFS-9904. testCheckpointCancellationDuringUpload occasionally fails. 
Contributed by Lin Yiqun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4574017
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4574017
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4574017

Branch: refs/heads/trunk
Commit: d4574017845cfa7521e703f80efd404afd09b8c4
Parents: 5de848c
Author: Kihwal Lee 
Authored: Tue Mar 15 10:52:47 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Mar 15 10:52:47 2016 -0500

--
 .../hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java  | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4574017/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 234bc7b..7c0ed7b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -291,6 +291,11 @@ public class TestStandbyCheckpoints {
*/
   @Test(timeout=6)
   public void testCheckpointCancellationDuringUpload() throws Exception {
+// Set dfs.namenode.checkpoint.txns differently on the first NN to avoid it
+// doing checkpoint when it becomes a standby
+cluster.getConfiguration(0).setInt(
+DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1000);
+
 // don't compress, we want a big image
 for (int i = 0; i < NUM_NNS; i++) {
   cluster.getConfiguration(i).setBoolean(
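
The handful of added lines above are the whole fix. The shared test
configuration presumably uses a very low checkpoint threshold so that
checkpoints happen quickly; with that setting, the first NN would kick off a
checkpoint of its own as soon as it transitions to standby and race with the
image upload the test is trying to cancel. Raising dfs.namenode.checkpoint.txns
to 1000 on that NN alone keeps it quiet for the duration of the test. The
sketch below is a simplified illustration of the trigger condition the setting
feeds into; the class and parameter names are assumptions, not the actual
StandbyCheckpointer code.

final class CheckpointTrigger {
  // Checkpoint once enough edits have accumulated or enough time has passed.
  static boolean shouldCheckpoint(long uncheckpointedTxns, long secsSinceLast,
      long checkpointTxns, long checkpointPeriodSecs) {
    return uncheckpointedTxns >= checkpointTxns
        || secsSinceLast >= checkpointPeriodSecs;
  }
}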



hadoop git commit: HDFS-9904. testCheckpointCancellationDuringUpload occasionally fails. Contributed by Lin Yiqun.

2016-03-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d59542953 -> 8b00d9de2


HDFS-9904. testCheckpointCancellationDuringUpload occasionally fails. 
Contributed by Lin Yiqun.

(cherry picked from commit d4574017845cfa7521e703f80efd404afd09b8c4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b00d9de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b00d9de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b00d9de

Branch: refs/heads/branch-2
Commit: 8b00d9de251e11e7d31b0ca7f51b05a34d939d7e
Parents: d595429
Author: Kihwal Lee 
Authored: Tue Mar 15 10:54:37 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Mar 15 10:54:37 2016 -0500

--
 .../hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java  | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b00d9de/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index e0c5563..e17e552 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -284,6 +284,11 @@ public class TestStandbyCheckpoints {
*/
   @Test(timeout=6)
   public void testCheckpointCancellationDuringUpload() throws Exception {
+// Set dfs.namenode.checkpoint.txns differently on the first NN to avoid it
+// doing checkpoint when it becomes a standby
+cluster.getConfiguration(0).setInt(
+DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1000);
+
 // don't compress, we want a big image
 cluster.getConfiguration(0).setBoolean(
 DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);



hadoop git commit: HDFS-9904. testCheckpointCancellationDuringUpload occasionally fails. Contributed by Lin Yiqun.

2016-03-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d59c2c451 -> fce39557d


HDFS-9904. testCheckpointCancellationDuringUpload occasionally fails. 
Contributed by Lin Yiqun.

(cherry picked from commit d4574017845cfa7521e703f80efd404afd09b8c4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fce39557
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fce39557
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fce39557

Branch: refs/heads/branch-2.8
Commit: fce39557d3c619270aaa1af1b56796e3376bf582
Parents: d59c2c4
Author: Kihwal Lee 
Authored: Tue Mar 15 10:55:10 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Mar 15 10:55:10 2016 -0500

--
 .../hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java  | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fce39557/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index e0c5563..e17e552 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -284,6 +284,11 @@ public class TestStandbyCheckpoints {
*/
   @Test(timeout=6)
   public void testCheckpointCancellationDuringUpload() throws Exception {
+// Set dfs.namenode.checkpoint.txns differently on the first NN to avoid it
+// doing checkpoint when it becomes a standby
+cluster.getConfiguration(0).setInt(
+DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1000);
+
 // don't compress, we want a big image
 cluster.getConfiguration(0).setBoolean(
 DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);



hadoop git commit: HDFS-9904. testCheckpointCancellationDuringUpload occasionally fails. Contributed by Lin Yiqun.

2016-03-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 b409ce89e -> b8e73cff8


HDFS-9904. testCheckpointCancellationDuringUpload occasionally fails. 
Contributed by Lin Yiqun.

(cherry picked from commit d4574017845cfa7521e703f80efd404afd09b8c4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8e73cff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8e73cff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8e73cff

Branch: refs/heads/branch-2.7
Commit: b8e73cff8f8b52a9fd48d19479c63cb8d00e97e6
Parents: b409ce8
Author: Kihwal Lee 
Authored: Tue Mar 15 10:56:46 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Mar 15 10:56:46 2016 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java  | 5 +
 2 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8e73cff/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7842890..d29e325 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -141,6 +141,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9812. Streamer threads leak if failure happens when closing
 DFSOutputStream. (Lin Yiqun via aajisaka)
 
+HDFS-9904. testCheckpointCancellationDuringUpload occasionally fails.
+(Lin Yiqun via kihwal)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8e73cff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 33af0e2..6a0fcd7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -283,6 +283,11 @@ public class TestStandbyCheckpoints {
*/
   @Test(timeout=6)
   public void testCheckpointCancellationDuringUpload() throws Exception {
+// Set dfs.namenode.checkpoint.txns differently on the first NN to avoid it
+// doing checkpoint when it becomes a standby
+cluster.getConfiguration(0).setInt(
+DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1000);
+
 // don't compress, we want a big image
 cluster.getConfiguration(0).setBoolean(
 DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);



hadoop git commit: HDFS-9874. Long living DataXceiver threads cause volume shutdown to block. Contributed by Rushabh Shah.

2016-03-18 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 73b5a44b0 -> 242c7f1fe


HDFS-9874. Long living DataXceiver threads cause volume shutdown to block. 
Contributed by Rushabh Shah.

(cherry picked from commit 63c966a3fbeb675959fc4101e65de9f57aecd17d)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/242c7f1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/242c7f1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/242c7f1f

Branch: refs/heads/branch-2.8
Commit: 242c7f1fee664b4d609a6c72e899f10816430f65
Parents: 73b5a44
Author: Kihwal Lee 
Authored: Fri Mar 18 10:33:13 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 18 10:33:13 2016 -0500

--
 .../hdfs/server/datanode/ReplicaInPipeline.java |  7 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 13 
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  6 ++
 .../fsdataset/impl/TestFsDatasetImpl.java   | 66 
 4 files changed, 92 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/242c7f1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index d9406f0..5caca15 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -183,6 +183,13 @@ public class ReplicaInPipeline extends ReplicaInfo
 this.writer = writer;
   }
   
+  public void interruptThread() {
+if (writer != null && writer != Thread.currentThread() 
+&& writer.isAlive()) {
+  this.writer.interrupt();
+}
+  }
+
   @Override  // Object
   public boolean equals(Object o) {
 return super.equals(o);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/242c7f1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 163c8d0..2f16fc5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -3152,5 +3152,18 @@ class FsDatasetImpl implements 
FsDatasetSpi {
 evictLazyPersistBlocks(bytesNeeded);
 return cacheManager.reserve(bytesNeeded) > 0;
   }
+
+  synchronized void stopAllDataxceiverThreads(FsVolumeImpl volume) {
+for (String blockPoolId : volumeMap.getBlockPoolList()) {
+  Collection<ReplicaInfo> replicas = volumeMap.replicas(blockPoolId);
+  for (ReplicaInfo replicaInfo : replicas) {
+if (replicaInfo instanceof ReplicaInPipeline
+&& replicaInfo.getVolume().equals(volume)) {
+  ReplicaInPipeline replicaInPipeline = (ReplicaInPipeline) 
replicaInfo;
+  replicaInPipeline.interruptThread();
+}
+  }
+}
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/242c7f1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index e02c293..ca7610d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -239,6 +239,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
 Preconditions.checkState(reference.getReferenceCount() > 0);
   }
 
+  @VisibleForTesting
+  int getReferenceCount() {
+return this.reference.getReferenceCount();
+  }
+
   /**
* Close this 

hadoop git commit: HDFS-9874. Long living DataXceiver threads cause volume shutdown to block. Contributed by Rushabh Shah.

2016-03-18 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 bb5749b1e -> a66d2a2a8


HDFS-9874. Long living DataXceiver threads cause volume shutdown to block. 
Contributed by Rushabh Shah.

(cherry picked from commit 63c966a3fbeb675959fc4101e65de9f57aecd17d)
(cherry picked from commit 242c7f1fee664b4d609a6c72e899f10816430f65)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a66d2a2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a66d2a2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a66d2a2a

Branch: refs/heads/branch-2.7
Commit: a66d2a2a85e0db9ef164dfcf3a50d3970b51f175
Parents: bb5749b
Author: Kihwal Lee 
Authored: Fri Mar 18 10:38:33 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 18 10:38:33 2016 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/datanode/ReplicaInPipeline.java |  7 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 13 
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  6 ++
 .../fsdataset/impl/TestFsDatasetImpl.java   | 66 
 5 files changed, 95 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a66d2a2a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0ed2127..f01e697 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -146,6 +146,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9904. testCheckpointCancellationDuringUpload occasionally fails.
 (Lin Yiqun via kihwal)
 
+HDFS-9874. Long living DataXceiver threads cause volume shutdown to block.
+(Rushabh Shah via kihwal)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a66d2a2a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index cc55f85..27c46e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -173,6 +173,13 @@ public class ReplicaInPipeline extends ReplicaInfo
 this.writer = writer;
   }
   
+  public void interruptThread() {
+if (writer != null && writer != Thread.currentThread() 
+&& writer.isAlive()) {
+  this.writer.interrupt();
+}
+  }
+
   @Override  // Object
   public boolean equals(Object o) {
 return super.equals(o);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a66d2a2a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index f4e10f1..4673029 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -3048,5 +3048,18 @@ class FsDatasetImpl implements 
FsDatasetSpi {
   s.add(blockId);
 }
   }
+
+  synchronized void stopAllDataxceiverThreads(FsVolumeImpl volume) {
+for (String blockPoolId : volumeMap.getBlockPoolList()) {
+  Collection<ReplicaInfo> replicas = volumeMap.replicas(blockPoolId);
+  for (ReplicaInfo replicaInfo : replicas) {
+if (replicaInfo instanceof ReplicaInPipeline
+&& replicaInfo.getVolume().equals(volume)) {
+  ReplicaInPipeline replicaInPipeline = (ReplicaInPipeline) 
replicaInfo;
+  replicaInPipeline.interruptThread();
+}
+  }
+}
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a66d2a2a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ser

hadoop git commit: HDFS-9874. Long living DataXceiver threads cause volume shutdown to block. Contributed by Rushabh Shah.

2016-03-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f69a6c363 -> ead63bc29


HDFS-9874. Long living DataXceiver threads cause volume shutdown to block. 
Contributed by Rushabh Shah.

(cherry picked from commit 63c966a3fbeb675959fc4101e65de9f57aecd17d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ead63bc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ead63bc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ead63bc2

Branch: refs/heads/branch-2
Commit: ead63bc29770ac32d611dab542d9312e1bc3d44c
Parents: f69a6c3
Author: Kihwal Lee 
Authored: Fri Mar 18 10:26:57 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 18 10:26:57 2016 -0500

--
 .../hdfs/server/datanode/ReplicaInPipeline.java |  7 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 13 
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  6 ++
 .../fsdataset/impl/TestFsDatasetImpl.java   | 66 
 4 files changed, 92 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ead63bc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index d9406f0..5caca15 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -183,6 +183,13 @@ public class ReplicaInPipeline extends ReplicaInfo
 this.writer = writer;
   }
   
+  public void interruptThread() {
+if (writer != null && writer != Thread.currentThread() 
+&& writer.isAlive()) {
+  this.writer.interrupt();
+}
+  }
+
   @Override  // Object
   public boolean equals(Object o) {
 return super.equals(o);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ead63bc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 3e76bdc..1ebd204 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -3159,5 +3159,18 @@ class FsDatasetImpl implements 
FsDatasetSpi {
   public void setTimer(Timer newTimer) {
 this.timer = newTimer;
   }
+
+  synchronized void stopAllDataxceiverThreads(FsVolumeImpl volume) {
+for (String blockPoolId : volumeMap.getBlockPoolList()) {
+  Collection<ReplicaInfo> replicas = volumeMap.replicas(blockPoolId);
+  for (ReplicaInfo replicaInfo : replicas) {
+if (replicaInfo instanceof ReplicaInPipeline
+&& replicaInfo.getVolume().equals(volume)) {
+  ReplicaInPipeline replicaInPipeline = (ReplicaInPipeline) 
replicaInfo;
+  replicaInPipeline.interruptThread();
+}
+  }
+}
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ead63bc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 95deef8..6971d80 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -240,6 +240,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
 Preconditions.checkState(reference.getReferenceCount() > 0);
   }
 
+  @VisibleForTesting
+  int getReferenceCount() {
+return this.reference.getReferenceCount();
+  }
+
   /**
* Close this volume.
* @throws IOException if the volume is closed.
@@ -247,6 +252,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   void setClosed()

hadoop git commit: HDFS-9874. Long living DataXceiver threads cause volume shutdown to block. Contributed by Rushabh Shah.

2016-03-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk dc951e606 -> 63c966a3f


HDFS-9874. Long living DataXceiver threads cause volume shutdown to block. 
Contributed by Rushabh Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63c966a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63c966a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63c966a3

Branch: refs/heads/trunk
Commit: 63c966a3fbeb675959fc4101e65de9f57aecd17d
Parents: dc951e6
Author: Kihwal Lee 
Authored: Fri Mar 18 10:24:59 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 18 10:24:59 2016 -0500

--
 .../hdfs/server/datanode/ReplicaInPipeline.java |  7 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 13 
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  6 ++
 .../fsdataset/impl/TestFsDatasetImpl.java   | 66 
 4 files changed, 92 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63c966a3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index d9406f0..5caca15 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -183,6 +183,13 @@ public class ReplicaInPipeline extends ReplicaInfo
 this.writer = writer;
   }
   
+  public void interruptThread() {
+if (writer != null && writer != Thread.currentThread() 
+&& writer.isAlive()) {
+  this.writer.interrupt();
+}
+  }
+
   @Override  // Object
   public boolean equals(Object o) {
 return super.equals(o);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63c966a3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 2e8226a..d6a0df6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -3112,5 +3112,18 @@ class FsDatasetImpl implements 
FsDatasetSpi {
   public void setTimer(Timer newTimer) {
 this.timer = newTimer;
   }
+
+  synchronized void stopAllDataxceiverThreads(FsVolumeImpl volume) {
+for (String blockPoolId : volumeMap.getBlockPoolList()) {
+  Collection<ReplicaInfo> replicas = volumeMap.replicas(blockPoolId);
+  for (ReplicaInfo replicaInfo : replicas) {
+if (replicaInfo instanceof ReplicaInPipeline
+&& replicaInfo.getVolume().equals(volume)) {
+  ReplicaInPipeline replicaInPipeline = (ReplicaInPipeline) 
replicaInfo;
+  replicaInPipeline.interruptThread();
+}
+  }
+}
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63c966a3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 857d0ad..0d060f9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -240,6 +240,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
 Preconditions.checkState(reference.getReferenceCount() > 0);
   }
 
+  @VisibleForTesting
+  int getReferenceCount() {
+return this.reference.getReferenceCount();
+  }
+
   /**
* Close this volume.
* @throws IOException if the volume is closed.
@@ -247,6 +252,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   void setClosed() throws IOException {
 try {
   this.reference.setClosed();
+  dataset.
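
Two pieces do the work in this change: ReplicaInPipeline.interruptThread()
interrupts the writer thread only if it exists, is still alive, and is not the
calling thread, so a thread that is closing the volume never interrupts itself;
FsDatasetImpl.stopAllDataxceiverThreads() then walks the replica map and applies
that interrupt to every in-pipeline replica on the failing volume, letting the
volume's reference count drain so the shutdown can complete. A self-contained
sketch of the guarded-interrupt pattern is below; the Writer class is a
stand-in for illustration, not a Hadoop type.

final class Writer {
  private volatile Thread writer;

  void setWriter(Thread t) {
    writer = t;
  }

  // Interrupt the writer only if it is a different, still-running thread,
  // so the thread performing the volume shutdown never interrupts itself.
  void interruptWriter() {
    Thread w = writer;
    if (w != null && w != Thread.currentThread() && w.isAlive()) {
      w.interrupt();
    }
  }
}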

hadoop git commit: Fix CHANGES.txt

2016-03-28 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 ec459e396 -> efb210cc3


Fix CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/efb210cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/efb210cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/efb210cc

Branch: refs/heads/branch-2.7
Commit: efb210cc30dcbfb9da1edd74d5ec995d321366af
Parents: ec459e3
Author: Kihwal Lee 
Authored: Mon Mar 28 11:31:07 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Mar 28 11:31:07 2016 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/efb210cc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 838dd5d..04b3524 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -46,6 +46,9 @@ Release 2.7.3 - UNRELEASED
 
 HDFS-9860. Backport HDFS-9638 to branch-2.7. (Wei-Chiu Chuang via aajisaka)
 
+HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
+classes at runtime. (Sean Busbey via atm)
+
   OPTIMIZATIONS
 
 HDFS-8845. DiskChecker should not traverse the entire tree (Chang Li via
@@ -167,9 +170,6 @@ Release 2.7.2 - 2016-01-25
 
 HDFS-9574. Reduce client failures during datanode restart (kihwal)
 
-HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
-classes at runtime. (Sean Busbey via atm)
-
   OPTIMIZATIONS
 
 HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)



hadoop git commit: HDFS-10178. Permanent write failures can happen if pipeline recoveries occur for the first packet. Contributed by Kihwal Lee.

2016-04-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 154d2532c -> a7d1fb0cd


HDFS-10178. Permanent write failures can happen if pipeline recoveries occur 
for the first packet. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7d1fb0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7d1fb0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7d1fb0c

Branch: refs/heads/trunk
Commit: a7d1fb0cd2fdbf830602eb4dbbd9bbe62f4d5584
Parents: 154d253
Author: Kihwal Lee 
Authored: Mon Apr 4 16:39:23 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 4 16:40:00 2016 -0500

--
 .../hdfs/server/datanode/BlockReceiver.java |  2 +
 .../hdfs/server/datanode/BlockSender.java   |  6 ++-
 .../server/datanode/DataNodeFaultInjector.java  |  2 +
 .../TestClientProtocolForPipelineRecovery.java  | 53 
 4 files changed, 62 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 2e4ee02..fb0c1c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -574,6 +574,8 @@ class BlockReceiver implements Closeable {
 if (mirrorOut != null && !mirrorError) {
   try {
 long begin = Time.monotonicNow();
+// For testing. Normally no-op.
+DataNodeFaultInjector.get().stopSendingPacketDownstream();
 packetReceiver.mirrorPacketTo(mirrorOut);
 mirrorOut.flush();
 long now = Time.monotonicNow();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 773a64c..398935d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -300,11 +300,15 @@ class BlockSender implements java.io.Closeable {
 
 // The meta file will contain only the header if the NULL checksum
 // type was used, or if the replica was written to transient 
storage.
+// Also, when only the header portion of a data packet was transferred
+// and the pipeline then breaks, the meta file can contain only the
+// header while the block data file is zero bytes long.
 // Checksum verification is not performed for replicas on transient
 // storage.  The header is important for determining the checksum
 // type later when lazy persistence copies the block to 
non-transient
 // storage and computes the checksum.
-if (metaIn.getLength() > BlockMetadataHeader.getHeaderSize()) {
+if (!replica.isOnTransientStorage() &&
+metaIn.getLength() >= BlockMetadataHeader.getHeaderSize()) {
   checksumIn = new DataInputStream(new BufferedInputStream(
   metaIn, IO_FILE_BUFFER_SIZE));
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 0e38694..7327420 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -50,5 +50,7 @@ public class DataNodeFaultInjector {
 return false;
   }
 
+  public void stopSendingPacketDownstream() throws IOException {}
+
   public void noRegistration() throws IOExcepti
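
The BlockReceiver change adds a fault-injection hook immediately before a
packet is mirrored to the downstream DataNode, which is what lets the new test
freeze the pipeline right after the header packet and force a recovery at that
exact point. Below is a sketch of what a test-side injector might look like;
how the instance gets installed (a static setter or field on
DataNodeFaultInjector) is an assumption here, not something taken from this
patch.

import java.io.IOException;
import java.util.concurrent.CountDownLatch;

class PausingInjector /* would extend DataNodeFaultInjector in a real test */ {
  private final CountDownLatch resume = new CountDownLatch(1);

  // Invoked by BlockReceiver just before mirroring a packet downstream.
  public void stopSendingPacketDownstream() throws IOException {
    try {
      resume.await(); // hold the pipeline until the test releases it
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
  }

  void release() {
    resume.countDown();
  }
}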

hadoop git commit: HDFS-10178. Permanent write failures can happen if pipeline recoveries occur for the first packet. Contributed by Kihwal Lee.

2016-04-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c981efb0f -> 9e9143329


HDFS-10178. Permanent write failures can happen if pipeline recoveries occur 
for the first packet. Contributed by Kihwal Lee.

(cherry picked from commit a7d1fb0cd2fdbf830602eb4dbbd9bbe62f4d5584)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e914332
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e914332
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e914332

Branch: refs/heads/branch-2
Commit: 9e91433295cf81aeb1c9a77131cf13bae21f6431
Parents: c981efb
Author: Kihwal Lee 
Authored: Mon Apr 4 16:40:44 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 4 16:40:44 2016 -0500

--
 .../hdfs/server/datanode/BlockReceiver.java |  2 +
 .../hdfs/server/datanode/BlockSender.java   |  6 ++-
 .../server/datanode/DataNodeFaultInjector.java  |  2 +
 .../TestClientProtocolForPipelineRecovery.java  | 53 
 4 files changed, 62 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e914332/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 2e4ee02..fb0c1c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -574,6 +574,8 @@ class BlockReceiver implements Closeable {
 if (mirrorOut != null && !mirrorError) {
   try {
 long begin = Time.monotonicNow();
+// For testing. Normally no-op.
+DataNodeFaultInjector.get().stopSendingPacketDownstream();
 packetReceiver.mirrorPacketTo(mirrorOut);
 mirrorOut.flush();
 long now = Time.monotonicNow();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e914332/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 9f55895..6a35fce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -301,11 +301,15 @@ class BlockSender implements java.io.Closeable {
 
 // The meta file will contain only the header if the NULL checksum
 // type was used, or if the replica was written to transient 
storage.
+// Also, when only the header portion of a data packet was transferred
+// and the pipeline then breaks, the meta file can contain only the
+// header while the block data file is zero bytes long.
 // Checksum verification is not performed for replicas on transient
 // storage.  The header is important for determining the checksum
 // type later when lazy persistence copies the block to 
non-transient
 // storage and computes the checksum.
-if (metaIn.getLength() > BlockMetadataHeader.getHeaderSize()) {
+if (!replica.isOnTransientStorage() &&
+metaIn.getLength() >= BlockMetadataHeader.getHeaderSize()) {
   checksumIn = new DataInputStream(new BufferedInputStream(
   metaIn, IO_FILE_BUFFER_SIZE));
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e914332/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 0e38694..7327420 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -50,5 +50,7 @@ public class DataNodeFaultInjector {
 return false;
   }
 
+  public void stopSendingPacketDownst

hadoop git commit: HDFS-10178. Permanent write failures can happen if pipeline recoveries occur for the first packet. Contributed by Kihwal Lee.

2016-04-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 97cda4077 -> 7d671cad3


HDFS-10178. Permanent write failures can happen if pipeline recoveries occur 
for the first packet. Contributed by Kihwal Lee.

(cherry picked from commit a7d1fb0cd2fdbf830602eb4dbbd9bbe62f4d5584)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d671cad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d671cad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d671cad

Branch: refs/heads/branch-2.8
Commit: 7d671cad3f96b2271e913a59a3b568202799b721
Parents: 97cda40
Author: Kihwal Lee 
Authored: Mon Apr 4 16:41:23 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 4 16:41:23 2016 -0500

--
 .../hdfs/server/datanode/BlockReceiver.java |  2 +
 .../hdfs/server/datanode/BlockSender.java   |  6 ++-
 .../server/datanode/DataNodeFaultInjector.java  |  2 +
 .../TestClientProtocolForPipelineRecovery.java  | 53 
 4 files changed, 62 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d671cad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 1a1cc27..bbb9096 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -566,6 +566,8 @@ class BlockReceiver implements Closeable {
 if (mirrorOut != null && !mirrorError) {
   try {
 long begin = Time.monotonicNow();
+// For testing. Normally no-op.
+DataNodeFaultInjector.get().stopSendingPacketDownstream();
 packetReceiver.mirrorPacketTo(mirrorOut);
 mirrorOut.flush();
 long now = Time.monotonicNow();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d671cad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 9f55895..6a35fce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -301,11 +301,15 @@ class BlockSender implements java.io.Closeable {
 
 // The meta file will contain only the header if the NULL checksum
 // type was used, or if the replica was written to transient 
storage.
+// Also, when only the header portion of a data packet was transferred
+// and the pipeline then breaks, the meta file can contain only the
+// header while the block data file is zero bytes long.
 // Checksum verification is not performed for replicas on transient
 // storage.  The header is important for determining the checksum
 // type later when lazy persistence copies the block to 
non-transient
 // storage and computes the checksum.
-if (metaIn.getLength() > BlockMetadataHeader.getHeaderSize()) {
+if (!replica.isOnTransientStorage() &&
+metaIn.getLength() >= BlockMetadataHeader.getHeaderSize()) {
   checksumIn = new DataInputStream(new BufferedInputStream(
   metaIn, IO_FILE_BUFFER_SIZE));
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d671cad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 0e38694..7327420 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -50,5 +50,7 @@ public class DataNodeFaultInjector {
 return false;
   }
 
+  public void stopSendingPacketDownst

hadoop git commit: HDFS-10178. Permanent write failures can happen if pipeline recoveries occur for the first packet. Contributed by Kihwal Lee.

2016-04-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 f242b78fd -> 69d4fa0de


HDFS-10178. Permanent write failures can happen if pipeline recoveries occur 
for the first packet. Contributed by Kihwal Lee.

(cherry picked from commit a7d1fb0cd2fdbf830602eb4dbbd9bbe62f4d5584)
TestClientProtocolForPipelineRecovery modified to use DFSConfigKeys instead of 
HdfsClientConfigKeys.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69d4fa0d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69d4fa0d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69d4fa0d

Branch: refs/heads/branch-2.7
Commit: 69d4fa0deb27a4992cee15203277b2caa9d26d79
Parents: f242b78
Author: Kihwal Lee 
Authored: Mon Apr 4 16:52:52 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 4 16:54:04 2016 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/datanode/BlockReceiver.java |  2 +
 .../hdfs/server/datanode/BlockSender.java   |  6 ++-
 .../server/datanode/DataNodeFaultInjector.java  |  2 +
 .../TestClientProtocolForPipelineRecovery.java  | 53 
 5 files changed, 65 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d4fa0d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 04b3524..6c881ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -152,6 +152,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9874. Long living DataXceiver threads cause volume shutdown to block.
 (Rushabh Shah via kihwal)
 
+HDFS-10178. Permanent write failures can happen if pipeline recoveries
+occur for the first packet (kihwal)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d4fa0d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 27783e2..946e2de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -553,6 +553,8 @@ class BlockReceiver implements Closeable {
 if (mirrorOut != null && !mirrorError) {
   try {
 long begin = Time.monotonicNow();
+// For testing. Normally no-op.
+DataNodeFaultInjector.get().stopSendingPacketDownstream();
 packetReceiver.mirrorPacketTo(mirrorOut);
 mirrorOut.flush();
 long now = Time.monotonicNow();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d4fa0d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index e76b93a..0cb387e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -292,11 +292,15 @@ class BlockSender implements java.io.Closeable {
 
 // The meta file will contain only the header if the NULL checksum
 // type was used, or if the replica was written to transient 
storage.
+// Also, when only the header portion of a data packet was transferred
+// and the pipeline then breaks, the meta file can contain only the
+// header while the block data file is zero bytes long.
 // Checksum verification is not performed for replicas on transient
 // storage.  The header is important for determining the checksum
 // type later when lazy persistence copies the block to 
non-transient
 // storage and computes the checksum.
-if (metaIn.getLength() > BlockMetadataHeader.getHeaderSize()) {
+if (!replica.isOnTransientStorage() &&
+metaIn.getLength() >= BlockMetadataHeader.getHeaderSize()) {
   checksumIn = new DataInputStream(new BufferedInputStream(
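
The BlockSender change relaxes the guard for opening the checksum stream from a
strict greater-than to greater-than-or-equal. A replica whose pipeline broke
after only the header packet was transferred ends up with a meta file that
holds nothing but the header (its length equals
BlockMetadataHeader.getHeaderSize()) and a zero-byte block file; with the old
guard that header was never read even though, as the comment in the diff notes,
it is what carries the checksum type. Replicas on transient storage are now
excluded explicitly instead of relying on the length test. A side-by-side
sketch of the two predicates, with headerSize standing in for
BlockMetadataHeader.getHeaderSize():

final class ChecksumStreamGuard {
  // Old behaviour: header-only meta files were skipped.
  static boolean oldGuard(long metaLen, long headerSize, boolean onTransientStorage) {
    return metaLen > headerSize;
  }

  // New behaviour: header-only meta files are accepted, transient storage is not.
  static boolean newGuard(long metaLen, long headerSize, boolean onTransientStorage) {
    return !onTransientStorage && metaLen >= headerSize;
  }
}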

hadoop git commit: HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eeff2e35f -> ef3da8235


HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination 
paths. Contributed by Kuhu Shukla.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef3da823
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef3da823
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef3da823

Branch: refs/heads/branch-2
Commit: ef3da823573cbf16fd1d84479330dd457f95e0ff
Parents: eeff2e3
Author: Kihwal Lee 
Authored: Tue Apr 5 09:07:24 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 09:16:05 2016 -0500

--
 .../apache/hadoop/fs/shell/MoveCommands.java|  6 +++-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 33 +++-
 2 files changed, 37 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef3da823/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 1c7316a..20cecb4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
 
 @Override
 protected void processPath(PathData src, PathData target) throws 
IOException {
-  if (!src.fs.getUri().equals(target.fs.getUri())) {
+  String srcUri = src.fs.getUri().getScheme() + "://" +
+  src.fs.getUri().getHost();
+  String dstUri = target.fs.getUri().getScheme() + "://" +
+  target.fs.getUri().getHost();
+  if (!srcUri.equals(dstUri)) {
 throw new PathIOException(src.toString(),
 "Does not match target filesystem");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef3da823/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index b396762..a9791b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -521,7 +521,38 @@ public class TestDFSShell {
   }
 }
   }
-  
+
+  @Test
+  public void testMoveWithTargetPortEmpty() throws Exception {
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .format(true)
+  .numDataNodes(2)
+  .nameNodePort(8020)
+  .waitSafeMode(true)
+  .build();
+  FileSystem srcFs = cluster.getFileSystem();
+  FsShell shell = new FsShell();
+  shell.setConf(conf);
+  String[] argv = new String[2];
+  argv[0] = "-mkdir";
+  argv[1] = "/testfile";
+  ToolRunner.run(shell, argv);
+  argv = new String[3];
+  argv[0] = "-mv";
+  argv[1] = srcFs.getUri() + "/testfile";
+  argv[2] = "hdfs://localhost/testfile2";
+  int ret = ToolRunner.run(shell, argv);
+  assertEquals("mv should have succeeded", 0, ret);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
   @Test (timeout = 3)
   public void testURIPaths() throws Exception {
 Configuration srcConf = new HdfsConfiguration();
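
The MoveCommands change makes -mv compare only the scheme and host of the
source and target filesystems, so a source URI that names the port explicitly
(hdfs://localhost:8020/...) and a target that omits it (hdfs://localhost/...)
are treated as the same filesystem, which is exactly the case the new
testMoveWithTargetPortEmpty test exercises. A small sketch of the comparison
using plain java.net.URI (the shell code builds the same strings from the
FileSystem URIs):

import java.net.URI;

final class SameFs {
  static boolean sameFileSystem(URI src, URI dst) {
    String srcKey = src.getScheme() + "://" + src.getHost();
    String dstKey = dst.getScheme() + "://" + dst.getHost();
    return srcKey.equals(dstKey); // the port is deliberately ignored
  }

  public static void main(String[] args) {
    URI withPort = URI.create("hdfs://localhost:8020/testfile");
    URI withoutPort = URI.create("hdfs://localhost/testfile2");
    // The full URIs differ only in the port, so the old equals() check
    // rejected the move; the scheme+host comparison accepts it.
    System.out.println(sameFileSystem(withPort, withoutPort)); // prints true
  }
}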



hadoop git commit: HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 9d3c51eb5 -> f9764d073


HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination 
paths. Contributed by Kuhu Shukla.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

(cherry picked from commit ef3da823573cbf16fd1d84479330dd457f95e0ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9764d07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9764d07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9764d07

Branch: refs/heads/branch-2.8
Commit: f9764d073fd8bc70ae63614f166aeac0325d6a4d
Parents: 9d3c51e
Author: Kihwal Lee 
Authored: Tue Apr 5 09:16:51 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 09:16:51 2016 -0500

--
 .../apache/hadoop/fs/shell/MoveCommands.java|  6 +++-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 33 +++-
 2 files changed, 37 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9764d07/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 1c7316a..20cecb4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
 
 @Override
    protected void processPath(PathData src, PathData target) throws IOException {
-  if (!src.fs.getUri().equals(target.fs.getUri())) {
+  String srcUri = src.fs.getUri().getScheme() + "://" +
+  src.fs.getUri().getHost();
+  String dstUri = target.fs.getUri().getScheme() + "://" +
+  target.fs.getUri().getHost();
+  if (!srcUri.equals(dstUri)) {
 throw new PathIOException(src.toString(),
 "Does not match target filesystem");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9764d07/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index b396762..a9791b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -521,7 +521,38 @@ public class TestDFSShell {
   }
 }
   }
-  
+
+  @Test
+  public void testMoveWithTargetPortEmpty() throws Exception {
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .format(true)
+  .numDataNodes(2)
+  .nameNodePort(8020)
+  .waitSafeMode(true)
+  .build();
+  FileSystem srcFs = cluster.getFileSystem();
+  FsShell shell = new FsShell();
+  shell.setConf(conf);
+  String[] argv = new String[2];
+  argv[0] = "-mkdir";
+  argv[1] = "/testfile";
+  ToolRunner.run(shell, argv);
+  argv = new String[3];
+  argv[0] = "-mv";
+  argv[1] = srcFs.getUri() + "/testfile";
+  argv[2] = "hdfs://localhost/testfile2";
+  int ret = ToolRunner.run(shell, argv);
+  assertEquals("mv should have succeeded", 0, ret);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
   @Test (timeout = 3)
   public void testURIPaths() throws Exception {
 Configuration srcConf = new HdfsConfiguration();



hadoop git commit: HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 49428ab6b -> 960860133


HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination 
paths. Contributed by Kuhu Shukla.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

(cherry picked from commit ef3da823573cbf16fd1d84479330dd457f95e0ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96086013
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96086013
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96086013

Branch: refs/heads/branch-2.7
Commit: 9608601330520990d4470971389d574cfa09736e
Parents: 49428ab
Author: Kihwal Lee 
Authored: Tue Apr 5 09:17:57 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 09:20:38 2016 -0500

--
 .../apache/hadoop/fs/shell/MoveCommands.java|  6 +++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 33 +++-
 3 files changed, 40 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96086013/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 1c7316a..20cecb4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
 
 @Override
    protected void processPath(PathData src, PathData target) throws IOException {
-  if (!src.fs.getUri().equals(target.fs.getUri())) {
+  String srcUri = src.fs.getUri().getScheme() + "://" +
+  src.fs.getUri().getHost();
+  String dstUri = target.fs.getUri().getScheme() + "://" +
+  target.fs.getUri().getHost();
+  if (!srcUri.equals(dstUri)) {
 throw new PathIOException(src.toString(),
 "Does not match target filesystem");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96086013/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3a77f02..bb37eb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -158,6 +158,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9917. IBR accumulate more objects when SNN was down for sometime.
 (Brahma Reddy Battula via vinayakumarb)
 
+HDFS-10239. Fsshell mv fails if port usage doesn't match in src and
+    destination paths (Kuhu Shukla via kihwal)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96086013/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 828d89d..9924775 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -519,7 +519,38 @@ public class TestDFSShell {
   }
 }
   }
-  
+
+  @Test
+  public void testMoveWithTargetPortEmpty() throws Exception {
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .format(true)
+  .numDataNodes(2)
+  .nameNodePort(8020)
+  .waitSafeMode(true)
+  .build();
+  FileSystem srcFs = cluster.getFileSystem();
+  FsShell shell = new FsShell();
+  shell.setConf(conf);
+  String[] argv = new String[2];
+  argv[0] = "-mkdir";
+  argv[1] = "/testfile";
+  ToolRunner.run(shell, argv);
+  argv = new String[3];
+  argv[0] = "-mv";
+  argv[1] = srcFs.getUri() + "/testfile";
+  argv[2] = "hdfs://localhost/testfile2";
+  int ret = ToolRunner.run(shell, argv);
+  assertEquals("mv should have succeeded", 0, ret);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
   @T

hadoop git commit: HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6be28bcc4 -> 917464505


HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination 
paths. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91746450
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91746450
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91746450

Branch: refs/heads/trunk
Commit: 917464505c0e930ebeb4c775d829e51c56a48686
Parents: 6be28bc
Author: Kihwal Lee 
Authored: Tue Apr 5 09:07:24 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 09:07:24 2016 -0500

--
 .../apache/hadoop/fs/shell/MoveCommands.java|  6 +++-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 31 
 2 files changed, 36 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91746450/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 02a3b25..d359282 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
 
 @Override
    protected void processPath(PathData src, PathData target) throws IOException {
-  if (!src.fs.getUri().equals(target.fs.getUri())) {
+  String srcUri = src.fs.getUri().getScheme() + "://" +
+  src.fs.getUri().getHost();
+  String dstUri = target.fs.getUri().getScheme() + "://" +
+  target.fs.getUri().getHost();
+  if (!srcUri.equals(dstUri)) {
 throw new PathIOException(src.toString(),
 "Does not match target filesystem");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91746450/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 41cd5c0..b75ac11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -559,6 +559,37 @@ public class TestDFSShell {
 }
   }
 
+  @Test
+  public void testMoveWithTargetPortEmpty() throws Exception {
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .format(true)
+  .numDataNodes(2)
+  .nameNodePort(8020)
+  .waitSafeMode(true)
+  .build();
+  FileSystem srcFs = cluster.getFileSystem();
+  FsShell shell = new FsShell();
+  shell.setConf(conf);
+  String[] argv = new String[2];
+  argv[0] = "-mkdir";
+  argv[1] = "/testfile";
+  ToolRunner.run(shell, argv);
+  argv = new String[3];
+  argv[0] = "-mv";
+  argv[1] = srcFs.getUri() + "/testfile";
+  argv[2] = "hdfs://localhost/testfile2";
+  int ret = ToolRunner.run(shell, argv);
+  assertEquals("mv should have succeeded", 0, ret);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
   @Test (timeout = 3)
   public void testURIPaths() throws Exception {
 Configuration srcConf = new HdfsConfiguration();



hadoop git commit: HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. Contributed by Eric Badger.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0cd320a84 -> 9ba1e5af0


HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. 
Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ba1e5af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ba1e5af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ba1e5af

Branch: refs/heads/trunk
Commit: 9ba1e5af06070ba01dcf46e1a4c66713a1d43352
Parents: 0cd320a
Author: Kihwal Lee 
Authored: Tue Apr 5 16:26:18 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 16:26:18 2016 -0500

--
 .../bkjournal/TestBookKeeperHACheckpoints.java  | 46 ++--
 1 file changed, 33 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ba1e5af/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
index ed53512..b8fc30d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.contrib.bkjournal;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -27,6 +29,9 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -39,6 +44,9 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
   private static BKJMUtil bkutil = null;
   static int numBookies = 3;
   static int journalCount = 0;
+  private final Random random = new Random();
+
+  private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class);
 
   @SuppressWarnings("rawtypes")
   @Override
@@ -49,22 +57,34 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
  BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
  .toString());
 BKJMUtil.addJournalManagerDefinition(conf);
-MiniDFSNNTopology topology = new MiniDFSNNTopology()
-  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
 
-cluster = new MiniDFSCluster.Builder(conf)
-  .nnTopology(topology)
-  .numDataNodes(1)
-  .manageNameDfsSharedDirs(false)
-  .build();
-cluster.waitActive();
+int retryCount = 0;
+while (true) {
+  try {
+int basePort = 10060 + random.nextInt(100) * 2;
+MiniDFSNNTopology topology = new MiniDFSNNTopology()
+  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
 
-setNNs();
-fs = HATestUtil.configureFailoverFs(cluster, conf);
+cluster = new MiniDFSCluster.Builder(conf)
+  .nnTopology(topology)
+  .numDataNodes(1)
+  .manageNameDfsSharedDirs(false)
+  .build();
+cluster.waitActive();
 
-cluster.transitionToActive(0);
+setNNs();
+fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+cluster.transitionToActive(0);
+++retryCount;
+break;
+  } catch (BindException e) {
+LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
++ retryCount + " times");
+  }
+}
   }
 
   @BeforeClass
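
The retry loop above replaces the fixed HTTP ports 10001/10002 with a randomized base port and rebuilds the MiniDFSCluster whenever a BindException signals a port conflict. A stripped-down sketch of the same pattern, with the cluster construction hidden behind a hypothetical callback (the port range mirrors the patch; everything else is illustrative):

  import java.net.BindException;
  import java.util.Random;

  public class PortRetry {
    // Hypothetical callback standing in for the MiniDFSCluster setup.
    interface ClusterStarter {
      void start(int basePort) throws BindException;
    }

    // Keep picking a random even base port until the cluster comes up without
    // a bind conflict; nn1 would use basePort and nn2 basePort + 1.
    static int startWithRetry(ClusterStarter starter) {
      Random random = new Random();
      int retryCount = 0;
      while (true) {
        int basePort = 10060 + random.nextInt(100) * 2;
        try {
          starter.start(basePort);
          return basePort;
        } catch (BindException e) {
          ++retryCount;
          System.out.println("Ports " + basePort + "/" + (basePort + 1)
              + " in use, retry " + retryCount);
        }
      }
    }
  }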



hadoop git commit: HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. Contributed by Eric Badger.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2ca6251a6 -> 6e37c5fe8


HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. 
Contributed by Eric Badger.

(cherry picked from commit 9ba1e5af06070ba01dcf46e1a4c66713a1d43352)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e37c5fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e37c5fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e37c5fe

Branch: refs/heads/branch-2
Commit: 6e37c5fe80ee9ce46afc26838391cc27ca6b1e6f
Parents: 2ca6251
Author: Kihwal Lee 
Authored: Tue Apr 5 16:49:59 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 16:49:59 2016 -0500

--
 .../bkjournal/TestBookKeeperHACheckpoints.java  | 47 ++--
 1 file changed, 33 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e37c5fe/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
index b74cd7f..3299673 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.contrib.bkjournal;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -27,6 +29,9 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -35,6 +40,9 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
   private static BKJMUtil bkutil = null;
   static int numBookies = 3;
   static int journalCount = 0;
+  private final Random random = new Random();
+
+  private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class);
 
   @SuppressWarnings("rawtypes")
   @Override
@@ -45,23 +53,34 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
  BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
  .toString());
 BKJMUtil.addJournalManagerDefinition(conf);
-MiniDFSNNTopology topology = new MiniDFSNNTopology()
-  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
 
-cluster = new MiniDFSCluster.Builder(conf)
-  .nnTopology(topology)
-  .numDataNodes(1)
-  .manageNameDfsSharedDirs(false)
-  .build();
-cluster.waitActive();
+int retryCount = 0;
+while (true) {
+  try {
+int basePort = 10060 + random.nextInt(100) * 2;
+MiniDFSNNTopology topology = new MiniDFSNNTopology()
+  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
 
-nn0 = cluster.getNameNode(0);
-nn1 = cluster.getNameNode(1);
-fs = HATestUtil.configureFailoverFs(cluster, conf);
+cluster = new MiniDFSCluster.Builder(conf)
+  .nnTopology(topology)
+  .numDataNodes(1)
+  .manageNameDfsSharedDirs(false)
+  .build();
+cluster.waitActive();
+nn0 = cluster.getNameNode(0);
+nn1 = cluster.getNameNode(1);
+fs = HATestUtil.configureFailoverFs(cluster, conf);
 
-cluster.transitionToActive(0);
+cluster.transitionToActive(0);
+++retryCount;
+break;
+  } catch (BindException e) {
+LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
++ retryCount + " times");
+  }
+}
   }
 
   @BeforeClass



hadoop git commit: HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. Contributed by Eric Badger.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7286c435c -> 8bb465564


HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. 
Contributed by Eric Badger.

(cherry picked from commit 9ba1e5af06070ba01dcf46e1a4c66713a1d43352)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java

(cherry picked from commit 6e37c5fe80ee9ce46afc26838391cc27ca6b1e6f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bb46556
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bb46556
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bb46556

Branch: refs/heads/branch-2.8
Commit: 8bb46556498b9c40ec8d13230ee2e3a7790f572c
Parents: 7286c43
Author: Kihwal Lee 
Authored: Tue Apr 5 16:54:46 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 16:54:46 2016 -0500

--
 .../bkjournal/TestBookKeeperHACheckpoints.java  | 47 ++--
 1 file changed, 33 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb46556/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
index b74cd7f..3299673 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.contrib.bkjournal;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -27,6 +29,9 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -35,6 +40,9 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
   private static BKJMUtil bkutil = null;
   static int numBookies = 3;
   static int journalCount = 0;
+  private final Random random = new Random();
+
+  private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class);
 
   @SuppressWarnings("rawtypes")
   @Override
@@ -45,23 +53,34 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
  BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
  .toString());
 BKJMUtil.addJournalManagerDefinition(conf);
-MiniDFSNNTopology topology = new MiniDFSNNTopology()
-  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
 
-cluster = new MiniDFSCluster.Builder(conf)
-  .nnTopology(topology)
-  .numDataNodes(1)
-  .manageNameDfsSharedDirs(false)
-  .build();
-cluster.waitActive();
+int retryCount = 0;
+while (true) {
+  try {
+int basePort = 10060 + random.nextInt(100) * 2;
+MiniDFSNNTopology topology = new MiniDFSNNTopology()
+  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
 
-nn0 = cluster.getNameNode(0);
-nn1 = cluster.getNameNode(1);
-fs = HATestUtil.configureFailoverFs(cluster, conf);
+cluster = new MiniDFSCluster.Builder(conf)
+  .nnTopology(topology)
+  .numDataNodes(1)
+  .manageNameDfsSharedDirs(false)
+  .build();
+cluster.waitActive();
+nn0 = cluster.getNameNode(0);
+nn1 = cluster.getNameNode(1);
+fs = HATestUtil.configureFailoverFs(cluster, conf);
 
-cluster.transitionToActive(0);
+cluster.transitionToActive(0);
+++retryCount;
+break;
+  } catch (BindException e) {
+LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
++ retryCount + " times");
+  }
+}
   }
 
   @BeforeClass



hadoop git commit: HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. Contributed by Eric Badger.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 76e0bb7a1 -> dd701c980


HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. 
Contributed by Eric Badger.

(cherry picked from commit 9ba1e5af06070ba01dcf46e1a4c66713a1d43352)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java

(cherry picked from commit 6e37c5fe80ee9ce46afc26838391cc27ca6b1e6f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd701c98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd701c98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd701c98

Branch: refs/heads/branch-2.7
Commit: dd701c9800ed241f66d4f0777f50affcc21544e7
Parents: 76e0bb7
Author: Kihwal Lee 
Authored: Tue Apr 5 17:00:42 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 17:00:42 2016 -0500

--
 .../bkjournal/TestBookKeeperHACheckpoints.java  | 47 ++--
 1 file changed, 33 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd701c98/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
index b74cd7f..3299673 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.contrib.bkjournal;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -27,6 +29,9 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -35,6 +40,9 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
   private static BKJMUtil bkutil = null;
   static int numBookies = 3;
   static int journalCount = 0;
+  private final Random random = new Random();
+
+  private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class);
 
   @SuppressWarnings("rawtypes")
   @Override
@@ -45,23 +53,34 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
  BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
  .toString());
 BKJMUtil.addJournalManagerDefinition(conf);
-MiniDFSNNTopology topology = new MiniDFSNNTopology()
-  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
 
-cluster = new MiniDFSCluster.Builder(conf)
-  .nnTopology(topology)
-  .numDataNodes(1)
-  .manageNameDfsSharedDirs(false)
-  .build();
-cluster.waitActive();
+int retryCount = 0;
+while (true) {
+  try {
+int basePort = 10060 + random.nextInt(100) * 2;
+MiniDFSNNTopology topology = new MiniDFSNNTopology()
+  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
 
-nn0 = cluster.getNameNode(0);
-nn1 = cluster.getNameNode(1);
-fs = HATestUtil.configureFailoverFs(cluster, conf);
+cluster = new MiniDFSCluster.Builder(conf)
+  .nnTopology(topology)
+  .numDataNodes(1)
+  .manageNameDfsSharedDirs(false)
+  .build();
+cluster.waitActive();
+nn0 = cluster.getNameNode(0);
+nn1 = cluster.getNameNode(1);
+fs = HATestUtil.configureFailoverFs(cluster, conf);
 
-cluster.transitionToActive(0);
+cluster.transitionToActive(0);
+++retryCount;
+break;
+  } catch (BindException e) {
+LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
++ retryCount + " times");
+  }
+}
   }
 
   @BeforeClass



hadoop git commit: Revert "HDFS-8791. block ID-based DN storage layout can be very slow for datanode on ext4. Contributed by Chris Trezzo."

2016-04-06 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 62da8f6fa -> 8e784afb1


Revert "HDFS-8791. block ID-based DN storage layout can be very slow for 
datanode on ext4. Contributed by Chris Trezzo."

This reverts commit 9bc9e13a979c775354b2394d926f466a0d5be514.

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e784afb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e784afb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e784afb

Branch: refs/heads/branch-2.7
Commit: 8e784afb1596b06c08493d4ceb1b743e5ba61edc
Parents: 62da8f6
Author: Kihwal Lee 
Authored: Wed Apr 6 17:35:54 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Apr 6 17:35:54 2016 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 --
 .../server/datanode/DataNodeLayoutVersion.java  |   5 +---
 .../hdfs/server/datanode/DataStorage.java   |  11 +++
 .../hdfs/server/datanode/DatanodeUtil.java  |   4 +--
 .../hadoop/hdfs/TestDatanodeLayoutUpgrade.java  |  29 ++-
 .../resources/hadoop-56-layout-datanode-dir.tgz | Bin 198996 -> 0 bytes
 .../resources/hadoop-to-57-dn-layout-dir.txt|  24 ---
 7 files changed, 9 insertions(+), 67 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e784afb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bb37eb8..2441d09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -38,9 +38,6 @@ Release 2.7.3 - UNRELEASED
 
 HDFS-9395. Make HDFS audit logging consistant (Kuhu Shukla via kihwal)
 
-HDFS-8791. block ID-based DN storage layout can be very slow for datanode
-on ext4 (Chris Trezzo via kihwal)
-
 HDFS-9048. DistCp documentation is out-of-dated
 (Daisuke Kobayashi via iwasakims)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e784afb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
index 609a740..23e7cfe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
@@ -65,10 +65,7 @@ public class DataNodeLayoutVersion {
 FIRST_LAYOUT(-55, -53, "First datanode layout", false),
 BLOCKID_BASED_LAYOUT(-56,
 "The block ID of a finalized block uniquely determines its position " +
-"in the directory structure"),
-BLOCKID_BASED_LAYOUT_32_by_32(-57,
-"Identical to the block id based layout (-56) except it uses a smaller"
-+ " directory structure (32x32)");
+"in the directory structure");

 private final FeatureInfo info;
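
For context, the reverted -57 feature only changed how many buckets the block-ID-based layout spreads finalized blocks across (32x32 instead of 256x256). A rough, illustrative sketch of how such a layout might derive the two-level subdirectory from a block ID; the bit masks are assumptions for illustration and this is not the actual DatanodeUtil.idToBlockDir code:

  public class BlockLayoutSketch {
    // Illustrative only: map a block ID to a nested subdirectory path.
    // Assumed masks: 0xFF gives 256 buckets per level, 0x1F gives 32.
    static String blockSubdir(long blockId, boolean use32x32Layout) {
      long mask = use32x32Layout ? 0x1F : 0xFF;
      long d1 = (blockId >> 16) & mask;
      long d2 = (blockId >> 8) & mask;
      return "subdir" + d1 + "/subdir" + d2;
    }
  }

The smaller 32x32 variant was motivated by slow directory handling on ext4 with 65536 leaf directories per block pool; this revert restores the 256x256 mapping on branch-2.7.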
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e784afb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 7681d3c..8656ae9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -1117,13 +1117,10 @@ public class DataStorage extends Storage {
 LOG.info("Start linking block files from " + from + " to " + to);
 boolean upgradeToIdBasedLayout = false;
    // If we are upgrading from a version older than the one where we introduced
-// block ID-based layout (32x32) AND we're working with the finalized
-// directory, we'll need to upgrade from the old layout to the new one. The
-// upgrade path from pre-blockid based layouts (>-56) and blockid based
-// 256x256 layouts (-56) is fortunately the same.
-if (oldLV > DataNodeLayoutVersion.Feature.BLOCKID_BASED_LAYOUT_32_by_32
-.getInfo().getLayoutVersion()
-&am

hadoop git commit: HDFS-10270. TestJMXGet:testNameNode() fails. Contributed by Gergely Novák

2016-04-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c195b1c9f -> 4a7e52d30


HDFS-10270. TestJMXGet:testNameNode() fails. Contributed by Gergely Novák

(cherry picked from commit d2f3bbc29046435904ad9418073795439c71b441)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a7e52d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a7e52d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a7e52d3

Branch: refs/heads/branch-2
Commit: 4a7e52d302b7df62066e8b2e1b859a019e47b79a
Parents: c195b1c
Author: Kihwal Lee 
Authored: Wed Apr 13 11:24:06 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Apr 13 11:24:06 2016 -0500

--
 .../src/test/java/org/apache/hadoop/tools/TestJMXGet.java | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a7e52d3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index 7049a0d..f64ee9d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -123,9 +123,6 @@ public class TestJMXGet {
 jmx.getValue("NumLiveDataNodes")));
 assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")),
 getMetrics("FSNamesystem"));
-DFSTestUtil.waitForMetric(jmx, "NumOpenConnections", numDatanodes);
-assertEquals(numDatanodes, Integer.parseInt(
-jmx.getValue("NumOpenConnections")));
 
 cluster.shutdown();
 MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();



hadoop git commit: HDFS-10270. TestJMXGet:testNameNode() fails. Contributed by Gergely Novák

2016-04-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c85a63aba -> ccaf746ee


HDFS-10270. TestJMXGet:testNameNode() fails. Contributed by Gergely Novák

(cherry picked from commit d2f3bbc29046435904ad9418073795439c71b441)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ccaf746e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ccaf746e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ccaf746e

Branch: refs/heads/branch-2.8
Commit: ccaf746eeacfafc7392a994f28ee9dbf595c84af
Parents: c85a63a
Author: Kihwal Lee 
Authored: Wed Apr 13 11:24:45 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Apr 13 11:24:45 2016 -0500

--
 .../src/test/java/org/apache/hadoop/tools/TestJMXGet.java | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf746e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index 7049a0d..f64ee9d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -123,9 +123,6 @@ public class TestJMXGet {
 jmx.getValue("NumLiveDataNodes")));
 assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")),
 getMetrics("FSNamesystem"));
-DFSTestUtil.waitForMetric(jmx, "NumOpenConnections", numDatanodes);
-assertEquals(numDatanodes, Integer.parseInt(
-jmx.getValue("NumOpenConnections")));
 
 cluster.shutdown();
 MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();



hadoop git commit: HDFS-10270. TestJMXGet:testNameNode() fails. Contributed by Gergely Novák

2016-04-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 903428bf9 -> d2f3bbc29


HDFS-10270. TestJMXGet:testNameNode() fails. Contributed by Gergely Novák


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2f3bbc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2f3bbc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2f3bbc2

Branch: refs/heads/trunk
Commit: d2f3bbc29046435904ad9418073795439c71b441
Parents: 903428b
Author: Kihwal Lee 
Authored: Wed Apr 13 11:22:36 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Apr 13 11:22:36 2016 -0500

--
 .../src/test/java/org/apache/hadoop/tools/TestJMXGet.java | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2f3bbc2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index a9e41ec..f83e7d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -111,9 +111,6 @@ public class TestJMXGet {
 jmx.getValue("NumLiveDataNodes")));
 assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")),
 getMetrics("FSNamesystem"));
-DFSTestUtil.waitForMetric(jmx, "NumOpenConnections", numDatanodes);
-assertEquals(numDatanodes, Integer.parseInt(
-jmx.getValue("NumOpenConnections")));
 
 cluster.shutdown();
 MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();



hadoop git commit: HDFS-10282. The VolumeScanner should warn about replica files which are misplaced. Contributed by Colin Patrick McCabe.

2016-04-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk df18b6e98 -> 0d1c1152f


HDFS-10282. The VolumeScanner should warn about replica files which are 
misplaced. Contributed by Colin Patrick McCabe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d1c1152
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d1c1152
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d1c1152

Branch: refs/heads/trunk
Commit: 0d1c1152f1ce2706f92109bfbdff0d62e98e6797
Parents: df18b6e9
Author: Kihwal Lee 
Authored: Thu Apr 14 07:58:24 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 07:58:24 2016 -0500

--
 .../hdfs/server/datanode/DirectoryScanner.java  | 14 ++---
 .../hdfs/server/datanode/VolumeScanner.java |  2 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 12 
 .../server/datanode/FsDatasetTestUtils.java |  7 +++
 .../hdfs/server/datanode/TestBlockScanner.java  | 63 
 .../fsdataset/impl/FsDatasetImplTestUtils.java  | 21 +++
 6 files changed, 111 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d1c1152/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 0e51cec..1db445e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -914,13 +914,13 @@ public class DirectoryScanner implements Runnable {
  */
 private void verifyFileLocation(File actualBlockFile,
 File bpFinalizedDir, long blockId) {
-  File blockDir = DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
-  if (actualBlockFile.getParentFile().compareTo(blockDir) != 0) {
-File expBlockFile = new File(blockDir, actualBlockFile.getName());
-LOG.warn("Block: " + blockId
-+ " has to be upgraded to block ID-based layout. "
-+ "Actual block file path: " + actualBlockFile
-+ ", expected block file path: " + expBlockFile);
+  File expectedBlockDir =
+  DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
+  File actualBlockDir = actualBlockFile.getParentFile();
+  if (actualBlockDir.compareTo(expectedBlockDir) != 0) {
+LOG.warn("Block: " + blockId +
+" found in invalid directory.  Expected directory: " +
+expectedBlockDir + ".  Actual directory: " + actualBlockDir);
   }
 }
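
The rewritten warning flags a replica whose on-disk directory differs from the directory implied by its block ID, replacing the old message that wrongly suggested a pending layout upgrade. A self-contained sketch of the check; idToBlockDir here is a stand-in for DatanodeUtil.idToBlockDir and its constants are illustrative assumptions:

  import java.io.File;

  public class ReplicaLocationCheck {
    // Stand-in for DatanodeUtil.idToBlockDir; masks are illustrative, not the
    // exact Hadoop constants.
    static File idToBlockDir(File finalizedDir, long blockId) {
      long d1 = (blockId >> 16) & 0xFF;
      long d2 = (blockId >> 8) & 0xFF;
      return new File(finalizedDir, "subdir" + d1 + "/subdir" + d2);
    }

    // Warn when a finalized replica file is not in the directory its block ID implies.
    static void warnIfMisplaced(File actualBlockFile, File finalizedDir, long blockId) {
      File expectedBlockDir = idToBlockDir(finalizedDir, blockId);
      File actualBlockDir = actualBlockFile.getParentFile();
      if (!actualBlockDir.equals(expectedBlockDir)) {
        System.err.println("Block: " + blockId + " found in invalid directory."
            + "  Expected directory: " + expectedBlockDir
            + ".  Actual directory: " + actualBlockDir);
      }
    }
  }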
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d1c1152/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index d1f2d05..d0dc9ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -414,7 +414,7 @@ public class VolumeScanner extends Thread {
   Block b = volume.getDataset().getStoredBlock(
   cblock.getBlockPoolId(), cblock.getBlockId());
   if (b == null) {
-LOG.info("FileNotFound while finding block {} on volume {}",
+LOG.info("Replica {} was not found in the VolumeMap for volume {}",
 cblock, volume.getBasePath());
   } else {
 block = new ExtendedBlock(cblock.getBlockPoolId(), b);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d1c1152/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 0d060f9..73514b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop

hadoop git commit: HDFS-10282. The VolumeScanner should warn about replica files which are misplaced. Contributed by Colin Patrick McCabe.

2016-04-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e124c3a2a -> 51fc96f5c


HDFS-10282. The VolumeScanner should warn about replica files which are 
misplaced. Contributed by Colin Patrick McCabe.

(cherry picked from commit 0d1c1152f1ce2706f92109bfbdff0d62e98e6797)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51fc96f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51fc96f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51fc96f5

Branch: refs/heads/branch-2
Commit: 51fc96f5cf5d5e1253e65298db32e90ba57402ad
Parents: e124c3a
Author: Kihwal Lee 
Authored: Thu Apr 14 07:59:41 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 07:59:41 2016 -0500

--
 .../hdfs/server/datanode/DirectoryScanner.java  | 14 ++---
 .../hdfs/server/datanode/VolumeScanner.java |  2 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 12 
 .../server/datanode/FsDatasetTestUtils.java |  7 +++
 .../hdfs/server/datanode/TestBlockScanner.java  | 63 
 .../fsdataset/impl/FsDatasetImplTestUtils.java  | 21 +++
 6 files changed, 111 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51fc96f5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 0e51cec..1db445e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -914,13 +914,13 @@ public class DirectoryScanner implements Runnable {
  */
 private void verifyFileLocation(File actualBlockFile,
 File bpFinalizedDir, long blockId) {
-  File blockDir = DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
-  if (actualBlockFile.getParentFile().compareTo(blockDir) != 0) {
-File expBlockFile = new File(blockDir, actualBlockFile.getName());
-LOG.warn("Block: " + blockId
-+ " has to be upgraded to block ID-based layout. "
-+ "Actual block file path: " + actualBlockFile
-+ ", expected block file path: " + expBlockFile);
+  File expectedBlockDir =
+  DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
+  File actualBlockDir = actualBlockFile.getParentFile();
+  if (actualBlockDir.compareTo(expectedBlockDir) != 0) {
+LOG.warn("Block: " + blockId +
+" found in invalid directory.  Expected directory: " +
+expectedBlockDir + ".  Actual directory: " + actualBlockDir);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51fc96f5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index d1f2d05..d0dc9ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -414,7 +414,7 @@ public class VolumeScanner extends Thread {
   Block b = volume.getDataset().getStoredBlock(
   cblock.getBlockPoolId(), cblock.getBlockId());
   if (b == null) {
-LOG.info("FileNotFound while finding block {} on volume {}",
+LOG.info("Replica {} was not found in the VolumeMap for volume {}",
 cblock, volume.getBasePath());
   } else {
 block = new ExtendedBlock(cblock.getBlockPoolId(), b);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51fc96f5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 6971d80..57c39e7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdf

hadoop git commit: HDFS-10280. Document new dfsadmin command -evictWriters. Contributed by Wei-Chiu Chuang.

2016-04-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 404f57f32 -> c970f1d00


HDFS-10280. Document new dfsadmin command -evictWriters. Contributed by 
Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c970f1d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c970f1d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c970f1d0

Branch: refs/heads/trunk
Commit: c970f1d00525e4273075cff7406dcbd71305abd5
Parents: 404f57f3
Author: Kihwal Lee 
Authored: Thu Apr 14 12:45:47 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 12:45:47 2016 -0500

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java | 4 
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 2 ++
 2 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c970f1d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index a35246f..08d3da5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -427,6 +427,7 @@ public class DFSAdmin extends FsShell {
 "\t[-allowSnapshot ]\n" +
 "\t[-disallowSnapshot ]\n" +
 "\t[-shutdownDatanode  [upgrade]]\n" +
+"\t[-evictWriters ]\n" +
 "\t[-getDatanodeInfo ]\n" +
 "\t[-metasave filename]\n" +
 "\t[-triggerBlockReport [-incremental] ]\n" +
@@ -1829,6 +1830,9 @@ public class DFSAdmin extends FsShell {
 } else if ("-shutdownDatanode".equals(cmd)) {
   System.err.println("Usage: hdfs dfsadmin"
   + " [-shutdownDatanode  [upgrade]]");
+} else if ("-evictWriters".equals(cmd)) {
+  System.err.println("Usage: hdfs dfsadmin"
+  + " [-evictWriters ]");
 } else if ("-getDatanodeInfo".equals(cmd)) {
   System.err.println("Usage: hdfs dfsadmin"
   + " [-getDatanodeInfo ]");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c970f1d0/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 035abd6..a6c8b4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -385,6 +385,7 @@ Usage:
 hdfs dfsadmin [-allowSnapshot ]
 hdfs dfsadmin [-disallowSnapshot ]
 hdfs dfsadmin [-shutdownDatanode  [upgrade]]
+hdfs dfsadmin [-evictWriters ]
 hdfs dfsadmin [-getDatanodeInfo ]
 hdfs dfsadmin [-metasave filename]
 hdfs dfsadmin [-triggerBlockReport [-incremental] 
]
@@ -419,6 +420,7 @@ Usage:
 | `-allowSnapshot` \ | Allowing snapshots of a directory to be 
created. If the operation completes successfully, the directory becomes 
snapshottable. See the [HDFS Snapshot Documentation](./HdfsSnapshots.html) for 
more information. |
 | `-disallowSnapshot` \ | Disallowing snapshots of a directory 
to be created. All snapshots of the directory must be deleted before 
disallowing snapshots. See the [HDFS Snapshot 
Documentation](./HdfsSnapshots.html) for more information. |
 | `-shutdownDatanode` \ [upgrade] | Submit a 
shutdown request for the given datanode. See [Rolling Upgrade 
document](./HdfsRollingUpgrade.html#dfsadmin_-shutdownDatanode) for the detail. 
|
+| `-evictWriters` \ | Make the datanode evict all 
clients that are writing a block. This is useful if decommissioning is hung due 
to slow writers. |
 | `-getDatanodeInfo` \ | Get the information about 
the given datanode. See [Rolling Upgrade 
document](./HdfsRollingUpgrade.html#dfsadmin_-getDatanodeInfo) for the detail. |
 | `-metasave` filename | Save Namenode's primary data structures to *filename* 
in the directory specified by hadoop.log.dir property. *filename* is 
overwritten if it exists. *filename* will contain one line for each of the 
following: 1. Datanodes heart beating with Namenode, 2. Blocks waiting to 
be replicated, 3. Blocks currently being replicated, 4. Blocks waiting to 
be deleted |
 | `-triggerBlockReport` `[-incremental]` \ | 
Trigger a block report for the given datanode. If 'incremental' is specified, 
it will be an incremental block report; otherwise, it will be a full block report. |
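
Since dfsadmin subcommands are driven through the Tool interface elsewhere in these patches, a hedged sketch of invoking the newly documented flag programmatically; the datanode address is a placeholder and the DFSAdmin wiring is assumed rather than taken from this commit:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
  import org.apache.hadoop.hdfs.tools.DFSAdmin;
  import org.apache.hadoop.util.ToolRunner;

  public class EvictWritersExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new HdfsConfiguration();
      // "dn1.example.com:50020" is a placeholder datanode IPC address.
      int ret = ToolRunner.run(new DFSAdmin(conf),
          new String[] {"-evictWriters", "dn1.example.com:50020"});
      System.exit(ret);
    }
  }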



hadoop git commit: HDFS-10280. Document new dfsadmin command -evictWriters. Contributed by Wei-Chiu Chuang.

2016-04-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 864baf23a -> 5e8243137


HDFS-10280. Document new dfsadmin command -evictWriters. Contributed by 
Wei-Chiu Chuang.

(cherry picked from commit c970f1d00525e4273075cff7406dcbd71305abd5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e824313
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e824313
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e824313

Branch: refs/heads/branch-2
Commit: 5e8243137495d6cfd306068b4e2c93eda16b5f62
Parents: 864baf2
Author: Kihwal Lee 
Authored: Thu Apr 14 12:47:25 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 12:47:25 2016 -0500

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java | 4 
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 2 ++
 2 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e824313/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 56bf5df..cd6b862 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -428,6 +428,7 @@ public class DFSAdmin extends FsShell {
 "\t[-allowSnapshot ]\n" +
 "\t[-disallowSnapshot ]\n" +
 "\t[-shutdownDatanode  [upgrade]]\n" +
+"\t[-evictWriters ]\n" +
 "\t[-getDatanodeInfo ]\n" +
 "\t[-metasave filename]\n" +
 "\t[-triggerBlockReport [-incremental] ]\n" +
@@ -1802,6 +1803,9 @@ public class DFSAdmin extends FsShell {
 } else if ("-shutdownDatanode".equals(cmd)) {
   System.err.println("Usage: hdfs dfsadmin"
   + " [-shutdownDatanode  [upgrade]]");
+} else if ("-evictWriters".equals(cmd)) {
+  System.err.println("Usage: hdfs dfsadmin"
+  + " [-evictWriters ]");
 } else if ("-getDatanodeInfo".equals(cmd)) {
   System.err.println("Usage: hdfs dfsadmin"
   + " [-getDatanodeInfo ]");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e824313/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index d9453f8..84bb31c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -376,6 +376,7 @@ Usage:
 hdfs dfsadmin [-allowSnapshot ]
 hdfs dfsadmin [-disallowSnapshot ]
 hdfs dfsadmin [-shutdownDatanode  [upgrade]]
+hdfs dfsadmin [-evictWriters ]
 hdfs dfsadmin [-getDatanodeInfo ]
 hdfs dfsadmin [-metasave filename]
 hdfs dfsadmin [-triggerBlockReport [-incremental] 
]
@@ -410,6 +411,7 @@ Usage:
 | `-allowSnapshot` \ | Allowing snapshots of a directory to be 
created. If the operation completes successfully, the directory becomes 
snapshottable. See the [HDFS Snapshot Documentation](./HdfsSnapshots.html) for 
more information. |
 | `-disallowSnapshot` \ | Disallowing snapshots of a directory 
to be created. All snapshots of the directory must be deleted before 
disallowing snapshots. See the [HDFS Snapshot 
Documentation](./HdfsSnapshots.html) for more information. |
 | `-shutdownDatanode` \ [upgrade] | Submit a 
shutdown request for the given datanode. See [Rolling Upgrade 
document](./HdfsRollingUpgrade.html#dfsadmin_-shutdownDatanode) for the detail. 
|
+| `-evictWriters` \ | Make the datanode evict all 
clients that are writing a block. This is useful if decommissioning is hung due 
to slow writers. |
 | `-getDatanodeInfo` \ | Get the information about 
the given datanode. See [Rolling Upgrade 
document](./HdfsRollingUpgrade.html#dfsadmin_-getDatanodeInfo) for the detail. |
 | `-metasave` filename | Save Namenode's primary data structures to *filename* 
in the directory specified by hadoop.log.dir property. *filename* is 
overwritten if it exists. *filename* will contain one line for each of the 
following: 1. Datanodes heart beating with Namenode, 2. Blocks waiting to 
be replicated, 3. Blocks currently being replicated, 4. Blocks waiting to 
be deleted |
 | `-triggerBlockReport` `[-incremental]` \ | 
Trigger a block report for the given datanode. If 'incremental' is specified, 
it will be an incremental block report; otherwise, it will be a full block report. |
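
The new subcommand can also be driven from Java through the standard Tool interface. The snippet below is a minimal, illustrative sketch and is not part of the patch; the DataNode address shown is a hypothetical host:ipc-port placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class EvictWritersExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Equivalent to running "hdfs dfsadmin -evictWriters" against the
        // target DataNode; "dn1.example.com:50020" is an illustrative
        // placeholder, not a real address.
        int rc = ToolRunner.run(conf, new DFSAdmin(conf),
            new String[] {"-evictWriters", "dn1.example.com:50020"});
        System.exit(rc);
      }
    }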



hadoop git commit: HDFS-10280. Document new dfsadmin command -evictWriters. Contributed by Wei-Chiu Chuang.

2016-04-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 4d9001456 -> 019ca8f7c


HDFS-10280. Document new dfsadmin command -evictWriters. Contributed by 
Wei-Chiu Chuang.

(cherry picked from commit c970f1d00525e4273075cff7406dcbd71305abd5)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/019ca8f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/019ca8f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/019ca8f7

Branch: refs/heads/branch-2.8
Commit: 019ca8f7c1bd339082e0a1fb715cba23b293f5e8
Parents: 4d90014
Author: Kihwal Lee 
Authored: Thu Apr 14 12:50:36 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 12:50:36 2016 -0500

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java | 4 
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 2 ++
 2 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/019ca8f7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 96b7542..531b60b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -425,6 +425,7 @@ public class DFSAdmin extends FsShell {
 "\t[-allowSnapshot ]\n" +
 "\t[-disallowSnapshot ]\n" +
 "\t[-shutdownDatanode  [upgrade]]\n" +
+"\t[-evictWriters ]\n" +
 "\t[-getDatanodeInfo ]\n" +
 "\t[-metasave filename]\n" +
 "\t[-triggerBlockReport [-incremental] ]\n" +
@@ -1717,6 +1718,9 @@ public class DFSAdmin extends FsShell {
 } else if ("-shutdownDatanode".equals(cmd)) {
   System.err.println("Usage: hdfs dfsadmin"
   + " [-shutdownDatanode  [upgrade]]");
+} else if ("-evictWriters".equals(cmd)) {
+  System.err.println("Usage: hdfs dfsadmin"
+  + " [-evictWriters ]");
 } else if ("-getDatanodeInfo".equals(cmd)) {
   System.err.println("Usage: hdfs dfsadmin"
   + " [-getDatanodeInfo ]");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/019ca8f7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index f4ee836..9039a40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -347,6 +347,7 @@ Usage:
   [-fetchImage ]
   [-shutdownDatanode  [upgrade]]
   [-getDatanodeInfo ]
+  [-evictWriters ]
   [-triggerBlockReport [-incremental] ]
   [-help [cmd]]
 
@@ -380,6 +381,7 @@ Usage:
 | `-disallowSnapshot` \ | Disallowing snapshots of a directory 
to be created. All snapshots of the directory must be deleted before 
disallowing snapshots. See the [HDFS Snapshot 
Documentation](./HdfsSnapshots.html) for more information. |
 | `-fetchImage` \ | Downloads the most recent fsimage from 
the NameNode and saves it in the specified local directory. |
 | `-shutdownDatanode` \ [upgrade] | Submit a 
shutdown request for the given datanode. See [Rolling Upgrade 
document](./HdfsRollingUpgrade.html#dfsadmin_-shutdownDatanode) for the detail. 
|
+| `-evictWriters` \ | Make the datanode evict all 
clients that are writing a block. This is useful if decommissioning is hung due 
to slow writers. |
 | `-getDatanodeInfo` \ | Get the information about 
the given datanode. See [Rolling Upgrade 
document](./HdfsRollingUpgrade.html#dfsadmin_-getDatanodeInfo) for the detail. |
 | `-triggerBlockReport` `[-incremental]` \ | 
Trigger a block report for the given datanode. If 'incremental' is specified, 
it will be an incremental block report; otherwise, it will be a full block report. |
 | `-help` [cmd] | Displays help for the given command or all commands if none 
is specified. |



hadoop git commit: HDFS-10292. Add block id when client got Unable to close file exception. Contributed by Brahma Reddy Battula.

2016-04-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3150ae810 -> 2c155afe2


HDFS-10292. Add block id when client got Unable to close file exception. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c155afe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c155afe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c155afe

Branch: refs/heads/trunk
Commit: 2c155afe2736a5571bbb3bdfb2fe6f9709227229
Parents: 3150ae8
Author: Kihwal Lee 
Authored: Thu Apr 14 14:25:11 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 14:25:11 2016 -0500

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c155afe/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index dc88e08..0f82799 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -811,7 +811,7 @@ public class DFSOutputStream extends FSOutputSummer
 try {
   if (retries == 0) {
 throw new IOException("Unable to close file because the last block"
-+ " does not have enough number of replicas.");
++ last + " does not have enough number of replicas.");
   }
   retries--;
   Thread.sleep(sleeptime);
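
The one-line change above folds the last block into the error text, so a client that fails to close a file can see which block is still short of replicas. Below is a simplified, self-contained sketch of the surrounding retry pattern; the names used are placeholders, not the actual DFSOutputStream members.

    import java.io.IOException;

    public class CompleteWithRetries {
      interface CompleteCall {
        boolean complete() throws IOException;
      }

      static void completeFile(CompleteCall namenodeComplete, String lastBlock,
                               int retries, long sleeptimeMs)
          throws IOException, InterruptedException {
        while (!namenodeComplete.complete()) {
          if (retries == 0) {
            // Including the block identifier (the point of HDFS-10292) tells
            // the operator which block is still short of replicas.
            throw new IOException("Unable to close file because the last block "
                + lastBlock + " does not have enough number of replicas.");
          }
          retries--;
          Thread.sleep(sleeptimeMs);
        }
      }
    }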



hadoop git commit: HDFS-10292. Add block id when client got Unable to close file exception. Contributed by Brahma Reddy Battula.

2016-04-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ece01478c -> 0bc8c327e


HDFS-10292. Add block id when client got Unable to close file exception. 
Contributed by Brahma Reddy Battula.

(cherry picked from commit 2c155afe2736a5571bbb3bdfb2fe6f9709227229)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bc8c327
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bc8c327
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bc8c327

Branch: refs/heads/branch-2
Commit: 0bc8c327e9c3df31afccde6313af0fa333e63185
Parents: ece0147
Author: Kihwal Lee 
Authored: Thu Apr 14 14:26:23 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 14:26:23 2016 -0500

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bc8c327/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 6d29ec8..18509f8 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -815,7 +815,7 @@ public class DFSOutputStream extends FSOutputSummer
 try {
   if (retries == 0) {
 throw new IOException("Unable to close file because the last block"
-+ " does not have enough number of replicas.");
++ last + " does not have enough number of replicas.");
   }
   retries--;
   Thread.sleep(sleeptime);



hadoop git commit: HDFS-10292. Add block id when client got Unable to close file exception. Contributed by Brahma Reddy Battula.

2016-04-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 019ca8f7c -> 7dd879893


HDFS-10292. Add block id when client got Unable to close file exception. 
Contributed by Brahma Reddy Battula.

(cherry picked from commit 2c155afe2736a5571bbb3bdfb2fe6f9709227229)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7dd87989
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7dd87989
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7dd87989

Branch: refs/heads/branch-2.8
Commit: 7dd8798933d71f792d9a61a2bcbe41c3ac78ebc8
Parents: 019ca8f
Author: Kihwal Lee 
Authored: Thu Apr 14 14:26:59 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 14:26:59 2016 -0500

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dd87989/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 6d29ec8..18509f8 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -815,7 +815,7 @@ public class DFSOutputStream extends FSOutputSummer
 try {
   if (retries == 0) {
 throw new IOException("Unable to close file because the last block"
-+ " does not have enough number of replicas.");
++ last + " does not have enough number of replicas.");
   }
   retries--;
   Thread.sleep(sleeptime);



hadoop git commit: HDFS-10281. TestPendingCorruptDnMessages fails intermittently. Contributed by Mingliang Liu.

2016-04-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2c155afe2 -> b9c9d0359


HDFS-10281. TestPendingCorruptDnMessages fails intermittently. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9c9d035
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9c9d035
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9c9d035

Branch: refs/heads/trunk
Commit: b9c9d03591a49be31f3fbc738d01a31700bfdbc4
Parents: 2c155af
Author: Kihwal Lee 
Authored: Thu Apr 14 15:24:39 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 15:24:39 2016 -0500

--
 .../ha/TestPendingCorruptDnMessages.java| 51 +++-
 1 file changed, 28 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9c9d035/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
index 5f116d9..5063acd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.URISyntaxException;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -37,19 +39,22 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.util.ThreadUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import com.google.common.base.Supplier;
+
 import org.junit.Test;
 
 public class TestPendingCorruptDnMessages {
   
   private static final Path filePath = new Path("/foo.txt");
   
-  @Test
+  @Test (timeout = 6)
   public void testChangedStorageId() throws IOException, URISyntaxException,
-  InterruptedException {
+  InterruptedException, TimeoutException {
 HdfsConfiguration conf = new HdfsConfiguration();
 conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(1)
 .nnTopology(MiniDFSNNTopology.simpleHATopology())
 .build();
@@ -83,27 +88,27 @@ public class TestPendingCorruptDnMessages {
   
   // Wait until the standby NN queues up the corrupt block in the pending 
DN
   // message queue.
-  while (cluster.getNamesystem(1).getBlockManager()
-  .getPendingDataNodeMessageCount() < 1) {
-ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
-  }
-  
-  assertEquals(1, cluster.getNamesystem(1).getBlockManager()
-  .getPendingDataNodeMessageCount());
-  String oldStorageId = getRegisteredDatanodeUid(cluster, 1);
+  GenericTestUtils.waitFor(new Supplier() {
+@Override
+public Boolean get() {
+  return cluster.getNamesystem(1).getBlockManager()
+  .getPendingDataNodeMessageCount() == 1;
+}
+  }, 1000, 3);
+
+  final String oldStorageId = getRegisteredDatanodeUid(cluster, 1);
+  assertNotNull(oldStorageId);
   
   // Reformat/restart the DN.
   assertTrue(wipeAndRestartDn(cluster, 0));
   
-  // Give the DN time to start up and register, which will cause the
-  // DatanodeManager to dissociate the old storage ID from the DN xfer 
addr.
-  String newStorageId = "";
-  do {
-ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
-newStorageId = getRegisteredDatanodeUid(cluster, 1);
-System.out.println("> oldStorageId: " + oldStorageId +
-" newStorageId: " + newStorageId);
-  } while (newStorageId.equals(oldStorageId));
+  GenericTestUtils.waitFor(new Supplier() {
+@Override
+public Boolean get() {
+  final String newStorageId = getReg
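
The test change swaps hand-rolled sleep loops for GenericTestUtils.waitFor with an explicit poll interval and timeout. The following is a standalone sketch of that polling pattern, using an AtomicInteger in place of the pending-message counter; it is illustrative only, not taken from the patch.

    import java.util.concurrent.TimeoutException;
    import java.util.concurrent.atomic.AtomicInteger;

    import org.apache.hadoop.test.GenericTestUtils;
    import com.google.common.base.Supplier;

    public class WaitForExample {
      public static void main(String[] args)
          throws InterruptedException, TimeoutException {
        final AtomicInteger pendingMessages = new AtomicInteger(0);

        // Something else eventually satisfies the condition being polled.
        new Thread(new Runnable() {
          @Override
          public void run() {
            sleepQuietly(500);
            pendingMessages.set(1);
          }
        }).start();

        // Poll every second; fail with a TimeoutException after 30 seconds
        // instead of looping forever.
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            return pendingMessages.get() == 1;
          }
        }, 1000, 30000);
      }

      private static void sleepQuietly(long ms) {
        try {
          Thread.sleep(ms);
        } catch (InterruptedException ignored) {
        }
      }
    }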

hadoop git commit: HDFS-10281. TestPendingCorruptDnMessages fails intermittently. Contributed by Mingliang Liu.

2016-04-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0bc8c327e -> 9b3f1139b


HDFS-10281. TestPendingCorruptDnMessages fails intermittently. Contributed by 
Mingliang Liu.

(cherry picked from commit b9c9d03591a49be31f3fbc738d01a31700bfdbc4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b3f1139
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b3f1139
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b3f1139

Branch: refs/heads/branch-2
Commit: 9b3f1139ba7a5541b99239aaa5e674078e0fbf00
Parents: 0bc8c32
Author: Kihwal Lee 
Authored: Thu Apr 14 15:27:40 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 15:27:40 2016 -0500

--
 .../ha/TestPendingCorruptDnMessages.java| 51 +++-
 1 file changed, 28 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b3f1139/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
index 5f116d9..5063acd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.URISyntaxException;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -37,19 +39,22 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.util.ThreadUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import com.google.common.base.Supplier;
+
 import org.junit.Test;
 
 public class TestPendingCorruptDnMessages {
   
   private static final Path filePath = new Path("/foo.txt");
   
-  @Test
+  @Test (timeout = 6)
   public void testChangedStorageId() throws IOException, URISyntaxException,
-  InterruptedException {
+  InterruptedException, TimeoutException {
 HdfsConfiguration conf = new HdfsConfiguration();
 conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(1)
 .nnTopology(MiniDFSNNTopology.simpleHATopology())
 .build();
@@ -83,27 +88,27 @@ public class TestPendingCorruptDnMessages {
   
   // Wait until the standby NN queues up the corrupt block in the pending 
DN
   // message queue.
-  while (cluster.getNamesystem(1).getBlockManager()
-  .getPendingDataNodeMessageCount() < 1) {
-ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
-  }
-  
-  assertEquals(1, cluster.getNamesystem(1).getBlockManager()
-  .getPendingDataNodeMessageCount());
-  String oldStorageId = getRegisteredDatanodeUid(cluster, 1);
+  GenericTestUtils.waitFor(new Supplier() {
+@Override
+public Boolean get() {
+  return cluster.getNamesystem(1).getBlockManager()
+  .getPendingDataNodeMessageCount() == 1;
+}
+  }, 1000, 3);
+
+  final String oldStorageId = getRegisteredDatanodeUid(cluster, 1);
+  assertNotNull(oldStorageId);
   
   // Reformat/restart the DN.
   assertTrue(wipeAndRestartDn(cluster, 0));
   
-  // Give the DN time to start up and register, which will cause the
-  // DatanodeManager to dissociate the old storage ID from the DN xfer 
addr.
-  String newStorageId = "";
-  do {
-ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
-newStorageId = getRegisteredDatanodeUid(cluster, 1);
-System.out.println("> oldStorageId: " + oldStorageId +
-" newStorageId: " + newStorageId);
-  } while (newStorageId.equals(oldStorageId));
+  GenericTestUtils.waitFor(new Supplier() {
+@Override
+ 

hadoop git commit: HDFS-10281. TestPendingCorruptDnMessages fails intermittently. Contributed by Mingliang Liu.

2016-04-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 9b5c5bd42 -> 8b1e7842e


HDFS-10281. TestPendingCorruptDnMessages fails intermittently. Contributed by 
Mingliang Liu.

(cherry picked from commit b9c9d03591a49be31f3fbc738d01a31700bfdbc4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b1e7842
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b1e7842
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b1e7842

Branch: refs/heads/branch-2.8
Commit: 8b1e7842e3e08496edfc37f888ae86a8d0cb6c6a
Parents: 9b5c5bd
Author: Kihwal Lee 
Authored: Thu Apr 14 15:30:59 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 14 15:30:59 2016 -0500

--
 .../ha/TestPendingCorruptDnMessages.java| 51 +++-
 1 file changed, 28 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b1e7842/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
index 5f116d9..5063acd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.URISyntaxException;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -37,19 +39,22 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.util.ThreadUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import com.google.common.base.Supplier;
+
 import org.junit.Test;
 
 public class TestPendingCorruptDnMessages {
   
   private static final Path filePath = new Path("/foo.txt");
   
-  @Test
+  @Test (timeout = 6)
   public void testChangedStorageId() throws IOException, URISyntaxException,
-  InterruptedException {
+  InterruptedException, TimeoutException {
 HdfsConfiguration conf = new HdfsConfiguration();
 conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(1)
 .nnTopology(MiniDFSNNTopology.simpleHATopology())
 .build();
@@ -83,27 +88,27 @@ public class TestPendingCorruptDnMessages {
   
   // Wait until the standby NN queues up the corrupt block in the pending 
DN
   // message queue.
-  while (cluster.getNamesystem(1).getBlockManager()
-  .getPendingDataNodeMessageCount() < 1) {
-ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
-  }
-  
-  assertEquals(1, cluster.getNamesystem(1).getBlockManager()
-  .getPendingDataNodeMessageCount());
-  String oldStorageId = getRegisteredDatanodeUid(cluster, 1);
+  GenericTestUtils.waitFor(new Supplier() {
+@Override
+public Boolean get() {
+  return cluster.getNamesystem(1).getBlockManager()
+  .getPendingDataNodeMessageCount() == 1;
+}
+  }, 1000, 3);
+
+  final String oldStorageId = getRegisteredDatanodeUid(cluster, 1);
+  assertNotNull(oldStorageId);
   
   // Reformat/restart the DN.
   assertTrue(wipeAndRestartDn(cluster, 0));
   
-  // Give the DN time to start up and register, which will cause the
-  // DatanodeManager to dissociate the old storage ID from the DN xfer 
addr.
-  String newStorageId = "";
-  do {
-ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
-newStorageId = getRegisteredDatanodeUid(cluster, 1);
-System.out.println("> oldStorageId: " + oldStorageId +
-" newStorageId: " + newStorageId);
-  } while (newStorageId.equals(oldStorageId));
+  GenericTestUtils.waitFor(new Supplier() {
+@Override
+ 

hadoop git commit: HDFS-10302. BlockPlacementPolicyDefault should use default replication considerload value. Contributed by Lin Yiqun.

2016-04-18 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk ab903029a -> d8b729e16


HDFS-10302. BlockPlacementPolicyDefault should use default replication 
considerload value. Contributed by  Lin Yiqun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8b729e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8b729e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8b729e1

Branch: refs/heads/trunk
Commit: d8b729e16fb253e6c84f414d419b5663d9219a43
Parents: ab90302
Author: Kihwal Lee 
Authored: Mon Apr 18 07:58:55 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 18 07:58:55 2016 -0500

--
 .../hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8b729e1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index f20f5fb..474a5e7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -79,7 +79,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
  NetworkTopology clusterMap, 
  Host2NodesMap host2datanodeMap) {
 this.considerLoad = conf.getBoolean(
-DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT);
 this.considerLoadFactor = conf.getDouble(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);
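
The fix pairs the configuration key with its *_DEFAULT constant instead of a hard-coded literal. Here is a minimal sketch of that pattern; the key name below is illustrative, not a real DFSConfigKeys entry.

    import org.apache.hadoop.conf.Configuration;

    public class ConsiderLoadConfigExample {
      static final String  CONSIDER_LOAD_KEY     = "example.replication.considerLoad";
      static final boolean CONSIDER_LOAD_DEFAULT = true;

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Before the fix the default was the literal 'true'; reading both the
        // key and its default from matching constants keeps them from drifting.
        boolean considerLoad =
            conf.getBoolean(CONSIDER_LOAD_KEY, CONSIDER_LOAD_DEFAULT);
        System.out.println("considerLoad = " + considerLoad);
      }
    }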



hadoop git commit: HDFS-10302. BlockPlacementPolicyDefault should use default replication considerload value. Contributed by Lin Yiqun.

2016-04-18 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a06ee5a71 -> aa846bd49


HDFS-10302. BlockPlacementPolicyDefault should use default replication 
considerload value. Contributed by  Lin Yiqun.

(cherry picked from commit d8b729e16fb253e6c84f414d419b5663d9219a43)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa846bd4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa846bd4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa846bd4

Branch: refs/heads/branch-2
Commit: aa846bd49271d1911385fcaa0b74526d80304fd0
Parents: a06ee5a
Author: Kihwal Lee 
Authored: Mon Apr 18 08:00:44 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 18 08:00:44 2016 -0500

--
 .../hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa846bd4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index ee891a5..63e96c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -79,7 +79,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
  NetworkTopology clusterMap, 
  Host2NodesMap host2datanodeMap) {
 this.considerLoad = conf.getBoolean(
-DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT);
 this.considerLoadFactor = conf.getDouble(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);



hadoop git commit: HDFS-10302. BlockPlacementPolicyDefault should use default replication considerload value. Contributed by Lin Yiqun.

2016-04-18 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 09ffc65d9 -> 7ac2e5ec7


HDFS-10302. BlockPlacementPolicyDefault should use default replication 
considerload value. Contributed by  Lin Yiqun.

(cherry picked from commit d8b729e16fb253e6c84f414d419b5663d9219a43)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ac2e5ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ac2e5ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ac2e5ec

Branch: refs/heads/branch-2.8
Commit: 7ac2e5ec7be31e19e80862510acdd927ce3f87fd
Parents: 09ffc65
Author: Kihwal Lee 
Authored: Mon Apr 18 08:01:17 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 18 08:01:17 2016 -0500

--
 .../hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ac2e5ec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index ee891a5..63e96c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -79,7 +79,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
  NetworkTopology clusterMap, 
  Host2NodesMap host2datanodeMap) {
 this.considerLoad = conf.getBoolean(
-DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT);
 this.considerLoadFactor = conf.getDouble(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);



hadoop git commit: HDFS-9555. LazyPersistFileScrubber should still sleep if there are errors in the clear progress. Contributed by Phil Yang.

2016-04-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 19f0f9608 -> 5d4255a80


HDFS-9555. LazyPersistFileScrubber should still sleep if there are errors in 
the clear progress. Contributed by Phil Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d4255a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d4255a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d4255a8

Branch: refs/heads/trunk
Commit: 5d4255a80156d2cacfea8184b41805070223d3a9
Parents: 19f0f96
Author: Kihwal Lee 
Authored: Fri Apr 22 11:08:44 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Apr 22 11:08:44 2016 -0500

--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d4255a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 471e6b9..1f7a2f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3894,14 +3894,17 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   .debug("Namenode is in safemode, skipping scrubbing of 
corrupted lazy-persist files.");
 }
   }
+} catch (Exception e) {
+  FSNamesystem.LOG.error(
+  "Ignoring exception in LazyPersistFileScrubber:", e);
+}
+
+try {
   Thread.sleep(scrubIntervalSec * 1000);
 } catch (InterruptedException e) {
   FSNamesystem.LOG.info(
   "LazyPersistFileScrubber was interrupted, exiting");
   break;
-} catch (Exception e) {
-  FSNamesystem.LOG.error(
-  "Ignoring exception in LazyPersistFileScrubber:", e);
 }
   }
 }
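
After the patch, the scrub work and the inter-iteration sleep sit in separate try blocks, so an exception from the work can no longer skip the sleep and turn the loop into a busy spin. A distilled, standalone sketch of that loop shape follows; the method names are placeholders for clearCorruptLazyPersistFiles() and the real logging.

    public class ScrubberLoopExample implements Runnable {
      private volatile boolean shouldRun = true;
      private final long scrubIntervalSec = 300;

      @Override
      public void run() {
        while (shouldRun) {
          try {
            scrubOnce();
          } catch (Exception e) {
            // Log and keep going; do not let a failure skip the sleep below.
            System.err.println("Ignoring exception in scrubber: " + e);
          }
          try {
            Thread.sleep(scrubIntervalSec * 1000);
          } catch (InterruptedException e) {
            break; // interrupted: exit the loop
          }
        }
      }

      private void scrubOnce() throws Exception {
        // placeholder for the real scrubbing work
      }
    }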



hadoop git commit: HDFS-9555. LazyPersistFileScrubber should still sleep if there are errors in the clear progress. Contributed by Phil Yang.

2016-04-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5df89f9a8 -> 3d3ed9b68


HDFS-9555. LazyPersistFileScrubber should still sleep if there are errors in 
the clear progress. Contributed by Phil Yang.

(cherry picked from commit 5d4255a80156d2cacfea8184b41805070223d3a9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d3ed9b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d3ed9b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d3ed9b6

Branch: refs/heads/branch-2.8
Commit: 3d3ed9b68e0b389ea9a604f0e27d95b093fad66b
Parents: 5df89f9
Author: Kihwal Lee 
Authored: Fri Apr 22 11:10:36 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Apr 22 11:10:36 2016 -0500

--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d3ed9b6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7cad727..e36f454 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3896,14 +3896,17 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   .debug("Namenode is in safemode, skipping scrubbing of 
corrupted lazy-persist files.");
 }
   }
+} catch (Exception e) {
+  FSNamesystem.LOG.error(
+  "Ignoring exception in LazyPersistFileScrubber:", e);
+}
+
+try {
   Thread.sleep(scrubIntervalSec * 1000);
 } catch (InterruptedException e) {
   FSNamesystem.LOG.info(
   "LazyPersistFileScrubber was interrupted, exiting");
   break;
-} catch (Exception e) {
-  FSNamesystem.LOG.error(
-  "Ignoring exception in LazyPersistFileScrubber:", e);
 }
   }
 }



hadoop git commit: HDFS-9555. LazyPersistFileScrubber should still sleep if there are errors in the clear progress. Contributed by Phil Yang.

2016-04-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 edcb2a841 -> 43cfe5943


HDFS-9555. LazyPersistFileScrubber should still sleep if there are errors in 
the clear progress. Contributed by Phil Yang.

(cherry picked from commit 5d4255a80156d2cacfea8184b41805070223d3a9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43cfe594
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43cfe594
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43cfe594

Branch: refs/heads/branch-2
Commit: 43cfe5943b13e1f2b838704eeec18f2b2b5b1bf3
Parents: edcb2a8
Author: Kihwal Lee 
Authored: Fri Apr 22 11:10:06 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Apr 22 11:10:06 2016 -0500

--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43cfe594/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index be7ffa9..faebc89 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3855,14 +3855,17 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   .debug("Namenode is in safemode, skipping scrubbing of 
corrupted lazy-persist files.");
 }
   }
+} catch (Exception e) {
+  FSNamesystem.LOG.error(
+  "Ignoring exception in LazyPersistFileScrubber:", e);
+}
+
+try {
   Thread.sleep(scrubIntervalSec * 1000);
 } catch (InterruptedException e) {
   FSNamesystem.LOG.info(
   "LazyPersistFileScrubber was interrupted, exiting");
   break;
-} catch (Exception e) {
-  FSNamesystem.LOG.error(
-  "Ignoring exception in LazyPersistFileScrubber:", e);
 }
   }
 }



hadoop git commit: HDFS-9555. LazyPersistFileScrubber should still sleep if there are errors in the clear progress. Contributed by Phil Yang.

2016-04-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 e7f1b8fcb -> 9e3b91b14


HDFS-9555. LazyPersistFileScrubber should still sleep if there are errors in 
the clear progress. Contributed by Phil Yang.

(cherry picked from commit 5d4255a80156d2cacfea8184b41805070223d3a9)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e3b91b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e3b91b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e3b91b1

Branch: refs/heads/branch-2.7
Commit: 9e3b91b141fea7594f3e3dedddc7f99255a75938
Parents: e7f1b8f
Author: Kihwal Lee 
Authored: Fri Apr 22 11:47:18 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Apr 22 11:47:18 2016 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 9 ++---
 2 files changed, 9 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e3b91b1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7ea263b..085a33e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -158,6 +158,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-10239. Fsshell mv fails if port usage doesn't match in src and
 destination paths (Kuhu Shukla via kihwal)
 
+HDFS-9555. LazyPersistFileScrubber should still sleep if there are errors
+in the clear progress (Phil Yang via kihwal)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e3b91b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 199286f..012fb68 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4774,14 +4774,17 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   while (fsRunning && shouldRun) {
 try {
   clearCorruptLazyPersistFiles();
+} catch (Exception e) {
+  FSNamesystem.LOG.error(
+  "Ignoring exception in LazyPersistFileScrubber:", e);
+}
+
+try {
   Thread.sleep(scrubIntervalSec * 1000);
 } catch (InterruptedException e) {
   FSNamesystem.LOG.info(
   "LazyPersistFileScrubber was interrupted, exiting");
   break;
-} catch (Exception e) {
-  FSNamesystem.LOG.error(
-  "Ignoring exception in LazyPersistFileScrubber:", e);
 }
   }
 }



hadoop git commit: HADOOP-13052. ChecksumFileSystem mishandles crc file permissions. Contributed by Daryn Sharp.

2016-04-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7cb3a3da9 -> 9dbdc8e12


HADOOP-13052. ChecksumFileSystem mishandles crc file permissions. Contributed 
by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dbdc8e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dbdc8e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dbdc8e1

Branch: refs/heads/trunk
Commit: 9dbdc8e12d009e76635b2d20ce940851725cb069
Parents: 7cb3a3d
Author: Kihwal Lee 
Authored: Fri Apr 22 15:02:46 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Apr 22 15:02:46 2016 -0500

--
 .../apache/hadoop/fs/ChecksumFileSystem.java| 130 ---
 .../hadoop/fs/TestChecksumFileSystem.java   |  19 +++
 2 files changed, 134 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dbdc8e1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 953d1c0..1f14c4d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -24,11 +24,13 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
+import java.util.List;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
@@ -155,11 +157,14 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
   throw new IOException("Not a checksum file: "+sumFile);
 this.bytesPerSum = sums.readInt();
 set(fs.verifyChecksum, DataChecksum.newCrc32(), bytesPerSum, 4);
-  } catch (FileNotFoundException e) { // quietly ignore
-set(fs.verifyChecksum, null, 1, 0);
-  } catch (IOException e) {   // loudly ignore
-LOG.warn("Problem opening checksum file: "+ file + 
- ".  Ignoring exception: " , e); 
+  } catch (IOException e) {
+// mincing the message is terrible, but java throws permission
+// exceptions as FNF because that's all the method signatures allow!
+if (!(e instanceof FileNotFoundException) ||
+e.getMessage().endsWith(" (Permission denied)")) {
+  LOG.warn("Problem opening checksum file: "+ file +
+  ".  Ignoring exception: " , e);
+}
 set(fs.verifyChecksum, null, 1, 0);
   }
 }
@@ -478,6 +483,103 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
 blockSize, progress);
   }
 
+  abstract class FsOperation {
+boolean run(Path p) throws IOException {
+  boolean status = apply(p);
+  if (status) {
+Path checkFile = getChecksumFile(p);
+if (fs.exists(checkFile)) {
+  apply(checkFile);
+}
+  }
+  return status;
+}
+abstract boolean apply(Path p) throws IOException;
+  }
+
+
+  @Override
+  public void setPermission(Path src, final FsPermission permission)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOException {
+fs.setPermission(p, permission);
+return true;
+  }
+}.run(src);
+  }
+
+  @Override
+  public void setOwner(Path src, final String username, final String groupname)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOException {
+fs.setOwner(p, username, groupname);
+return true;
+  }
+}.run(src);
+  }
+
+  @Override
+  public void setAcl(Path src, final List aclSpec)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOException {
+fs.setAcl(p, aclSpec);
+return true;
+  }
+}.run(src);
+  }
+
+  @Override
+  public void modifyAclEntries(Path src, final List aclSpec)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOException {
+fs.modifyAclEntries(p, aclSpec);
+return true;
+ 
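
The FsOperation template added here applies a metadata change to a path and, when it succeeds, repeats it on the sidecar checksum file so the .crc file keeps permissions and ownership in step with its data file. Below is a self-contained sketch of the idea; the getChecksumFile helper is a simplified stand-in for ChecksumFileSystem's own method, not the actual implementation.

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class CrcAwareOps {
      abstract static class FsOperation {
        private final FileSystem fs;
        FsOperation(FileSystem fs) { this.fs = fs; }

        boolean run(Path p) throws IOException {
          boolean status = apply(p);
          if (status) {
            Path checkFile = getChecksumFile(p);
            if (fs.exists(checkFile)) {
              apply(checkFile); // mirror the change onto the crc file
            }
          }
          return status;
        }
        abstract boolean apply(Path p) throws IOException;
      }

      // Simplified stand-in: checksum files live next to the data file as
      // ".<name>.crc".
      static Path getChecksumFile(Path file) {
        return new Path(file.getParent(), "." + file.getName() + ".crc");
      }

      static void setPermission(final FileSystem fs, Path src,
                                final FsPermission permission) throws IOException {
        new FsOperation(fs) {
          @Override
          boolean apply(Path p) throws IOException {
            fs.setPermission(p, permission);
            return true;
          }
        }.run(src);
      }
    }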

hadoop git commit: HADOOP-13052. ChecksumFileSystem mishandles crc file permissions. Contributed by Daryn Sharp.

2016-04-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 45ff579bf -> d0718ed46


HADOOP-13052. ChecksumFileSystem mishandles crc file permissions. Contributed 
by Daryn Sharp.

(cherry picked from commit 9dbdc8e12d009e76635b2d20ce940851725cb069)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0718ed4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0718ed4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0718ed4

Branch: refs/heads/branch-2
Commit: d0718ed466f8a386844887c8c0795e846ce936a6
Parents: 45ff579
Author: Kihwal Lee 
Authored: Fri Apr 22 15:03:56 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Apr 22 15:03:56 2016 -0500

--
 .../apache/hadoop/fs/ChecksumFileSystem.java| 130 ---
 .../hadoop/fs/TestChecksumFileSystem.java   |  19 +++
 2 files changed, 134 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0718ed4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 641fdc2..46a235a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -24,11 +24,13 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
+import java.util.List;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
@@ -155,11 +157,14 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
   throw new IOException("Not a checksum file: "+sumFile);
 this.bytesPerSum = sums.readInt();
 set(fs.verifyChecksum, DataChecksum.newCrc32(), bytesPerSum, 4);
-  } catch (FileNotFoundException e) { // quietly ignore
-set(fs.verifyChecksum, null, 1, 0);
-  } catch (IOException e) {   // loudly ignore
-LOG.warn("Problem opening checksum file: "+ file + 
- ".  Ignoring exception: " , e); 
+  } catch (IOException e) {
+// mincing the message is terrible, but java throws permission
+// exceptions as FNF because that's all the method signatures allow!
+if (!(e instanceof FileNotFoundException) ||
+e.getMessage().endsWith(" (Permission denied)")) {
+  LOG.warn("Problem opening checksum file: "+ file +
+  ".  Ignoring exception: " , e);
+}
 set(fs.verifyChecksum, null, 1, 0);
   }
 }
@@ -478,6 +483,103 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
 blockSize, progress);
   }
 
+  abstract class FsOperation {
+boolean run(Path p) throws IOException {
+  boolean status = apply(p);
+  if (status) {
+Path checkFile = getChecksumFile(p);
+if (fs.exists(checkFile)) {
+  apply(checkFile);
+}
+  }
+  return status;
+}
+abstract boolean apply(Path p) throws IOException;
+  }
+
+
+  @Override
+  public void setPermission(Path src, final FsPermission permission)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOException {
+fs.setPermission(p, permission);
+return true;
+  }
+}.run(src);
+  }
+
+  @Override
+  public void setOwner(Path src, final String username, final String groupname)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOException {
+fs.setOwner(p, username, groupname);
+return true;
+  }
+}.run(src);
+  }
+
+  @Override
+  public void setAcl(Path src, final List aclSpec)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOException {
+fs.setAcl(p, aclSpec);
+return true;
+  }
+}.run(src);
+  }
+
+  @Override
+  public void modifyAclEntries(Path src, final List aclSpec)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOE

hadoop git commit: HADOOP-13052. ChecksumFileSystem mishandles crc file permissions. Contributed by Daryn Sharp.

2016-04-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 3d3ed9b68 -> 989cd895e


HADOOP-13052. ChecksumFileSystem mishandles crc file permissions. Contributed 
by Daryn Sharp.

(cherry picked from commit 9dbdc8e12d009e76635b2d20ce940851725cb069)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/989cd895
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/989cd895
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/989cd895

Branch: refs/heads/branch-2.8
Commit: 989cd895e367ba2c716951fef3c6a0a430eecc23
Parents: 3d3ed9b
Author: Kihwal Lee 
Authored: Fri Apr 22 15:06:25 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Apr 22 15:06:25 2016 -0500

--
 .../apache/hadoop/fs/ChecksumFileSystem.java| 130 ---
 .../hadoop/fs/TestChecksumFileSystem.java   |  19 +++
 2 files changed, 134 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/989cd895/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 641fdc2..46a235a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -24,11 +24,13 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
+import java.util.List;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
@@ -155,11 +157,14 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
   throw new IOException("Not a checksum file: "+sumFile);
 this.bytesPerSum = sums.readInt();
 set(fs.verifyChecksum, DataChecksum.newCrc32(), bytesPerSum, 4);
-  } catch (FileNotFoundException e) { // quietly ignore
-set(fs.verifyChecksum, null, 1, 0);
-  } catch (IOException e) {   // loudly ignore
-LOG.warn("Problem opening checksum file: "+ file + 
- ".  Ignoring exception: " , e); 
+  } catch (IOException e) {
+// mincing the message is terrible, but java throws permission
+// exceptions as FNF because that's all the method signatures allow!
+if (!(e instanceof FileNotFoundException) ||
+e.getMessage().endsWith(" (Permission denied)")) {
+  LOG.warn("Problem opening checksum file: "+ file +
+  ".  Ignoring exception: " , e);
+}
 set(fs.verifyChecksum, null, 1, 0);
   }
 }
@@ -478,6 +483,103 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
 blockSize, progress);
   }
 
+  abstract class FsOperation {
+boolean run(Path p) throws IOException {
+  boolean status = apply(p);
+  if (status) {
+Path checkFile = getChecksumFile(p);
+if (fs.exists(checkFile)) {
+  apply(checkFile);
+}
+  }
+  return status;
+}
+abstract boolean apply(Path p) throws IOException;
+  }
+
+
+  @Override
+  public void setPermission(Path src, final FsPermission permission)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOException {
+fs.setPermission(p, permission);
+return true;
+  }
+}.run(src);
+  }
+
+  @Override
+  public void setOwner(Path src, final String username, final String groupname)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOException {
+fs.setOwner(p, username, groupname);
+return true;
+  }
+}.run(src);
+  }
+
+  @Override
+  public void setAcl(Path src, final List aclSpec)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOException {
+fs.setAcl(p, aclSpec);
+return true;
+  }
+}.run(src);
+  }
+
+  @Override
+  public void modifyAclEntries(Path src, final List aclSpec)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(

hadoop git commit: HADOOP-13052. ChecksumFileSystem mishandles crc file permissions. Contributed by Daryn Sharp.

2016-04-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 9e3b91b14 -> 0bb23e22c


HADOOP-13052. ChecksumFileSystem mishandles crc file permissions. Contributed 
by Daryn Sharp.

(cherry picked from commit 9dbdc8e12d009e76635b2d20ce940851725cb069)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bb23e22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bb23e22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bb23e22

Branch: refs/heads/branch-2.7
Commit: 0bb23e22cef74a6f6dbd46f77288f15fb69a0c03
Parents: 9e3b91b
Author: Kihwal Lee 
Authored: Fri Apr 22 15:22:42 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Apr 22 15:22:42 2016 -0500

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/fs/ChecksumFileSystem.java| 130 ---
 .../hadoop/fs/TestChecksumFileSystem.java   |  19 +++
 3 files changed, 137 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bb23e22/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a67c7c1..d33e5b3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -88,6 +88,9 @@ Release 2.7.3 - UNRELEASED
 HADOOP-12989. Some tests in org.apache.hadoop.fs.shell.find occasionally
 time out. (Takashi Ohnishi via aajisaka)
 
+HADOOP-13052. ChecksumFileSystem mishandles crc file permissions.
+(Daryn Sharp via kihwal)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bb23e22/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index dddf0ce..c421a11 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -24,10 +24,12 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
@@ -151,11 +153,14 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
   throw new IOException("Not a checksum file: "+sumFile);
 this.bytesPerSum = sums.readInt();
 set(fs.verifyChecksum, DataChecksum.newCrc32(), bytesPerSum, 4);
-  } catch (FileNotFoundException e) { // quietly ignore
-set(fs.verifyChecksum, null, 1, 0);
-  } catch (IOException e) {   // loudly ignore
-LOG.warn("Problem opening checksum file: "+ file + 
- ".  Ignoring exception: " , e); 
+  } catch (IOException e) {
+// mincing the message is terrible, but java throws permission
+// exceptions as FNF because that's all the method signatures allow!
+if (!(e instanceof FileNotFoundException) ||
+e.getMessage().endsWith(" (Permission denied)")) {
+  LOG.warn("Problem opening checksum file: "+ file +
+  ".  Ignoring exception: " , e);
+}
 set(fs.verifyChecksum, null, 1, 0);
   }
 }
@@ -476,6 +481,103 @@ public abstract class ChecksumFileSystem extends 
FilterFileSystem {
 blockSize, progress);
   }
 
+  abstract class FsOperation {
+boolean run(Path p) throws IOException {
+  boolean status = apply(p);
+  if (status) {
+Path checkFile = getChecksumFile(p);
+if (fs.exists(checkFile)) {
+  apply(checkFile);
+}
+  }
+  return status;
+}
+abstract boolean apply(Path p) throws IOException;
+  }
+
+
+  @Override
+  public void setPermission(Path src, final FsPermission permission)
+  throws IOException {
+new FsOperation(){
+  @Override
+  boolean apply(Path p) throws IOException {
+fs.setPermission(p, permission);
+return true;
+  }
+}.run(src)
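
The FsOperation helper added above is a small template method: it applies an operation to the data file first and, only if that succeeded and a checksum sidecar exists, repeats the same operation on the sidecar so the two never drift apart. A minimal stand-alone sketch of that idea (plain Java, not the Hadoop implementation; the class name, the "." + name + ".crc" sidecar convention and the sample operation are made up for illustration):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class SidecarOperationSketch {

  /** The operation to mirror onto the checksum sidecar. */
  interface FileOp {
    boolean apply(Path p) throws IOException;
  }

  /** Hypothetical ".<name>.crc" naming convention for the sidecar. */
  static Path checksumFileFor(Path file) {
    return file.resolveSibling("." + file.getFileName() + ".crc");
  }

  /** Apply the op to the file first, then to its sidecar if one exists. */
  static boolean run(Path file, FileOp op) throws IOException {
    boolean status = op.apply(file);
    if (status) {
      Path crc = checksumFileFor(file);
      if (Files.exists(crc)) {     // the sidecar is optional
        op.apply(crc);             // keep it consistent with the data file
      }
    }
    return status;
  }

  public static void main(String[] args) throws IOException {
    Path data = Paths.get(args.length > 0 ? args[0] : "data.txt");
    // Example op: make the file (and its .crc, if present) read-only.
    boolean done = run(data, p -> p.toFile().setReadOnly());
    System.out.println("read-only applied: " + done);
  }
}

Centralizing the sidecar handling in one run() method is what lets each override (setPermission, setOwner, setAcl, ...) stay a one-liner that only names the underlying call.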

hadoop git commit: HDFS-10318. TestJMXGet hides the real error in case of test failure. Contributed by Andras Bokor.

2016-04-25 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk bec5b4cd8 -> 10f0f7851


HDFS-10318. TestJMXGet hides the real error in case of test failure. 
Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10f0f785
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10f0f785
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10f0f785

Branch: refs/heads/trunk
Commit: 10f0f7851a3255caab775777e8fb6c2781d97062
Parents: bec5b4c
Author: Kihwal Lee 
Authored: Mon Apr 25 11:38:14 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 25 11:38:14 2016 -0500

--
 .../java/org/apache/hadoop/tools/TestJMXGet.java| 16 
 1 file changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10f0f785/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index f83e7d0..7abe5fd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -31,6 +31,7 @@ import java.io.PrintStream;
 import java.lang.management.ManagementFactory;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.TimeoutException;
 
 import javax.management.MBeanServerConnection;
 import javax.management.ObjectName;
@@ -55,6 +56,7 @@ import org.junit.Test;
  * 
  */
 public class TestJMXGet {
+  public static final String WRONG_METRIC_VALUE_ERROR_MSG = "Unable to get the 
correct value for %s.";
 
   private Configuration config;
   private MiniDFSCluster cluster;
@@ -106,9 +108,12 @@ public class TestJMXGet {
 assertTrue("error printAllValues", checkPrintAllValues(jmx));
 
 //get some data from different source
-DFSTestUtil.waitForMetric(jmx, "NumLiveDataNodes", numDatanodes);
-assertEquals(numDatanodes, Integer.parseInt(
+try {
+  DFSTestUtil.waitForMetric(jmx, "NumLiveDataNodes", numDatanodes);
+} catch (TimeoutException e) {
+assertEquals(String.format(WRONG_METRIC_VALUE_ERROR_MSG, 
"NumLiveDataNodes"),numDatanodes, Integer.parseInt(
 jmx.getValue("NumLiveDataNodes")));
+}
 assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")),
 getMetrics("FSNamesystem"));
 
@@ -158,8 +163,11 @@ public class TestJMXGet {
 String serviceName = "DataNode";
 jmx.setService(serviceName);
 jmx.init();
-DFSTestUtil.waitForMetric(jmx, "BytesWritten", fileSize);
-assertEquals(fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
+try {
+  DFSTestUtil.waitForMetric(jmx, "BytesWritten", fileSize);
+} catch (TimeoutException e) {
+  assertEquals(String.format(WRONG_METRIC_VALUE_ERROR_MSG, 
"BytesWritten"), fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
+}
 
 cluster.shutdown();
 MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
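
The pattern in the patch above is: wait for the metric first, and only if the wait times out run the assertion, so the failure message names the metric and shows the stale value instead of hiding the real error. A rough stand-alone sketch of that wait-then-assert flow (plain Java, no JUnit or MiniDFSCluster; the metric source and timeout values are invented for illustration):

import java.util.concurrent.TimeoutException;
import java.util.function.IntSupplier;

public class WaitForMetricSketch {

  static final String WRONG_METRIC_VALUE_ERROR_MSG =
      "Unable to get the correct value for %s.";

  /** Polls the supplier until it matches or the timeout elapses. */
  static void waitForMetric(String name, IntSupplier metric, int expected,
      long timeoutMillis) throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (metric.getAsInt() != expected) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("timed out waiting for " + name);
      }
      Thread.sleep(50);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    IntSupplier numLiveDataNodes = () -> 2;   // pretend metric source, stuck at 2
    int expected = 3;
    try {
      waitForMetric("NumLiveDataNodes", numLiveDataNodes, expected, 500);
      System.out.println("metric reached " + expected);
    } catch (TimeoutException e) {
      // Only after the wait gives up do we assert, so the failure message
      // names the metric and shows the stale value instead of hiding it.
      int actual = numLiveDataNodes.getAsInt();
      if (actual != expected) {
        throw new AssertionError(String.format(WRONG_METRIC_VALUE_ERROR_MSG,
            "NumLiveDataNodes") + " expected=" + expected + " actual=" + actual);
      }
    }
  }
}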



hadoop git commit: HDFS-10318. TestJMXGet hides the real error in case of test failure. Contributed by Andras Bokor.

2016-04-25 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5bceaa968 -> f6bd9e07d


HDFS-10318. TestJMXGet hides the real error in case of test failure. 
Contributed by Andras Bokor.

(cherry picked from commit 10f0f7851a3255caab775777e8fb6c2781d97062)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6bd9e07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6bd9e07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6bd9e07

Branch: refs/heads/branch-2
Commit: f6bd9e07d2d238e692657ad8612f58a54d799a87
Parents: 5bceaa9
Author: Kihwal Lee 
Authored: Mon Apr 25 11:41:40 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 25 11:41:40 2016 -0500

--
 .../java/org/apache/hadoop/tools/TestJMXGet.java| 16 
 1 file changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6bd9e07/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index f64ee9d..36fdac4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -31,6 +31,7 @@ import java.io.PrintStream;
 import java.lang.management.ManagementFactory;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.TimeoutException;
 
 import javax.management.MBeanServerConnection;
 import javax.management.ObjectName;
@@ -55,6 +56,7 @@ import org.junit.Test;
  * 
  */
 public class TestJMXGet {
+  public static final String WRONG_METRIC_VALUE_ERROR_MSG = "Unable to get the 
correct value for %s.";
 
   private Configuration config;
   private MiniDFSCluster cluster;
@@ -118,9 +120,12 @@ public class TestJMXGet {
 assertTrue("error printAllValues", checkPrintAllValues(jmx));
 
 //get some data from different source
-DFSTestUtil.waitForMetric(jmx, "NumLiveDataNodes", numDatanodes);
-assertEquals(numDatanodes, Integer.parseInt(
+try {
+  DFSTestUtil.waitForMetric(jmx, "NumLiveDataNodes", numDatanodes);
+} catch (TimeoutException e) {
+assertEquals(String.format(WRONG_METRIC_VALUE_ERROR_MSG, 
"NumLiveDataNodes"),numDatanodes, Integer.parseInt(
 jmx.getValue("NumLiveDataNodes")));
+}
 assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")),
 getMetrics("FSNamesystem"));
 
@@ -169,8 +174,11 @@ public class TestJMXGet {
 String serviceName = "DataNode";
 jmx.setService(serviceName);
 jmx.init();
-DFSTestUtil.waitForMetric(jmx, "BytesWritten", fileSize);
-assertEquals(fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
+try {
+  DFSTestUtil.waitForMetric(jmx, "BytesWritten", fileSize);
+} catch (TimeoutException e) {
+  assertEquals(String.format(WRONG_METRIC_VALUE_ERROR_MSG, 
"BytesWritten"), fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
+}
 
 cluster.shutdown();
 MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();



hadoop git commit: HDFS-10318. TestJMXGet hides the real error in case of test failure. Contributed by Andras Bokor.

2016-04-25 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 e01242edb -> 973c73aa6


HDFS-10318. TestJMXGet hides the real error in case of test failure. 
Contributed by Andras Bokor.

(cherry picked from commit 10f0f7851a3255caab775777e8fb6c2781d97062)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/973c73aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/973c73aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/973c73aa

Branch: refs/heads/branch-2.8
Commit: 973c73aa68b9339bcf69e49ab27e7a20ff2063be
Parents: e01242e
Author: Kihwal Lee 
Authored: Mon Apr 25 11:42:17 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 25 11:42:17 2016 -0500

--
 .../java/org/apache/hadoop/tools/TestJMXGet.java| 16 
 1 file changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/973c73aa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index f64ee9d..36fdac4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -31,6 +31,7 @@ import java.io.PrintStream;
 import java.lang.management.ManagementFactory;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.TimeoutException;
 
 import javax.management.MBeanServerConnection;
 import javax.management.ObjectName;
@@ -55,6 +56,7 @@ import org.junit.Test;
  * 
  */
 public class TestJMXGet {
+  public static final String WRONG_METRIC_VALUE_ERROR_MSG = "Unable to get the 
correct value for %s.";
 
   private Configuration config;
   private MiniDFSCluster cluster;
@@ -118,9 +120,12 @@ public class TestJMXGet {
 assertTrue("error printAllValues", checkPrintAllValues(jmx));
 
 //get some data from different source
-DFSTestUtil.waitForMetric(jmx, "NumLiveDataNodes", numDatanodes);
-assertEquals(numDatanodes, Integer.parseInt(
+try {
+  DFSTestUtil.waitForMetric(jmx, "NumLiveDataNodes", numDatanodes);
+} catch (TimeoutException e) {
+assertEquals(String.format(WRONG_METRIC_VALUE_ERROR_MSG, 
"NumLiveDataNodes"),numDatanodes, Integer.parseInt(
 jmx.getValue("NumLiveDataNodes")));
+}
 assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")),
 getMetrics("FSNamesystem"));
 
@@ -169,8 +174,11 @@ public class TestJMXGet {
 String serviceName = "DataNode";
 jmx.setService(serviceName);
 jmx.init();
-DFSTestUtil.waitForMetric(jmx, "BytesWritten", fileSize);
-assertEquals(fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
+try {
+  DFSTestUtil.waitForMetric(jmx, "BytesWritten", fileSize);
+} catch (TimeoutException e) {
+  assertEquals(String.format(WRONG_METRIC_VALUE_ERROR_MSG, 
"BytesWritten"), fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
+}
 
 cluster.shutdown();
 MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();



hadoop git commit: HDFS-10329. Bad initialisation of StringBuffer in RequestHedgingProxyProvider. Contributed by Lin Yiqun.

2016-04-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1a3f1482e -> c6a2430b4


HDFS-10329. Bad initialisation of StringBuffer in RequestHedgingProxyProvider. 
Contributed by Lin Yiqun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6a2430b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6a2430b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6a2430b

Branch: refs/heads/trunk
Commit: c6a2430b47692a72c500373d6fc1bce78652675b
Parents: 1a3f148
Author: Kihwal Lee 
Authored: Tue Apr 26 11:45:29 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 26 11:45:29 2016 -0500

--
 .../hdfs/server/namenode/ha/RequestHedgingProxyProvider.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a2430b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index f4d2c6b..d8a516e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -164,7 +164,7 @@ public class RequestHedgingProxyProvider extends
   return successfulProxy;
 }
Map<String, ProxyInfo<T>> targetProxyInfos = new HashMap<>();
-StringBuilder combinedInfo = new StringBuilder('[');
+StringBuilder combinedInfo = new StringBuilder("[");
for (int i = 0; i < proxies.size(); i++) {
  ProxyInfo<T> pInfo = super.getProxy();
   incrementProxyIndex();
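
The one-character change above matters because StringBuilder has no char constructor: '[' is silently widened to the int 91 and picked up by the capacity overload, so the builder starts out empty. A quick demonstration (JDK only, nothing Hadoop-specific assumed):

public class StringBuilderCharPitfall {
  public static void main(String[] args) {
    StringBuilder wrong = new StringBuilder('[');   // capacity 91, contents ""
    StringBuilder right = new StringBuilder("[");   // contents "["
    System.out.println("wrong length = " + wrong.length());  // 0
    System.out.println("right length = " + right.length());  // 1
    System.out.println("right = " + right);                   // prints [
  }
}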



hadoop git commit: HDFS-10329. Bad initialisation of StringBuffer in RequestHedgingProxyProvider. Contributed by Lin Yiqun.

2016-04-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 52bfa90fe -> c991beac6


HDFS-10329. Bad initialisation of StringBuffer in RequestHedgingProxyProvider. 
Contributed by Lin Yiqun.

(cherry picked from commit c6a2430b47692a72c500373d6fc1bce78652675b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c991beac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c991beac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c991beac

Branch: refs/heads/branch-2
Commit: c991beac64a6ae09471c64720c96ab8042591640
Parents: 52bfa90
Author: Kihwal Lee 
Authored: Tue Apr 26 11:46:46 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 26 11:46:46 2016 -0500

--
 .../hdfs/server/namenode/ha/RequestHedgingProxyProvider.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c991beac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index f4d2c6b..d8a516e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -164,7 +164,7 @@ public class RequestHedgingProxyProvider extends
   return successfulProxy;
 }
Map<String, ProxyInfo<T>> targetProxyInfos = new HashMap<>();
-StringBuilder combinedInfo = new StringBuilder('[');
+StringBuilder combinedInfo = new StringBuilder("[");
for (int i = 0; i < proxies.size(); i++) {
  ProxyInfo<T> pInfo = super.getProxy();
   incrementProxyIndex();



hadoop git commit: HDFS-10329. Bad initialisation of StringBuffer in RequestHedgingProxyProvider. Contributed by Lin Yiqun.

2016-04-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 973c73aa6 -> 621be6059


HDFS-10329. Bad initialisation of StringBuffer in RequestHedgingProxyProvider. 
Contributed by Lin Yiqun.

(cherry picked from commit c6a2430b47692a72c500373d6fc1bce78652675b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/621be605
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/621be605
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/621be605

Branch: refs/heads/branch-2.8
Commit: 621be605920fecdfe2a798b40d62d0d70dcaee8a
Parents: 973c73a
Author: Kihwal Lee 
Authored: Tue Apr 26 11:47:15 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 26 11:47:15 2016 -0500

--
 .../hdfs/server/namenode/ha/RequestHedgingProxyProvider.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/621be605/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index f4d2c6b..d8a516e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -164,7 +164,7 @@ public class RequestHedgingProxyProvider extends
   return successfulProxy;
 }
Map<String, ProxyInfo<T>> targetProxyInfos = new HashMap<>();
-StringBuilder combinedInfo = new StringBuilder('[');
+StringBuilder combinedInfo = new StringBuilder("[");
for (int i = 0; i < proxies.size(); i++) {
  ProxyInfo<T> pInfo = super.getProxy();
   incrementProxyIndex();



hadoop git commit: HDFS-10330. Add Corrupt Blocks Information in Metasave output. Contributed by Kuhu Shukla.

2016-04-27 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 66b07d837 -> 919a1d824


HDFS-10330. Add Corrupt Blocks Information in Metasave output. Contributed by 
Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/919a1d82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/919a1d82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/919a1d82

Branch: refs/heads/trunk
Commit: 919a1d824a0a61145dc7ae59cfba3f34d91f2681
Parents: 66b07d8
Author: Kihwal Lee 
Authored: Wed Apr 27 08:19:48 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Apr 27 08:19:48 2016 -0500

--
 .../server/blockmanagement/BlockManager.java| 43 +++
 .../blockmanagement/CorruptReplicasMap.java | 12 
 .../blockmanagement/TestBlockManager.java   | 58 
 .../hdfs/server/namenode/TestMetaSave.java  |  4 +-
 4 files changed, 116 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/919a1d82/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ec52122..ff54f48 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -109,6 +109,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.LightWeightGSet;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 
@@ -612,6 +613,48 @@ public class BlockManager implements BlockStatsMXBean {
 // Dump blocks that are waiting to be deleted
 invalidateBlocks.dump(out);
 
+//Dump corrupt blocks and their storageIDs
+Set<Block> corruptBlocks = corruptReplicas.getCorruptBlocks();
+out.println("Corrupt Blocks:");
+for(Block block : corruptBlocks) {
+  Collection<DatanodeDescriptor> corruptNodes =
+  corruptReplicas.getNodes(block);
+  if (corruptNodes == null) {
+LOG.warn(block.getBlockId() +
+" is corrupt but has no associated node.");
+continue;
+  }
+  int numNodesToFind = corruptNodes.size();
+  for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
+DatanodeDescriptor node = storage.getDatanodeDescriptor();
+if (corruptNodes.contains(node)) {
+  String storageId = storage.getStorageID();
+  DatanodeStorageInfo storageInfo = node.getStorageInfo(storageId);
+  State state = (storageInfo == null) ? null : storageInfo.getState();
+  out.println("Block=" + block.getBlockId() + "\tNode=" + 
node.getName()
+  + "\tStorageID=" + storageId + "\tStorageState=" + state
+  + "\tTotalReplicas=" +
+  blocksMap.numNodes(block)
+  + "\tReason=" + corruptReplicas.getCorruptReason(block, node));
+  numNodesToFind--;
+  if (numNodesToFind == 0) {
+break;
+  }
+}
+  }
+  if (numNodesToFind > 0) {
+String[] corruptNodesList = new String[corruptNodes.size()];
+int i = 0;
+for (DatanodeDescriptor d : corruptNodes) {
+  corruptNodesList[i] = d.getHostName();
+  i++;
+}
+out.println(block.getBlockId() + " corrupt on " +
+StringUtils.join(",", corruptNodesList) + " but not all nodes are" +
+"found in its block locations");
+  }
+}
+
 // Dump all datanodes
 getDatanodeManager().datanodeDump(out);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/919a1d82/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index bd57ea2..35468da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ 
b/hadoop-hdfs-proj
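
The metasave addition above prints one tab-separated line per corrupt replica and then flags corrupt nodes that no longer appear among the block's current locations. A toy sketch of that reporting shape (plain Java with made-up sample data and field subset, not BlockManager code):

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CorruptBlockDumpSketch {
  public static void main(String[] args) {
    // blockId -> nodes reported corrupt (sample data, purely illustrative)
    Map<Long, List<String>> corruptReplicas = new HashMap<>();
    corruptReplicas.put(1001L, Arrays.asList("dn1:9866", "dn2:9866"));
    corruptReplicas.put(1002L, Arrays.asList("dn9:9866"));

    // blockId -> nodes currently listed among the block's locations
    Map<Long, List<String>> blockLocations = new HashMap<>();
    blockLocations.put(1001L, Arrays.asList("dn1:9866", "dn2:9866", "dn3:9866"));
    blockLocations.put(1002L, Arrays.asList("dn4:9866"));   // dn9 has dropped out

    System.out.println("Corrupt Blocks:");
    for (Map.Entry<Long, List<String>> e : corruptReplicas.entrySet()) {
      long blockId = e.getKey();
      List<String> locations =
          blockLocations.getOrDefault(blockId, Collections.emptyList());
      int notFound = 0;
      for (String node : e.getValue()) {
        if (locations.contains(node)) {
          // One tab-separated line per corrupt replica that is still located.
          System.out.println("Block=" + blockId + "\tNode=" + node
              + "\tTotalReplicas=" + locations.size()
              + "\tReason=CORRUPTION_REPORTED");
        } else {
          notFound++;
        }
      }
      if (notFound > 0) {
        // Mirror the "corrupt on ... but not found in block locations" note.
        System.out.println(blockId + " corrupt on " + notFound
            + " node(s) not found in its current block locations");
      }
    }
  }
}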

hadoop git commit: HDFS-10330. Add Corrupt Blocks Information in Metasave output. Contributed by Kuhu Shukla.

2016-04-27 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cf3e93ee7 -> e181092b8


HDFS-10330. Add Corrupt Blocks Information in Metasave output. Contributed by 
Kuhu Shukla.

(cherry picked from commit 919a1d824a0a61145dc7ae59cfba3f34d91f2681)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e181092b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e181092b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e181092b

Branch: refs/heads/branch-2
Commit: e181092b86a8822e0f252b6ffa77baad4f711e77
Parents: cf3e93e
Author: Kihwal Lee 
Authored: Wed Apr 27 08:29:30 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Apr 27 08:29:30 2016 -0500

--
 .../server/blockmanagement/BlockManager.java| 43 ++
 .../blockmanagement/CorruptReplicasMap.java | 12 
 .../blockmanagement/TestBlockManager.java   | 60 +++-
 .../hdfs/server/namenode/TestMetaSave.java  |  4 +-
 4 files changed, 117 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e181092b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ee9ca98..6288db9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -96,6 +96,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.LightWeightGSet;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -566,6 +567,48 @@ public class BlockManager implements BlockStatsMXBean {
 // Dump blocks that are waiting to be deleted
 invalidateBlocks.dump(out);
 
+//Dump corrupt blocks and their storageIDs
+Set<Block> corruptBlocks = corruptReplicas.getCorruptBlocks();
+out.println("Corrupt Blocks:");
+for(Block block : corruptBlocks) {
+  Collection<DatanodeDescriptor> corruptNodes =
+  corruptReplicas.getNodes(block);
+  if (corruptNodes == null) {
+LOG.warn(block.getBlockId() +
+" is corrupt but has no associated node.");
+continue;
+  }
+  int numNodesToFind = corruptNodes.size();
+  for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
+DatanodeDescriptor node = storage.getDatanodeDescriptor();
+if (corruptNodes.contains(node)) {
+  String storageId = storage.getStorageID();
+  DatanodeStorageInfo storageInfo = node.getStorageInfo(storageId);
+  State state = (storageInfo == null) ? null : storageInfo.getState();
+  out.println("Block=" + block.getBlockId() + "\tNode=" + 
node.getName()
+  + "\tStorageID=" + storageId + "\tStorageState=" + state
+  + "\tTotalReplicas=" +
+  blocksMap.numNodes(block)
+  + "\tReason=" + corruptReplicas.getCorruptReason(block, node));
+  numNodesToFind--;
+  if (numNodesToFind == 0) {
+break;
+  }
+}
+  }
+  if (numNodesToFind > 0) {
+String[] corruptNodesList = new String[corruptNodes.size()];
+int i = 0;
+for (DatanodeDescriptor d : corruptNodes) {
+  corruptNodesList[i] = d.getHostName();
+  i++;
+}
+out.println(block.getBlockId() + " corrupt on " +
+StringUtils.join(",", corruptNodesList) + " but not all nodes are" +
+"found in its block locations");
+  }
+}
+
 // Dump all datanodes
 getDatanodeManager().datanodeDump(out);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e181092b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/

hadoop git commit: HDFS-10330. Add Corrupt Blocks Information in Metasave output. Contributed by Kuhu Shukla.

2016-04-27 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 b39e9efee -> dd7c9f5b8


HDFS-10330. Add Corrupt Blocks Information in Metasave output. Contributed by 
Kuhu Shukla.

(cherry picked from commit e181092b86a8822e0f252b6ffa77baad4f711e77)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd7c9f5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd7c9f5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd7c9f5b

Branch: refs/heads/branch-2.8
Commit: dd7c9f5b8f08d90019231e18163087410d1c8931
Parents: b39e9ef
Author: Kihwal Lee 
Authored: Wed Apr 27 08:30:29 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Apr 27 08:30:29 2016 -0500

--
 .../server/blockmanagement/BlockManager.java| 43 ++
 .../blockmanagement/CorruptReplicasMap.java | 12 
 .../blockmanagement/TestBlockManager.java   | 60 +++-
 .../hdfs/server/namenode/TestMetaSave.java  |  4 +-
 4 files changed, 117 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd7c9f5b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 31fe95f..7bb2edc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -97,6 +97,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.LightWeightGSet;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -557,6 +558,48 @@ public class BlockManager implements BlockStatsMXBean {
 // Dump blocks that are waiting to be deleted
 invalidateBlocks.dump(out);
 
+//Dump corrupt blocks and their storageIDs
+Set<Block> corruptBlocks = corruptReplicas.getCorruptBlocks();
+out.println("Corrupt Blocks:");
+for(Block block : corruptBlocks) {
+  Collection<DatanodeDescriptor> corruptNodes =
+  corruptReplicas.getNodes(block);
+  if (corruptNodes == null) {
+LOG.warn(block.getBlockId() +
+" is corrupt but has no associated node.");
+continue;
+  }
+  int numNodesToFind = corruptNodes.size();
+  for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
+DatanodeDescriptor node = storage.getDatanodeDescriptor();
+if (corruptNodes.contains(node)) {
+  String storageId = storage.getStorageID();
+  DatanodeStorageInfo storageInfo = node.getStorageInfo(storageId);
+  State state = (storageInfo == null) ? null : storageInfo.getState();
+  out.println("Block=" + block.getBlockId() + "\tNode=" + 
node.getName()
+  + "\tStorageID=" + storageId + "\tStorageState=" + state
+  + "\tTotalReplicas=" +
+  blocksMap.numNodes(block)
+  + "\tReason=" + corruptReplicas.getCorruptReason(block, node));
+  numNodesToFind--;
+  if (numNodesToFind == 0) {
+break;
+  }
+}
+  }
+  if (numNodesToFind > 0) {
+String[] corruptNodesList = new String[corruptNodes.size()];
+int i = 0;
+for (DatanodeDescriptor d : corruptNodes) {
+  corruptNodesList[i] = d.getHostName();
+  i++;
+}
+out.println(block.getBlockId() + " corrupt on " +
+StringUtils.join(",", corruptNodesList) + " but not all nodes are" +
+"found in its block locations");
+  }
+}
+
 // Dump all datanodes
 getDatanodeManager().datanodeDump(out);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd7c9f5b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index bd57ea2..35468da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/had

hadoop git commit: HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on failed storages. Contributed by Kuhu Shukla

2016-04-28 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk cf2ee45f7 -> 6243eabb4


HDFS-9958. BlockManager#createLocatedBlocks can throw NPE for corruptBlocks on 
failed storages. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6243eabb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6243eabb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6243eabb

Branch: refs/heads/trunk
Commit: 6243eabb48390fffada2418ade5adf9e0766afbe
Parents: cf2ee45
Author: Kihwal Lee 
Authored: Thu Apr 28 12:42:28 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Apr 28 12:44:53 2016 -0500

--
 .../server/blockmanagement/BlockManager.java| 23 --
 .../apache/hadoop/hdfs/TestFileCorruption.java  | 87 +++-
 2 files changed, 103 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6243eabb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 70086e6..accfc38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1038,9 +1038,9 @@ public class BlockManager implements BlockStatsMXBean {
 }
 
 final int numNodes = blocksMap.numNodes(blk);
-final boolean isCorrupt = numCorruptNodes != 0 &&
-numCorruptNodes == numNodes;
-final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes;
+final boolean isCorrupt = numCorruptReplicas != 0 &&
+numCorruptReplicas == numNodes;
+final int numMachines = isCorrupt ? numNodes: numNodes - 
numCorruptReplicas;
 final DatanodeStorageInfo[] machines = new 
DatanodeStorageInfo[numMachines];
 final byte[] blockIndices = blk.isStriped() ? new byte[numMachines] : null;
 int j = 0, i = 0;
@@ -1366,11 +1366,22 @@ public class BlockManager implements BlockStatsMXBean {
   + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
   + ") does not exist");
 }
-
+DatanodeStorageInfo storage = null;
+if (storageID != null) {
+  storage = node.getStorageInfo(storageID);
+}
+if (storage == null) {
+  storage = storedBlock.findStorageInfo(node);
+}
+
+if (storage == null) {
+  blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found on {}",
+  blk, dn);
+  return;
+}
 markBlockAsCorrupt(new BlockToMarkCorrupt(reportedBlock, storedBlock,
 blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
-storageID == null ? null : node.getStorageInfo(storageID),
-node);
+storage, node);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6243eabb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index c1a7ebb..011baa1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -18,15 +18,22 @@
 
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Random;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -36,6 +43,8 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apac
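
The fix above resolves the reporting storage by ID, falls back to searching the block's known storages, and returns early when neither lookup finds anything, instead of passing a null storage into the corruption bookkeeping. A simplified stand-alone sketch of that null-handling flow (illustrative types and data, not the HDFS classes):

import java.util.HashMap;
import java.util.Map;

public class MarkCorruptSketch {

  static class StorageInfo {
    final String id;
    StorageInfo(String id) { this.id = id; }
  }

  // node -> (storageId -> storage); a failed storage is simply absent here
  static Map<String, Map<String, StorageInfo>> nodeStorages = new HashMap<>();

  /** Fallback: pretend search of the block's known storages on that node. */
  static StorageInfo findStorageOfBlock(String node, long blockId) {
    Map<String, StorageInfo> byId = nodeStorages.get(node);
    return (byId == null || byId.isEmpty()) ? null
        : byId.values().iterator().next();
  }

  static void markBlockAsCorrupt(long blockId, String node, StorageInfo storage) {
    System.out.println("marking block " + blockId + " corrupt on " + node
        + " storage " + storage.id);
  }

  static void findAndMarkBlockAsCorrupt(long blockId, String node, String storageId) {
    StorageInfo storage = null;
    if (storageId != null) {
      storage = nodeStorages.getOrDefault(node, new HashMap<>()).get(storageId);
    }
    if (storage == null) {
      storage = findStorageOfBlock(node, blockId);   // second chance
    }
    if (storage == null) {
      // Storage already failed or was removed: log and return instead of
      // handing a null storage downstream (the old NPE).
      System.out.println("block " + blockId + " not found on " + node + ", skipping");
      return;
    }
    markBlockAsCorrupt(blockId, node, storage);
  }

  public static void main(String[] args) {
    findAndMarkBlockAsCorrupt(42L, "dn1", "DS-missing");  // prints the skip message
  }
}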

hadoop git commit: HDFS-11379. DFSInputStream may infinite loop requesting block locations. Contributed by Daryn Sharp.

2017-02-10 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2b7a7bbe0 -> 07a5184f7


HDFS-11379. DFSInputStream may infinite loop requesting block locations. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07a5184f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07a5184f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07a5184f

Branch: refs/heads/trunk
Commit: 07a5184f74fdeffc42cdaec42ad4378c0e41c541
Parents: 2b7a7bb
Author: Kihwal Lee 
Authored: Fri Feb 10 12:27:08 2017 -0600
Committer: Kihwal Lee 
Committed: Fri Feb 10 12:27:08 2017 -0600

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 48 --
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 51 
 2 files changed, 70 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07a5184f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 5783f90..39d0eed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -421,33 +421,36 @@ public class DFSInputStream extends FSInputStream
   }
   else {
 // search cached blocks first
-int targetBlockIdx = locatedBlocks.findBlock(offset);
-if (targetBlockIdx < 0) { // block is not cached
-  targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
-  // fetch more blocks
-  final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, 
offset);
-  assert (newBlocks != null) : "Could not find target position " + 
offset;
-  locatedBlocks.insertRange(targetBlockIdx, 
newBlocks.getLocatedBlocks());
-}
-blk = locatedBlocks.get(targetBlockIdx);
+blk = fetchBlockAt(offset, 0, true);
   }
   return blk;
 }
   }
 
   /** Fetch a block from namenode and cache it */
-  protected void fetchBlockAt(long offset) throws IOException {
+  protected LocatedBlock fetchBlockAt(long offset) throws IOException {
+return fetchBlockAt(offset, 0, false); // don't use cache
+  }
+
+  /** Fetch a block from namenode and cache it */
+  private LocatedBlock fetchBlockAt(long offset, long length, boolean useCache)
+  throws IOException {
 synchronized(infoLock) {
   int targetBlockIdx = locatedBlocks.findBlock(offset);
   if (targetBlockIdx < 0) { // block is not cached
 targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
+useCache = false;
   }
-  // fetch blocks
-  final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, offset);
-  if (newBlocks == null) {
-throw new IOException("Could not find target position " + offset);
+  if (!useCache) { // fetch blocks
+final LocatedBlocks newBlocks = (length == 0)
+? dfsClient.getLocatedBlocks(src, offset)
+: dfsClient.getLocatedBlocks(src, offset, length);
+if (newBlocks == null || newBlocks.locatedBlockCount() == 0) {
+  throw new EOFException("Could not find target position " + offset);
+}
+locatedBlocks.insertRange(targetBlockIdx, 
newBlocks.getLocatedBlocks());
   }
-  locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
+  return locatedBlocks.get(targetBlockIdx);
 }
   }
 
@@ -502,28 +505,15 @@ public class DFSInputStream extends FSInputStream
   assert (locatedBlocks != null) : "locatedBlocks is null";
  List<LocatedBlock> blockRange = new ArrayList<>();
   // search cached blocks first
-  int blockIdx = locatedBlocks.findBlock(offset);
-  if (blockIdx < 0) { // block is not cached
-blockIdx = LocatedBlocks.getInsertIndex(blockIdx);
-  }
   long remaining = length;
   long curOff = offset;
   while(remaining > 0) {
-LocatedBlock blk = null;
-if(blockIdx < locatedBlocks.locatedBlockCount())
-  blk = locatedBlocks.get(blockIdx);
-if (blk == null || curOff < blk.getStartOffset()) {
-  LocatedBlocks newBlocks;
-  newBlocks = dfsClient.getLocatedBlocks(src, curOff, remaining);
-  locatedBlocks.insertRange(blockIdx, newBlocks.getLocatedBlocks());
-  continue;
-}
+LocatedBlock blk = fetchBlockAt(curOff, remaining, 
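
The reworked fetchBlockAt above is what breaks the loop: an empty answer from the NameNode now surfaces as an EOFException instead of being re-requested forever, and callers may reuse a cached entry when they allow it. A compact stand-alone sketch of that behaviour (toy block type and a pretend 100-byte file, not DFSInputStream):

import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class BlockFetchLoopSketch {

  /** Toy stand-in for a located block: [start, start+length). */
  static class Block {
    final long start, length;
    Block(long start, long length) { this.start = start; this.length = length; }
  }

  private final List<Block> cached = new ArrayList<>();

  /** Pretend NameNode call; returns an empty list past end of file. */
  private List<Block> namenodeLocate(long offset) {
    List<Block> result = new ArrayList<>();
    if (offset < 100) {                       // hypothetical 100-byte file
      result.add(new Block(offset - offset % 10, 10));
    }
    return result;
  }

  Block fetchBlockAt(long offset, boolean useCache) throws IOException {
    for (Block b : cached) {
      if (useCache && offset >= b.start && offset < b.start + b.length) {
        return b;                             // cache hit, no RPC
      }
    }
    List<Block> fresh = namenodeLocate(offset);
    if (fresh.isEmpty()) {
      // Without this, a caller looping "while (remaining > 0)" would keep
      // re-requesting the same offset and never terminate.
      throw new EOFException("Could not find target position " + offset);
    }
    cached.addAll(fresh);
    return fresh.get(0);
  }

  public static void main(String[] args) throws IOException {
    BlockFetchLoopSketch s = new BlockFetchLoopSketch();
    System.out.println("block start = " + s.fetchBlockAt(42, true).start);
    try {
      s.fetchBlockAt(200, true);              // past EOF
    } catch (EOFException e) {
      System.out.println("expected: " + e.getMessage());
    }
  }
}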

hadoop git commit: HDFS-11379. DFSInputStream may infinite loop requesting block locations. Contributed by Daryn Sharp.

2017-02-10 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5d0ec2e24 -> 22f8b6613


HDFS-11379. DFSInputStream may infinite loop requesting block locations. 
Contributed by Daryn Sharp.

(cherry picked from commit 33c62d2d19cd80b9c0cb9f46a635f37080dbb27c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22f8b661
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22f8b661
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22f8b661

Branch: refs/heads/branch-2.8
Commit: 22f8b6613707e7d08905656f2e11c6c24a6a8533
Parents: 5d0ec2e
Author: Kihwal Lee 
Authored: Fri Feb 10 12:56:44 2017 -0600
Committer: Kihwal Lee 
Committed: Fri Feb 10 12:56:44 2017 -0600

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 48 +++---
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 52 +++-
 2 files changed, 70 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22f8b661/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 463ce23..873fb03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -505,33 +505,36 @@ public class DFSInputStream extends FSInputStream
   }
   else {
 // search cached blocks first
-int targetBlockIdx = locatedBlocks.findBlock(offset);
-if (targetBlockIdx < 0) { // block is not cached
-  targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
-  // fetch more blocks
-  final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, 
offset);
-  assert (newBlocks != null) : "Could not find target position " + 
offset;
-  locatedBlocks.insertRange(targetBlockIdx, 
newBlocks.getLocatedBlocks());
-}
-blk = locatedBlocks.get(targetBlockIdx);
+blk = fetchBlockAt(offset, 0, true);
   }
   return blk;
 }
   }
 
   /** Fetch a block from namenode and cache it */
-  protected void fetchBlockAt(long offset) throws IOException {
+  protected LocatedBlock fetchBlockAt(long offset) throws IOException {
+return fetchBlockAt(offset, 0, false); // don't use cache
+  }
+
+  /** Fetch a block from namenode and cache it */
+  private LocatedBlock fetchBlockAt(long offset, long length, boolean useCache)
+  throws IOException {
 synchronized(infoLock) {
   int targetBlockIdx = locatedBlocks.findBlock(offset);
   if (targetBlockIdx < 0) { // block is not cached
 targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
+useCache = false;
   }
-  // fetch blocks
-  final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, offset);
-  if (newBlocks == null) {
-throw new IOException("Could not find target position " + offset);
+  if (!useCache) { // fetch blocks
+final LocatedBlocks newBlocks = (length == 0)
+? dfsClient.getLocatedBlocks(src, offset)
+: dfsClient.getLocatedBlocks(src, offset, length);
+if (newBlocks == null || newBlocks.locatedBlockCount() == 0) {
+  throw new EOFException("Could not find target position " + offset);
+}
+locatedBlocks.insertRange(targetBlockIdx, 
newBlocks.getLocatedBlocks());
   }
-  locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
+  return locatedBlocks.get(targetBlockIdx);
 }
   }
 
@@ -586,28 +589,15 @@ public class DFSInputStream extends FSInputStream
   assert (locatedBlocks != null) : "locatedBlocks is null";
  List<LocatedBlock> blockRange = new ArrayList<>();
   // search cached blocks first
-  int blockIdx = locatedBlocks.findBlock(offset);
-  if (blockIdx < 0) { // block is not cached
-blockIdx = LocatedBlocks.getInsertIndex(blockIdx);
-  }
   long remaining = length;
   long curOff = offset;
   while(remaining > 0) {
-LocatedBlock blk = null;
-if(blockIdx < locatedBlocks.locatedBlockCount())
-  blk = locatedBlocks.get(blockIdx);
-if (blk == null || curOff < blk.getStartOffset()) {
-  LocatedBlocks newBlocks;
-  newBlocks = dfsClient.getLocatedBlocks(src, curOff, remaining);
-  locatedBlocks.insertRange(blockIdx, newBlocks.getLocatedBlocks());
-  continue;
-  

hadoop git commit: HDFS-11379. DFSInputStream may infinite loop requesting block locations. Contributed by Daryn Sharp.

2017-02-10 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c88ec5458 -> 33c62d2d1


HDFS-11379. DFSInputStream may infinite loop requesting block locations. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33c62d2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33c62d2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33c62d2d

Branch: refs/heads/branch-2
Commit: 33c62d2d19cd80b9c0cb9f46a635f37080dbb27c
Parents: c88ec54
Author: Kihwal Lee 
Authored: Fri Feb 10 12:52:14 2017 -0600
Committer: Kihwal Lee 
Committed: Fri Feb 10 12:52:14 2017 -0600

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 48 +++---
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 52 +++-
 2 files changed, 70 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33c62d2d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 669485a..0608d6f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -511,33 +511,36 @@ public class DFSInputStream extends FSInputStream
   }
   else {
 // search cached blocks first
-int targetBlockIdx = locatedBlocks.findBlock(offset);
-if (targetBlockIdx < 0) { // block is not cached
-  targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
-  // fetch more blocks
-  final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, 
offset);
-  assert (newBlocks != null) : "Could not find target position " + 
offset;
-  locatedBlocks.insertRange(targetBlockIdx, 
newBlocks.getLocatedBlocks());
-}
-blk = locatedBlocks.get(targetBlockIdx);
+blk = fetchBlockAt(offset, 0, true);
   }
   return blk;
 }
   }
 
   /** Fetch a block from namenode and cache it */
-  protected void fetchBlockAt(long offset) throws IOException {
+  protected LocatedBlock fetchBlockAt(long offset) throws IOException {
+return fetchBlockAt(offset, 0, false); // don't use cache
+  }
+
+  /** Fetch a block from namenode and cache it */
+  private LocatedBlock fetchBlockAt(long offset, long length, boolean useCache)
+  throws IOException {
 synchronized(infoLock) {
   int targetBlockIdx = locatedBlocks.findBlock(offset);
   if (targetBlockIdx < 0) { // block is not cached
 targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
+useCache = false;
   }
-  // fetch blocks
-  final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, offset);
-  if (newBlocks == null) {
-throw new IOException("Could not find target position " + offset);
+  if (!useCache) { // fetch blocks
+final LocatedBlocks newBlocks = (length == 0)
+? dfsClient.getLocatedBlocks(src, offset)
+: dfsClient.getLocatedBlocks(src, offset, length);
+if (newBlocks == null || newBlocks.locatedBlockCount() == 0) {
+  throw new EOFException("Could not find target position " + offset);
+}
+locatedBlocks.insertRange(targetBlockIdx, 
newBlocks.getLocatedBlocks());
   }
-  locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
+  return locatedBlocks.get(targetBlockIdx);
 }
   }
 
@@ -592,28 +595,15 @@ public class DFSInputStream extends FSInputStream
   assert (locatedBlocks != null) : "locatedBlocks is null";
  List<LocatedBlock> blockRange = new ArrayList<>();
   // search cached blocks first
-  int blockIdx = locatedBlocks.findBlock(offset);
-  if (blockIdx < 0) { // block is not cached
-blockIdx = LocatedBlocks.getInsertIndex(blockIdx);
-  }
   long remaining = length;
   long curOff = offset;
   while(remaining > 0) {
-LocatedBlock blk = null;
-if(blockIdx < locatedBlocks.locatedBlockCount())
-  blk = locatedBlocks.get(blockIdx);
-if (blk == null || curOff < blk.getStartOffset()) {
-  LocatedBlocks newBlocks;
-  newBlocks = dfsClient.getLocatedBlocks(src, curOff, remaining);
-  locatedBlocks.insertRange(blockIdx, newBlocks.getLocatedBlocks());
-  continue;
-}
+LocatedBlock blk = fetchBlockAt(curOff, remaining, 

hadoop git commit: HDFS-11379. DFSInputStream may infinite loop requesting block locations. Contributed by Daryn Sharp.

2017-02-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.0 bfa04500d -> cf91cc2b0


HDFS-11379. DFSInputStream may infinite loop requesting block locations. 
Contributed by Daryn Sharp.

(cherry picked from commit 22f8b6613707e7d08905656f2e11c6c24a6a8533)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf91cc2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf91cc2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf91cc2b

Branch: refs/heads/branch-2.8.0
Commit: cf91cc2b0f5c71aa300756373d5a533a431239bc
Parents: bfa0450
Author: Kihwal Lee 
Authored: Mon Feb 13 08:42:35 2017 -0600
Committer: Kihwal Lee 
Committed: Mon Feb 13 08:42:35 2017 -0600

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 48 +++---
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 52 +++-
 2 files changed, 70 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf91cc2b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 463ce23..873fb03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -505,33 +505,36 @@ public class DFSInputStream extends FSInputStream
   }
   else {
 // search cached blocks first
-int targetBlockIdx = locatedBlocks.findBlock(offset);
-if (targetBlockIdx < 0) { // block is not cached
-  targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
-  // fetch more blocks
-  final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, 
offset);
-  assert (newBlocks != null) : "Could not find target position " + 
offset;
-  locatedBlocks.insertRange(targetBlockIdx, 
newBlocks.getLocatedBlocks());
-}
-blk = locatedBlocks.get(targetBlockIdx);
+blk = fetchBlockAt(offset, 0, true);
   }
   return blk;
 }
   }
 
   /** Fetch a block from namenode and cache it */
-  protected void fetchBlockAt(long offset) throws IOException {
+  protected LocatedBlock fetchBlockAt(long offset) throws IOException {
+return fetchBlockAt(offset, 0, false); // don't use cache
+  }
+
+  /** Fetch a block from namenode and cache it */
+  private LocatedBlock fetchBlockAt(long offset, long length, boolean useCache)
+  throws IOException {
 synchronized(infoLock) {
   int targetBlockIdx = locatedBlocks.findBlock(offset);
   if (targetBlockIdx < 0) { // block is not cached
 targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
+useCache = false;
   }
-  // fetch blocks
-  final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, offset);
-  if (newBlocks == null) {
-throw new IOException("Could not find target position " + offset);
+  if (!useCache) { // fetch blocks
+final LocatedBlocks newBlocks = (length == 0)
+? dfsClient.getLocatedBlocks(src, offset)
+: dfsClient.getLocatedBlocks(src, offset, length);
+if (newBlocks == null || newBlocks.locatedBlockCount() == 0) {
+  throw new EOFException("Could not find target position " + offset);
+}
+locatedBlocks.insertRange(targetBlockIdx, 
newBlocks.getLocatedBlocks());
   }
-  locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
+  return locatedBlocks.get(targetBlockIdx);
 }
   }
 
@@ -586,28 +589,15 @@ public class DFSInputStream extends FSInputStream
   assert (locatedBlocks != null) : "locatedBlocks is null";
  List<LocatedBlock> blockRange = new ArrayList<>();
   // search cached blocks first
-  int blockIdx = locatedBlocks.findBlock(offset);
-  if (blockIdx < 0) { // block is not cached
-blockIdx = LocatedBlocks.getInsertIndex(blockIdx);
-  }
   long remaining = length;
   long curOff = offset;
   while(remaining > 0) {
-LocatedBlock blk = null;
-if(blockIdx < locatedBlocks.locatedBlockCount())
-  blk = locatedBlocks.get(blockIdx);
-if (blk == null || curOff < blk.getStartOffset()) {
-  LocatedBlocks newBlocks;
-  newBlocks = dfsClient.getLocatedBlocks(src, curOff, remaining);
-  locatedBlocks.insertRange(blockIdx, newBlocks.getLocatedBlocks());
-  continue;
-  

hadoop git commit: HADOOP-13976. Path globbing does not match newlines. Contributed by Eric Badger.

2017-01-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 695f110f9 -> 706d630eb


HADOOP-13976. Path globbing does not match newlines. Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/706d630e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/706d630e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/706d630e

Branch: refs/heads/trunk
Commit: 706d630eb9db9658083d57d1d99b6a0f11cc5657
Parents: 695f110
Author: Kihwal Lee 
Authored: Tue Jan 17 15:10:24 2017 -0600
Committer: Kihwal Lee 
Committed: Tue Jan 17 15:10:24 2017 -0600

--
 .../src/main/java/org/apache/hadoop/fs/GlobPattern.java  | 2 +-
 .../src/test/java/org/apache/hadoop/fs/TestGlobPattern.java  | 8 
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/706d630e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
index 16505a6..c5cf3d2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
@@ -153,7 +153,7 @@ public class GlobPattern {
 if (curlyOpen > 0) {
   error("Unclosed group", glob, len);
 }
-compiled = Pattern.compile(regex.toString());
+compiled = Pattern.compile(regex.toString(), Pattern.DOTALL);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/706d630e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
index 356c442..99cb645 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
@@ -50,11 +50,11 @@ public class TestGlobPattern {
   }
 
   @Test public void testValidPatterns() {
-assertMatch(true, "*", "^$", "foo", "bar");
+assertMatch(true, "*", "^$", "foo", "bar", "\n");
 assertMatch(true, "?", "?", "^", "[", "]", "$");
-assertMatch(true, "foo*", "foo", "food", "fool");
-assertMatch(true, "f*d", "fud", "food");
-assertMatch(true, "*d", "good", "bad");
+assertMatch(true, "foo*", "foo", "food", "fool", "foo\n", "foo\nbar");
+assertMatch(true, "f*d", "fud", "food", "foo\nd");
+assertMatch(true, "*d", "good", "bad", "\nd");
 assertMatch(true, "\\*\\?\\[\\{", "*?[{\\");
 assertMatch(true, "[]^-]", "]", "-", "^");
 assertMatch(true, "]", "]");
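
The patch above works because a glob such as "foo*" compiles to the regex "foo.*", and without Pattern.DOTALL the '.' metacharacter refuses to match a newline, so path components containing '\n' slip through glob matching. A two-pattern demonstration (JDK only; the regex shown is just a hand-written approximation of what the glob compiler emits):

import java.util.regex.Pattern;

public class GlobDotallSketch {
  public static void main(String[] args) {
    String regexFromGlob = "foo.*";           // roughly what "foo*" compiles to
    String name = "foo\nbar";                 // a name with an embedded newline

    Pattern plain  = Pattern.compile(regexFromGlob);
    Pattern dotall = Pattern.compile(regexFromGlob, Pattern.DOTALL);

    System.out.println("without DOTALL: " + plain.matcher(name).matches());  // false
    System.out.println("with DOTALL   : " + dotall.matcher(name).matches()); // true
  }
}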



hadoop git commit: HADOOP-13976. Path globbing does not match newlines. Contributed by Eric Badger.

2017-01-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eae5bde00 -> 3017069a7


HADOOP-13976. Path globbing does not match newlines. Contributed by Eric Badger.

(cherry picked from commit 706d630eb9db9658083d57d1d99b6a0f11cc5657)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3017069a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3017069a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3017069a

Branch: refs/heads/branch-2
Commit: 3017069a761dfa624670849b83ce752f5270adc3
Parents: eae5bde
Author: Kihwal Lee 
Authored: Tue Jan 17 15:12:50 2017 -0600
Committer: Kihwal Lee 
Committed: Tue Jan 17 15:12:50 2017 -0600

--
 .../src/main/java/org/apache/hadoop/fs/GlobPattern.java  | 2 +-
 .../src/test/java/org/apache/hadoop/fs/TestGlobPattern.java  | 8 
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3017069a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
index 4be5b1c..c214609 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
@@ -153,7 +153,7 @@ public class GlobPattern {
 if (curlyOpen > 0) {
   error("Unclosed group", glob, len);
 }
-compiled = Pattern.compile(regex.toString());
+compiled = Pattern.compile(regex.toString(), Pattern.DOTALL);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3017069a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
index 0fffc47..128ac23 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
@@ -50,11 +50,11 @@ public class TestGlobPattern {
   }
 
   @Test public void testValidPatterns() {
-assertMatch(true, "*", "^$", "foo", "bar");
+assertMatch(true, "*", "^$", "foo", "bar", "\n");
 assertMatch(true, "?", "?", "^", "[", "]", "$");
-assertMatch(true, "foo*", "foo", "food", "fool");
-assertMatch(true, "f*d", "fud", "food");
-assertMatch(true, "*d", "good", "bad");
+assertMatch(true, "foo*", "foo", "food", "fool", "foo\n", "foo\nbar");
+assertMatch(true, "f*d", "fud", "food", "foo\nd");
+assertMatch(true, "*d", "good", "bad", "\nd");
 assertMatch(true, "\\*\\?\\[\\{", "*?[{\\");
 assertMatch(true, "[]^-]", "]", "-", "^");
 assertMatch(true, "]", "]");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-13976. Path globbing does not match newlines. Contributed by Eric Badger.

2017-01-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 fbcc45c8d -> 7d949cc43


HADOOP-13976. Path globbing does not match newlines. Contributed by Eric Badger.

(cherry picked from commit 706d630eb9db9658083d57d1d99b6a0f11cc5657)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d949cc4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d949cc4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d949cc4

Branch: refs/heads/branch-2.8
Commit: 7d949cc43250e1c0eae7ba8864511be0340c3e97
Parents: fbcc45c
Author: Kihwal Lee 
Authored: Tue Jan 17 15:13:56 2017 -0600
Committer: Kihwal Lee 
Committed: Tue Jan 17 15:13:56 2017 -0600

--
 .../src/main/java/org/apache/hadoop/fs/GlobPattern.java  | 2 +-
 .../src/test/java/org/apache/hadoop/fs/TestGlobPattern.java  | 8 
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d949cc4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
index 4be5b1c..c214609 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
@@ -153,7 +153,7 @@ public class GlobPattern {
 if (curlyOpen > 0) {
   error("Unclosed group", glob, len);
 }
-compiled = Pattern.compile(regex.toString());
+compiled = Pattern.compile(regex.toString(), Pattern.DOTALL);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d949cc4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
index 0fffc47..128ac23 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobPattern.java
@@ -50,11 +50,11 @@ public class TestGlobPattern {
   }
 
   @Test public void testValidPatterns() {
-assertMatch(true, "*", "^$", "foo", "bar");
+assertMatch(true, "*", "^$", "foo", "bar", "\n");
 assertMatch(true, "?", "?", "^", "[", "]", "$");
-assertMatch(true, "foo*", "foo", "food", "fool");
-assertMatch(true, "f*d", "fud", "food");
-assertMatch(true, "*d", "good", "bad");
+assertMatch(true, "foo*", "foo", "food", "fool", "foo\n", "foo\nbar");
+assertMatch(true, "f*d", "fud", "food", "foo\nd");
+assertMatch(true, "*d", "good", "bad", "\nd");
 assertMatch(true, "\\*\\?\\[\\{", "*?[{\\");
 assertMatch(true, "[]^-]", "]", "-", "^");
 assertMatch(true, "]", "]");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14050. Add process name to kms process. Contributed by Rushabh S Shah.

2017-02-03 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f17d3eddc -> 7baa6dac5


HADOOP-14050. Add process name to kms process. Contributed by Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7baa6dac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7baa6dac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7baa6dac

Branch: refs/heads/branch-2
Commit: 7baa6dac54417d564a9233c2f76bf310bf63a822
Parents: f17d3ed
Author: Kihwal Lee 
Authored: Fri Feb 3 15:38:55 2017 -0600
Committer: Kihwal Lee 
Committed: Fri Feb 3 15:38:55 2017 -0600

--
 hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7baa6dac/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh 
b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
index fb8ba33..dc14054 100644
--- a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
@@ -58,7 +58,8 @@ KMS_SSL_TRUSTSTORE_PASS=`echo $CATALINA_OPTS | grep -o 
'trustStorePassword=[^ ]*
 CATALINA_OPTS_DISP=`echo ${CATALINA_OPTS} | sed -e 's/trustStorePassword=[^ 
]*/trustStorePassword=***/'`
 print "Using   CATALINA_OPTS:   ${CATALINA_OPTS_DISP}"
 
-catalina_opts="-Dkms.home.dir=${KMS_HOME}";
+catalina_opts="-Dproc_kms"
+catalina_opts="${catalina_opts} -Dkms.home.dir=${KMS_HOME}";
 catalina_opts="${catalina_opts} -Dkms.config.dir=${KMS_CONFIG}";
 catalina_opts="${catalina_opts} -Dkms.log.dir=${KMS_LOG}";
 catalina_opts="${catalina_opts} -Dkms.temp.dir=${KMS_TEMP}";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
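
The one-line change above only tags the JVM: a valueless -Dproc_kms flag becomes a system property named proc_kms with an empty value, so ps/jps output and monitoring scripts can tell the KMS instance apart from other Tomcat JVMs. A minimal sketch of that assumption; ProcNameCheck is an illustrative class, not part of Hadoop:

public class ProcNameCheck {
  public static void main(String[] args) {
    // "-Dproc_kms" on the java command line defines the system property
    // "proc_kms" with an empty value; it exists only to tag the process so
    // that ps/jps output (and scripts grepping it) can identify the KMS JVM.
    if (System.getProperties().containsKey("proc_kms")) {
      System.out.println("running as the KMS process");
    } else {
      System.out.println("proc_kms marker not set");
    }
  }
}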



hadoop git commit: HADOOP-14050. Add process name to kms process. Contributed by Rushabh S Shah.

2017-02-03 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 4e423edd9 -> 2bbcaa8ad


HADOOP-14050. Add process name to kms process. Contributed by Rushabh S Shah.

(cherry picked from commit 7baa6dac54417d564a9233c2f76bf310bf63a822)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bbcaa8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bbcaa8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bbcaa8a

Branch: refs/heads/branch-2.8
Commit: 2bbcaa8ad0f50e9afe3b7945960984150c122c17
Parents: 4e423ed
Author: Kihwal Lee 
Authored: Fri Feb 3 15:40:34 2017 -0600
Committer: Kihwal Lee 
Committed: Fri Feb 3 15:40:34 2017 -0600

--
 hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bbcaa8a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh 
b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
index 5232068..7c952d8 100644
--- a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
@@ -58,7 +58,8 @@ KMS_SSL_TRUSTSTORE_PASS=`echo $CATALINA_OPTS | grep -o 
'trustStorePassword=[^ ]*
 CATALINA_OPTS_DISP=`echo ${CATALINA_OPTS} | sed -e 's/trustStorePassword=[^ 
]*/trustStorePassword=***/'`
 print "Using   CATALINA_OPTS:   ${CATALINA_OPTS_DISP}"
 
-catalina_opts="-Dkms.home.dir=${KMS_HOME}";
+catalina_opts="-Dproc_kms"
+catalina_opts="${catalina_opts} -Dkms.home.dir=${KMS_HOME}";
 catalina_opts="${catalina_opts} -Dkms.config.dir=${KMS_CONFIG}";
 catalina_opts="${catalina_opts} -Dkms.log.dir=${KMS_LOG}";
 catalina_opts="${catalina_opts} -Dkms.temp.dir=${KMS_TEMP}";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14032. Reduce fair call queue priority inversion. Contributed by Daryn Sharp.

2017-02-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk a8a594b4c -> a0bfb4150


HADOOP-14032. Reduce fair call queue priority inversion. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0bfb415
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0bfb415
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0bfb415

Branch: refs/heads/trunk
Commit: a0bfb4150464013a618f30c2e38d88fc6de11ad2
Parents: a8a594b
Author: Kihwal Lee 
Authored: Thu Feb 9 10:04:28 2017 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 9 10:04:28 2017 -0600

--
 .../org/apache/hadoop/ipc/FairCallQueue.java| 12 +++--
 .../apache/hadoop/ipc/TestFairCallQueue.java| 57 +++-
 2 files changed, 63 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0bfb415/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
index c2d3cd8..77a9d65 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
@@ -112,19 +112,21 @@ public class FairCallQueue extends 
AbstractQueue
   }
 
   /**
-   * Returns the first non-empty queue with equal or lesser priority
-   * than startIdx. Wraps around, searching a maximum of N
-   * queues, where N is this.queues.size().
+   * Returns the first non-empty queue with equal to startIdx, or
+   * or scans from highest to lowest priority queue.
*
* @param startIdx the queue number to start searching at
* @return the first non-empty queue with less priority, or null if
* everything was empty
*/
   private BlockingQueue getFirstNonEmptyQueue(int startIdx) {
+BlockingQueue queue = this.queues.get(startIdx);
+if (queue.size() != 0) {
+  return queue;
+}
 final int numQueues = this.queues.size();
 for(int i=0; i < numQueues; i++) {
-  int idx = (i + startIdx) % numQueues; // offset and wrap around
-  BlockingQueue queue = this.queues.get(idx);
+  queue = this.queues.get(i);
   if (queue.size() != 0) {
 return queue;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0bfb415/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
index 96dea80..901a771 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
@@ -28,9 +28,12 @@ import javax.management.ObjectName;
 import java.lang.management.ManagementFactory;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.BlockingQueue;
-
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;
 import org.apache.hadoop.conf.Configuration;
 
 public class TestFairCallQueue extends TestCase {
@@ -43,6 +46,7 @@ public class TestFairCallQueue extends TestCase {
 when(ugi.getUserName()).thenReturn(id);
 when(mockCall.getUserGroupInformation()).thenReturn(ugi);
 when(mockCall.getPriorityLevel()).thenReturn(priority);
+when(mockCall.toString()).thenReturn("id=" + id + " priority=" + priority);
 
 return mockCall;
   }
@@ -78,6 +82,57 @@ public class TestFairCallQueue extends TestCase {
 assertEquals(fairCallQueue.remainingCapacity(), 1025);
   }
 
+  @Test
+  public void testPrioritization() {
+int numQueues = 10;
+Configuration conf = new Configuration();
+fcq = new FairCallQueue(numQueues, numQueues, "ns", conf);
+
+//Schedulable[] calls = new Schedulable[numCalls];
+List calls = new ArrayList<>();
+for (int i=0; i < numQueues; i++) {
+  Schedulable call = mockCall("u", i);
+  calls.add(call);
+  fcq.add(call);
+}
+
+final AtomicInteger currentIndex = new AtomicInteger();
+fcq.setMultiplexer(new RpcMultiplexer(){
+  @Override
+  public int getAndAdvanceCurrentIndex() {
+return curr
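
The patched lookup order above tries the queue selected by the multiplexer first and otherwise scans from the highest-priority queue (index 0) downward, instead of wrapping around from startIdx, which is what allowed low-priority calls to jump ahead. A simplified sketch of that order outside Hadoop; PriorityScanSketch and pollNext are illustrative names, not the FairCallQueue code:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

final class PriorityScanSketch {
  // Prefer the queue chosen by the multiplexer (startIdx); if it is empty,
  // scan from the highest-priority queue (index 0) downward rather than
  // wrapping around from startIdx.
  static <E> E pollNext(List<BlockingQueue<E>> queues, int startIdx) {
    E e = queues.get(startIdx).poll();
    for (int idx = 0; e == null && idx < queues.size(); idx++) {
      e = queues.get(idx).poll();
    }
    return e; // may be null if every queue was empty
  }

  public static void main(String[] args) {
    BlockingQueue<String> high = new LinkedBlockingQueue<>();
    BlockingQueue<String> low = new LinkedBlockingQueue<>();
    high.add("high-priority call");
    low.add("low-priority call");
    List<BlockingQueue<String>> queues = Arrays.asList(high, low);
    // The multiplexer picked queue 1, which has work, so it is served...
    System.out.println(pollNext(queues, 1));   // low-priority call
    // ...but once it is empty the scan restarts at queue 0, not at startIdx.
    System.out.println(pollNext(queues, 1));   // high-priority call
  }
}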

hadoop git commit: HADOOP-14032. Reduce fair call queue priority inversion. Contributed by Daryn Sharp.

2017-02-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e330c3806 -> 2039d021f


HADOOP-14032. Reduce fair call queue priority inversion. Contributed by Daryn Sharp.

(cherry picked from commit a0bfb4150464013a618f30c2e38d88fc6de11ad2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2039d021
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2039d021
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2039d021

Branch: refs/heads/branch-2
Commit: 2039d021f986ef09b0c132c2a35463f8cbd50331
Parents: e330c38
Author: Kihwal Lee 
Authored: Thu Feb 9 10:13:37 2017 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 9 10:13:37 2017 -0600

--
 .../org/apache/hadoop/ipc/FairCallQueue.java| 12 +++--
 .../apache/hadoop/ipc/TestFairCallQueue.java| 57 +++-
 2 files changed, 63 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2039d021/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
index c2d3cd8..77a9d65 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
@@ -112,19 +112,21 @@ public class FairCallQueue extends 
AbstractQueue
   }
 
   /**
-   * Returns the first non-empty queue with equal or lesser priority
-   * than startIdx. Wraps around, searching a maximum of N
-   * queues, where N is this.queues.size().
+   * Returns the first non-empty queue with equal to startIdx, or
+   * or scans from highest to lowest priority queue.
*
* @param startIdx the queue number to start searching at
* @return the first non-empty queue with less priority, or null if
* everything was empty
*/
   private BlockingQueue getFirstNonEmptyQueue(int startIdx) {
+BlockingQueue queue = this.queues.get(startIdx);
+if (queue.size() != 0) {
+  return queue;
+}
 final int numQueues = this.queues.size();
 for(int i=0; i < numQueues; i++) {
-  int idx = (i + startIdx) % numQueues; // offset and wrap around
-  BlockingQueue queue = this.queues.get(idx);
+  queue = this.queues.get(i);
   if (queue.size() != 0) {
 return queue;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2039d021/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
index 96dea80..901a771 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
@@ -28,9 +28,12 @@ import javax.management.ObjectName;
 import java.lang.management.ManagementFactory;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.BlockingQueue;
-
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;
 import org.apache.hadoop.conf.Configuration;
 
 public class TestFairCallQueue extends TestCase {
@@ -43,6 +46,7 @@ public class TestFairCallQueue extends TestCase {
 when(ugi.getUserName()).thenReturn(id);
 when(mockCall.getUserGroupInformation()).thenReturn(ugi);
 when(mockCall.getPriorityLevel()).thenReturn(priority);
+when(mockCall.toString()).thenReturn("id=" + id + " priority=" + priority);
 
 return mockCall;
   }
@@ -78,6 +82,57 @@ public class TestFairCallQueue extends TestCase {
 assertEquals(fairCallQueue.remainingCapacity(), 1025);
   }
 
+  @Test
+  public void testPrioritization() {
+int numQueues = 10;
+Configuration conf = new Configuration();
+fcq = new FairCallQueue(numQueues, numQueues, "ns", conf);
+
+//Schedulable[] calls = new Schedulable[numCalls];
+List calls = new ArrayList<>();
+for (int i=0; i < numQueues; i++) {
+  Schedulable call = mockCall("u", i);
+  calls.add(call);
+  fcq.add(call);
+}
+
+final AtomicInteger currentIndex = new AtomicInteger();
+fcq.setMultiplexer(new RpcMultiplexer(){
+

hadoop git commit: HADOOP-14032. Reduce fair call queue priority inversion. Contributed by Daryn Sharp.

2017-02-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 04076e783 -> bb98856af


HADOOP-14032. Reduce fair call queue priority inversion. Contributed by Daryn Sharp.

(cherry picked from commit a0bfb4150464013a618f30c2e38d88fc6de11ad2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb98856a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb98856a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb98856a

Branch: refs/heads/branch-2.8
Commit: bb98856af913801909c5c5a80f87e50dd73967d1
Parents: 04076e7
Author: Kihwal Lee 
Authored: Thu Feb 9 10:14:18 2017 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 9 10:14:18 2017 -0600

--
 .../org/apache/hadoop/ipc/FairCallQueue.java| 12 +++--
 .../apache/hadoop/ipc/TestFairCallQueue.java| 57 +++-
 2 files changed, 63 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb98856a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
index c2d3cd8..77a9d65 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
@@ -112,19 +112,21 @@ public class FairCallQueue extends 
AbstractQueue
   }
 
   /**
-   * Returns the first non-empty queue with equal or lesser priority
-   * than startIdx. Wraps around, searching a maximum of N
-   * queues, where N is this.queues.size().
+   * Returns the first non-empty queue with equal to startIdx, or
+   * or scans from highest to lowest priority queue.
*
* @param startIdx the queue number to start searching at
* @return the first non-empty queue with less priority, or null if
* everything was empty
*/
   private BlockingQueue getFirstNonEmptyQueue(int startIdx) {
+BlockingQueue queue = this.queues.get(startIdx);
+if (queue.size() != 0) {
+  return queue;
+}
 final int numQueues = this.queues.size();
 for(int i=0; i < numQueues; i++) {
-  int idx = (i + startIdx) % numQueues; // offset and wrap around
-  BlockingQueue queue = this.queues.get(idx);
+  queue = this.queues.get(i);
   if (queue.size() != 0) {
 return queue;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb98856a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
index 96dea80..901a771 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
@@ -28,9 +28,12 @@ import javax.management.ObjectName;
 import java.lang.management.ManagementFactory;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.BlockingQueue;
-
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;
 import org.apache.hadoop.conf.Configuration;
 
 public class TestFairCallQueue extends TestCase {
@@ -43,6 +46,7 @@ public class TestFairCallQueue extends TestCase {
 when(ugi.getUserName()).thenReturn(id);
 when(mockCall.getUserGroupInformation()).thenReturn(ugi);
 when(mockCall.getPriorityLevel()).thenReturn(priority);
+when(mockCall.toString()).thenReturn("id=" + id + " priority=" + priority);
 
 return mockCall;
   }
@@ -78,6 +82,57 @@ public class TestFairCallQueue extends TestCase {
 assertEquals(fairCallQueue.remainingCapacity(), 1025);
   }
 
+  @Test
+  public void testPrioritization() {
+int numQueues = 10;
+Configuration conf = new Configuration();
+fcq = new FairCallQueue(numQueues, numQueues, "ns", conf);
+
+//Schedulable[] calls = new Schedulable[numCalls];
+List calls = new ArrayList<>();
+for (int i=0; i < numQueues; i++) {
+  Schedulable call = mockCall("u", i);
+  calls.add(call);
+  fcq.add(call);
+}
+
+final AtomicInteger currentIndex = new AtomicInteger();
+fcq.setMultiplexer(new RpcMultiplexer(){
+

hadoop git commit: HADOOP-14034. Allow ipc layer exceptions to selectively close connections. Contributed by Daryn Sharp.

2017-02-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2039d021f -> d008b5515


HADOOP-14034. Allow ipc layer exceptions to selectively close connections. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d008b551
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d008b551
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d008b551

Branch: refs/heads/branch-2
Commit: d008b5515304b42faeb48e542c8c27586b8564eb
Parents: 2039d02
Author: Kihwal Lee 
Authored: Thu Feb 9 10:55:40 2017 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 9 10:55:40 2017 -0600

--
 .../main/java/org/apache/hadoop/ipc/Server.java | 209 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 119 +++
 2 files changed, 225 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d008b551/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index dc6661b..890569c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1240,20 +1240,16 @@ public abstract class Server {
 LOG.info(Thread.currentThread().getName() + ": readAndProcess caught 
InterruptedException", ieo);
 throw ieo;
   } catch (Exception e) {
-// Do not log WrappedRpcServerExceptionSuppressed.
-if (!(e instanceof WrappedRpcServerExceptionSuppressed)) {
-  // A WrappedRpcServerException is an exception that has been sent
-  // to the client, so the stacktrace is unnecessary; any other
-  // exceptions are unexpected internal server errors and thus the
-  // stacktrace should be logged.
-  LOG.info(Thread.currentThread().getName() +
-  ": readAndProcess from client " + c.getHostAddress() +
-  " threw exception [" + e + "]",
-  (e instanceof WrappedRpcServerException) ? null : e);
-}
+// Any exceptions that reach here are fatal unexpected internal errors
+// that could not be sent to the client.
+LOG.info(Thread.currentThread().getName() +
+": readAndProcess from client " + c +
+" threw exception [" + e + "]", e);
 count = -1; //so that the (count < 0) block is executed
   }
-  if (count < 0) {
+  // setupResponse will signal the connection should be closed when a
+  // fatal response is sent.
+  if (count < 0 || c.shouldClose()) {
 closeConnection(c);
 c = null;
   }
@@ -1581,16 +1577,20 @@ public abstract class Server {
* unnecessary stack trace logging if it's not an internal server error. 
*/
   @SuppressWarnings("serial")
-  private static class WrappedRpcServerException extends RpcServerException {
+  private static class FatalRpcServerException extends RpcServerException {
 private final RpcErrorCodeProto errCode;
-public WrappedRpcServerException(RpcErrorCodeProto errCode, IOException 
ioe) {
+public FatalRpcServerException(RpcErrorCodeProto errCode, IOException ioe) 
{
   super(ioe.toString(), ioe);
   this.errCode = errCode;
 }
-public WrappedRpcServerException(RpcErrorCodeProto errCode, String 
message) {
+public FatalRpcServerException(RpcErrorCodeProto errCode, String message) {
   this(errCode, new RpcServerException(message));
 }
 @Override
+public RpcStatusProto getRpcStatusProto() {
+  return RpcStatusProto.FATAL;
+}
+@Override
 public RpcErrorCodeProto getRpcErrorCodeProto() {
   return errCode;
 }
@@ -1600,19 +1600,6 @@ public abstract class Server {
 }
   }
 
-  /**
-   * A WrappedRpcServerException that is suppressed altogether
-   * for the purposes of logging.
-   */
-  @SuppressWarnings("serial")
-  private static class WrappedRpcServerExceptionSuppressed
-  extends WrappedRpcServerException {
-public WrappedRpcServerExceptionSuppressed(
-RpcErrorCodeProto errCode, IOException ioe) {
-  super(errCode, ioe);
-}
-  }
-
   /** Reads calls from a connection and queues them for handling. */
   public class Connection {
 private boolean connectionHeaderRead = false; // connection  header is 
read?
@@ -1644,7 +1631,8 @@ public abstract class Server {
 private ByteBuffer unwrappedData;
 private ByteBuff
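
The rename to FatalRpcServerException plus the new shouldClose() check let a fatal error response drive connection teardown even when the read count stays non-negative. A rough, self-contained sketch of that control flow under simplified stand-in types; none of these classes are Hadoop's Server internals:

// Illustrative sketch of the control flow this patch introduces; these types
// are simplified stand-ins, not Hadoop's Server internals.
final class FatalCloseSketch {

  enum RpcStatus { SUCCESS, ERROR, FATAL }

  static class RpcServerError extends Exception {
    final RpcStatus status;
    RpcServerError(String msg, RpcStatus status) {
      super(msg);
      this.status = status;
    }
  }

  static class Connection {
    private boolean shouldClose;
    void markFatalResponseSent() { shouldClose = true; }   // set when a fatal response goes out
    boolean shouldClose()        { return shouldClose; }
    void close()                 { System.out.println("connection closed"); }
  }

  // Mirrors the reader loop: a FATAL error is reported to the client, the
  // connection is flagged, and the caller then closes it instead of relying
  // on a negative byte count alone.
  static void handle(Connection c, RpcServerError e) {
    System.out.println("responding with " + e.status + ": " + e.getMessage());
    if (e.status == RpcStatus.FATAL) {
      c.markFatalResponseSent();
    }
    if (c.shouldClose()) {
      c.close();
    }
  }

  public static void main(String[] args) {
    handle(new Connection(), new RpcServerError("bad rpc header", RpcStatus.FATAL));
  }
}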

hadoop git commit: HADOOP-14034. Allow ipc layer exceptions to selectively close connections. Contributed by Daryn Sharp.

2017-02-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk a0bfb4150 -> b6bb99c18


HADOOP-14034. Allow ipc layer exceptions to selectively close connections. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6bb99c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6bb99c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6bb99c1

Branch: refs/heads/trunk
Commit: b6bb99c18a772d2179d5cc6757cddf141e8d39c0
Parents: a0bfb41
Author: Kihwal Lee 
Authored: Thu Feb 9 10:47:29 2017 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 9 10:47:29 2017 -0600

--
 .../main/java/org/apache/hadoop/ipc/Server.java | 206 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 119 +++
 2 files changed, 227 insertions(+), 98 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6bb99c1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 5ed6e15..3ca1524 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1241,20 +1241,16 @@ public abstract class Server {
 LOG.info(Thread.currentThread().getName() + ": readAndProcess caught 
InterruptedException", ieo);
 throw ieo;
   } catch (Exception e) {
-// Do not log WrappedRpcServerExceptionSuppressed.
-if (!(e instanceof WrappedRpcServerExceptionSuppressed)) {
-  // A WrappedRpcServerException is an exception that has been sent
-  // to the client, so the stacktrace is unnecessary; any other
-  // exceptions are unexpected internal server errors and thus the
-  // stacktrace should be logged.
-  LOG.info(Thread.currentThread().getName() +
-  ": readAndProcess from client " + c.getHostAddress() +
-  " threw exception [" + e + "]",
-  (e instanceof WrappedRpcServerException) ? null : e);
-}
+// Any exceptions that reach here are fatal unexpected internal errors
+// that could not be sent to the client.
+LOG.info(Thread.currentThread().getName() +
+": readAndProcess from client " + c +
+" threw exception [" + e + "]", e);
 count = -1; //so that the (count < 0) block is executed
   }
-  if (count < 0) {
+  // setupResponse will signal the connection should be closed when a
+  // fatal response is sent.
+  if (count < 0 || c.shouldClose()) {
 closeConnection(c);
 c = null;
   }
@@ -1582,16 +1578,20 @@ public abstract class Server {
* unnecessary stack trace logging if it's not an internal server error. 
*/
   @SuppressWarnings("serial")
-  private static class WrappedRpcServerException extends RpcServerException {
+  private static class FatalRpcServerException extends RpcServerException {
 private final RpcErrorCodeProto errCode;
-public WrappedRpcServerException(RpcErrorCodeProto errCode, IOException 
ioe) {
+public FatalRpcServerException(RpcErrorCodeProto errCode, IOException ioe) 
{
   super(ioe.toString(), ioe);
   this.errCode = errCode;
 }
-public WrappedRpcServerException(RpcErrorCodeProto errCode, String 
message) {
+public FatalRpcServerException(RpcErrorCodeProto errCode, String message) {
   this(errCode, new RpcServerException(message));
 }
 @Override
+public RpcStatusProto getRpcStatusProto() {
+  return RpcStatusProto.FATAL;
+}
+@Override
 public RpcErrorCodeProto getRpcErrorCodeProto() {
   return errCode;
 }
@@ -1601,19 +1601,6 @@ public abstract class Server {
 }
   }
 
-  /**
-   * A WrappedRpcServerException that is suppressed altogether
-   * for the purposes of logging.
-   */
-  @SuppressWarnings("serial")
-  private static class WrappedRpcServerExceptionSuppressed
-  extends WrappedRpcServerException {
-public WrappedRpcServerExceptionSuppressed(
-RpcErrorCodeProto errCode, IOException ioe) {
-  super(errCode, ioe);
-}
-  }
-
   /** Reads calls from a connection and queues them for handling. */
   public class Connection {
 private boolean connectionHeaderRead = false; // connection  header is 
read?
@@ -1645,7 +1632,8 @@ public abstract class Server {
 private ByteBuffer unwrappedData;
 private ByteBuff

hadoop git commit: HADOOP-14034. Allow ipc layer exceptions to selectively close connections. Contributed by Daryn Sharp.

2017-02-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 bb98856af -> 47bbe431b


HADOOP-14034. Allow ipc layer exceptions to selectively close connections. Contributed by Daryn Sharp.

(cherry picked from commit d008b5515304b42faeb48e542c8c27586b8564eb)

Conflicts:

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47bbe431
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47bbe431
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47bbe431

Branch: refs/heads/branch-2.8
Commit: 47bbe431bf85727357feb85650c1b57a3e99f113
Parents: bb98856
Author: Kihwal Lee 
Authored: Thu Feb 9 11:04:29 2017 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 9 11:04:29 2017 -0600

--
 .../main/java/org/apache/hadoop/ipc/Server.java | 213 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 119 +++
 2 files changed, 235 insertions(+), 97 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47bbe431/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index c99d553..ccdd776 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1127,20 +1127,16 @@ public abstract class Server {
 LOG.info(Thread.currentThread().getName() + ": readAndProcess caught 
InterruptedException", ieo);
 throw ieo;
   } catch (Exception e) {
-// Do not log WrappedRpcServerExceptionSuppressed.
-if (!(e instanceof WrappedRpcServerExceptionSuppressed)) {
-  // A WrappedRpcServerException is an exception that has been sent
-  // to the client, so the stacktrace is unnecessary; any other
-  // exceptions are unexpected internal server errors and thus the
-  // stacktrace should be logged.
-  LOG.info(Thread.currentThread().getName() +
-  ": readAndProcess from client " + c.getHostAddress() +
-  " threw exception [" + e + "]",
-  (e instanceof WrappedRpcServerException) ? null : e);
-}
+// Any exceptions that reach here are fatal unexpected internal errors
+// that could not be sent to the client.
+LOG.info(Thread.currentThread().getName() +
+": readAndProcess from client " + c +
+" threw exception [" + e + "]", e);
 count = -1; //so that the (count < 0) block is executed
   }
-  if (count < 0) {
+  // setupResponse will signal the connection should be closed when a
+  // fatal response is sent.
+  if (count < 0 || c.shouldClose()) {
 closeConnection(c);
 c = null;
   }
@@ -1468,16 +1464,20 @@ public abstract class Server {
* unnecessary stack trace logging if it's not an internal server error. 
*/
   @SuppressWarnings("serial")
-  private static class WrappedRpcServerException extends RpcServerException {
+  private static class FatalRpcServerException extends RpcServerException {
 private final RpcErrorCodeProto errCode;
-public WrappedRpcServerException(RpcErrorCodeProto errCode, IOException 
ioe) {
+public FatalRpcServerException(RpcErrorCodeProto errCode, IOException ioe) 
{
   super(ioe.toString(), ioe);
   this.errCode = errCode;
 }
-public WrappedRpcServerException(RpcErrorCodeProto errCode, String 
message) {
+public FatalRpcServerException(RpcErrorCodeProto errCode, String message) {
   this(errCode, new RpcServerException(message));
 }
 @Override
+public RpcStatusProto getRpcStatusProto() {
+  return RpcStatusProto.FATAL;
+}
+@Override
 public RpcErrorCodeProto getRpcErrorCodeProto() {
   return errCode;
 }
@@ -1487,19 +1487,6 @@ public abstract class Server {
 }
   }
 
-  /**
-   * A WrappedRpcServerException that is suppressed altogether
-   * for the purposes of logging.
-   */
-  @SuppressWarnings("serial")
-  private static class WrappedRpcServerExceptionSuppressed
-  extends WrappedRpcServerException {
-public WrappedRpcServerExceptionSuppressed(
-RpcErrorCodeProto errCode, IOException ioe) {
-  super(errCode, ioe);
-}
-  }
-
   /** Reads calls from a connection and queues them for handling. */
   public class Connection {
 private boolean conne

hadoop git commit: HADOOP-14033. Reduce fair call queue lock contention. Contributed by Daryn Sharp.

2017-02-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9b8505358 -> 0c01cf579


HADOOP-14033. Reduce fair call queue lock contention. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c01cf57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c01cf57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c01cf57

Branch: refs/heads/trunk
Commit: 0c01cf57987bcc7a17154a3538960b67f625a9e5
Parents: 9b85053
Author: Kihwal Lee 
Authored: Thu Feb 9 16:17:24 2017 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 9 16:17:24 2017 -0600

--
 .../org/apache/hadoop/ipc/FairCallQueue.java| 167 ++-
 1 file changed, 51 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c01cf57/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
index 77a9d65..820f24c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
@@ -27,8 +27,7 @@ import java.util.AbstractQueue;
 import java.util.HashMap;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.concurrent.locks.Condition;
+import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -55,16 +54,15 @@ public class FairCallQueue extends 
AbstractQueue
   /* The queues */
   private final ArrayList> queues;
 
-  /* Read locks */
-  private final ReentrantLock takeLock = new ReentrantLock();
-  private final Condition notEmpty = takeLock.newCondition();
+  /* Track available permits for scheduled objects.  All methods that will
+   * mutate a subqueue must acquire or release a permit on the semaphore.
+   * A semaphore is much faster than an exclusive lock because producers do
+   * not contend with consumers and consumers do not block other consumers
+   * while polling.
+   */
+  private final Semaphore semaphore = new Semaphore(0);
   private void signalNotEmpty() {
-takeLock.lock();
-try {
-  notEmpty.signal();
-} finally {
-  takeLock.unlock();
-}
+semaphore.release();
   }
 
   /* Multiplexer picks which queue to draw from */
@@ -112,28 +110,25 @@ public class FairCallQueue extends 
AbstractQueue
   }
 
   /**
-   * Returns the first non-empty queue with equal to startIdx, or
-   * or scans from highest to lowest priority queue.
+   * Returns an element first non-empty queue equal to the priority returned
+   * by the multiplexer or scans from highest to lowest priority queue.
+   *
+   * Caller must always acquire a semaphore permit before invoking.
*
-   * @param startIdx the queue number to start searching at
* @return the first non-empty queue with less priority, or null if
* everything was empty
*/
-  private BlockingQueue getFirstNonEmptyQueue(int startIdx) {
-BlockingQueue queue = this.queues.get(startIdx);
-if (queue.size() != 0) {
-  return queue;
-}
-final int numQueues = this.queues.size();
-for(int i=0; i < numQueues; i++) {
-  queue = this.queues.get(i);
-  if (queue.size() != 0) {
-return queue;
+  private E removeNextElement() {
+int priority = multiplexer.getAndAdvanceCurrentIndex();
+E e = queues.get(priority).poll();
+if (e == null) {
+  for (int idx = 0; e == null && idx < queues.size(); idx++) {
+e = queues.get(idx).poll();
   }
 }
-
-// All queues were empty
-return null;
+// guaranteed to find an element if caller acquired permit.
+assert e != null : "consumer didn't acquire semaphore!";
+return e;
   }
 
   /* AbstractQueue and BlockingQueue methods */
@@ -184,9 +179,9 @@ public class FairCallQueue extends 
AbstractQueue
 int priorityLevel = e.getPriorityLevel();
 BlockingQueue q = this.queues.get(priorityLevel);
 boolean ret = q.offer(e, timeout, unit);
-
-signalNotEmpty();
-
+if (ret) {
+  signalNotEmpty();
+}
 return ret;
   }
 
@@ -195,72 +190,21 @@ public class FairCallQueue extends 
AbstractQueue
 int priorityLevel = e.getPriorityLevel();
 BlockingQueue q = this.queues.get(priorityLevel);
 boolean ret = q.offer(e);
-
-signalNotEmpty();
-
+if (ret) {
+  signalNotEmpty();
+}
 return ret;
   }
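
The semaphore replaces the take lock and condition: every successful offer releases one permit and every consumer acquires one before polling, so consumers no longer serialize on a shared lock while draining. A minimal sketch of the same producer/consumer idea with hypothetical names; SemaphoreQueueSketch is not the FairCallQueue code:

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Semaphore;

final class SemaphoreQueueSketch<E> {
  private final ConcurrentLinkedQueue<E> queue = new ConcurrentLinkedQueue<>();
  // One permit per queued element; producers release, consumers acquire.
  private final Semaphore available = new Semaphore(0);

  boolean offer(E e) {
    boolean added = queue.offer(e);
    if (added) {
      available.release();        // replaces lock() + signal() + unlock()
    }
    return added;
  }

  E take() throws InterruptedException {
    available.acquire();          // blocks until a permit (an element) exists
    E e = queue.poll();
    assert e != null : "permit acquired but no element found";
    return e;
  }

  public static void main(String[] args) throws InterruptedException {
    SemaphoreQueueSketch<String> q = new SemaphoreQueueSketch<>();
    q.offer("call-1");
    System.out.println(q.take());
  }
}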
 
   

hadoop git commit: HADOOP-14033. Reduce fair call queue lock contention. Contributed by Daryn Sharp.

2017-02-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 41361ec39 -> 5de6f1b79


HADOOP-14033. Reduce fair call queue lock contention. Contributed by Daryn Sharp.

(cherry picked from commit 0c01cf57987bcc7a17154a3538960b67f625a9e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5de6f1b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5de6f1b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5de6f1b7

Branch: refs/heads/branch-2
Commit: 5de6f1b79140499452eef2356e67f74750dacb2a
Parents: 41361ec
Author: Kihwal Lee 
Authored: Thu Feb 9 16:20:06 2017 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 9 16:20:06 2017 -0600

--
 .../org/apache/hadoop/ipc/FairCallQueue.java| 167 ++-
 1 file changed, 51 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5de6f1b7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
index 77a9d65..820f24c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
@@ -27,8 +27,7 @@ import java.util.AbstractQueue;
 import java.util.HashMap;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.concurrent.locks.Condition;
+import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -55,16 +54,15 @@ public class FairCallQueue extends 
AbstractQueue
   /* The queues */
   private final ArrayList> queues;
 
-  /* Read locks */
-  private final ReentrantLock takeLock = new ReentrantLock();
-  private final Condition notEmpty = takeLock.newCondition();
+  /* Track available permits for scheduled objects.  All methods that will
+   * mutate a subqueue must acquire or release a permit on the semaphore.
+   * A semaphore is much faster than an exclusive lock because producers do
+   * not contend with consumers and consumers do not block other consumers
+   * while polling.
+   */
+  private final Semaphore semaphore = new Semaphore(0);
   private void signalNotEmpty() {
-takeLock.lock();
-try {
-  notEmpty.signal();
-} finally {
-  takeLock.unlock();
-}
+semaphore.release();
   }
 
   /* Multiplexer picks which queue to draw from */
@@ -112,28 +110,25 @@ public class FairCallQueue extends 
AbstractQueue
   }
 
   /**
-   * Returns the first non-empty queue with equal to startIdx, or
-   * or scans from highest to lowest priority queue.
+   * Returns an element first non-empty queue equal to the priority returned
+   * by the multiplexer or scans from highest to lowest priority queue.
+   *
+   * Caller must always acquire a semaphore permit before invoking.
*
-   * @param startIdx the queue number to start searching at
* @return the first non-empty queue with less priority, or null if
* everything was empty
*/
-  private BlockingQueue getFirstNonEmptyQueue(int startIdx) {
-BlockingQueue queue = this.queues.get(startIdx);
-if (queue.size() != 0) {
-  return queue;
-}
-final int numQueues = this.queues.size();
-for(int i=0; i < numQueues; i++) {
-  queue = this.queues.get(i);
-  if (queue.size() != 0) {
-return queue;
+  private E removeNextElement() {
+int priority = multiplexer.getAndAdvanceCurrentIndex();
+E e = queues.get(priority).poll();
+if (e == null) {
+  for (int idx = 0; e == null && idx < queues.size(); idx++) {
+e = queues.get(idx).poll();
   }
 }
-
-// All queues were empty
-return null;
+// guaranteed to find an element if caller acquired permit.
+assert e != null : "consumer didn't acquire semaphore!";
+return e;
   }
 
   /* AbstractQueue and BlockingQueue methods */
@@ -184,9 +179,9 @@ public class FairCallQueue extends 
AbstractQueue
 int priorityLevel = e.getPriorityLevel();
 BlockingQueue q = this.queues.get(priorityLevel);
 boolean ret = q.offer(e, timeout, unit);
-
-signalNotEmpty();
-
+if (ret) {
+  signalNotEmpty();
+}
 return ret;
   }
 
@@ -195,72 +190,21 @@ public class FairCallQueue extends 
AbstractQueue
 int priorityLevel = e.getPriorityLevel();
 BlockingQueue q = this.queues.get(priorityLevel);
 boolean ret = q.offer(e);
-
-signalNotEmpty();
-
+ 

hadoop git commit: HADOOP-14033. Reduce fair call queue lock contention. Contributed by Daryn Sharp.

2017-02-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 47bbe431b -> 6b3443fbf


HADOOP-14033. Reduce fair call queue lock contention. Contributed by Daryn Sharp.

(cherry picked from commit 0c01cf57987bcc7a17154a3538960b67f625a9e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b3443fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b3443fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b3443fb

Branch: refs/heads/branch-2.8
Commit: 6b3443fbf5bb38e6890b6d99d63ab9436f06fc94
Parents: 47bbe43
Author: Kihwal Lee 
Authored: Thu Feb 9 16:20:48 2017 -0600
Committer: Kihwal Lee 
Committed: Thu Feb 9 16:20:48 2017 -0600

--
 .../org/apache/hadoop/ipc/FairCallQueue.java| 167 ++-
 1 file changed, 51 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b3443fb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
index 77a9d65..820f24c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
@@ -27,8 +27,7 @@ import java.util.AbstractQueue;
 import java.util.HashMap;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.concurrent.locks.Condition;
+import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -55,16 +54,15 @@ public class FairCallQueue extends 
AbstractQueue
   /* The queues */
   private final ArrayList> queues;
 
-  /* Read locks */
-  private final ReentrantLock takeLock = new ReentrantLock();
-  private final Condition notEmpty = takeLock.newCondition();
+  /* Track available permits for scheduled objects.  All methods that will
+   * mutate a subqueue must acquire or release a permit on the semaphore.
+   * A semaphore is much faster than an exclusive lock because producers do
+   * not contend with consumers and consumers do not block other consumers
+   * while polling.
+   */
+  private final Semaphore semaphore = new Semaphore(0);
   private void signalNotEmpty() {
-takeLock.lock();
-try {
-  notEmpty.signal();
-} finally {
-  takeLock.unlock();
-}
+semaphore.release();
   }
 
   /* Multiplexer picks which queue to draw from */
@@ -112,28 +110,25 @@ public class FairCallQueue extends 
AbstractQueue
   }
 
   /**
-   * Returns the first non-empty queue with equal to startIdx, or
-   * or scans from highest to lowest priority queue.
+   * Returns an element first non-empty queue equal to the priority returned
+   * by the multiplexer or scans from highest to lowest priority queue.
+   *
+   * Caller must always acquire a semaphore permit before invoking.
*
-   * @param startIdx the queue number to start searching at
* @return the first non-empty queue with less priority, or null if
* everything was empty
*/
-  private BlockingQueue getFirstNonEmptyQueue(int startIdx) {
-BlockingQueue queue = this.queues.get(startIdx);
-if (queue.size() != 0) {
-  return queue;
-}
-final int numQueues = this.queues.size();
-for(int i=0; i < numQueues; i++) {
-  queue = this.queues.get(i);
-  if (queue.size() != 0) {
-return queue;
+  private E removeNextElement() {
+int priority = multiplexer.getAndAdvanceCurrentIndex();
+E e = queues.get(priority).poll();
+if (e == null) {
+  for (int idx = 0; e == null && idx < queues.size(); idx++) {
+e = queues.get(idx).poll();
   }
 }
-
-// All queues were empty
-return null;
+// guaranteed to find an element if caller acquired permit.
+assert e != null : "consumer didn't acquire semaphore!";
+return e;
   }
 
   /* AbstractQueue and BlockingQueue methods */
@@ -184,9 +179,9 @@ public class FairCallQueue extends 
AbstractQueue
 int priorityLevel = e.getPriorityLevel();
 BlockingQueue q = this.queues.get(priorityLevel);
 boolean ret = q.offer(e, timeout, unit);
-
-signalNotEmpty();
-
+if (ret) {
+  signalNotEmpty();
+}
 return ret;
   }
 
@@ -195,72 +190,21 @@ public class FairCallQueue extends 
AbstractQueue
 int priorityLevel = e.getPriorityLevel();
 BlockingQueue q = this.queues.get(priorityLevel);
 boolean ret = q.offer(e);
-
- 

hadoop git commit: HDFS-10617. PendingReconstructionBlocks.size() should be synchronized. Contributed by Eric Badger.

2016-07-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 728bf7f69 -> 2bbc3ea1b


HDFS-10617. PendingReconstructionBlocks.size() should be synchronized. Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bbc3ea1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bbc3ea1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bbc3ea1

Branch: refs/heads/trunk
Commit: 2bbc3ea1b54c25c28eb04caa48dece5cfc19d613
Parents: 728bf7f
Author: Kihwal Lee 
Authored: Wed Jul 13 21:59:35 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Jul 13 21:59:35 2016 -0500

--
 .../hdfs/server/blockmanagement/PendingReconstructionBlocks.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bbc3ea1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
index 956e94f..6628c43 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
@@ -134,7 +134,9 @@ class PendingReconstructionBlocks {
* The total number of blocks that are undergoing reconstruction.
*/
   int size() {
-return pendingReconstructions.size();
+synchronized (pendingReconstructions) {
+  return pendingReconstructions.size();
+}
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
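
Synchronizing size() on the same monitor as the mutators matters because HashMap is not thread-safe: without it a concurrent reader has no happens-before relationship with the writers and can observe stale or inconsistent state. A small illustrative sketch of the pattern; PendingCounterSketch is a hypothetical stand-in, not the HDFS class:

import java.util.HashMap;
import java.util.Map;

final class PendingCounterSketch {
  private final Map<String, Integer> pending = new HashMap<>();

  void increment(String block) {
    synchronized (pending) {
      pending.merge(block, 1, Integer::sum);
    }
  }

  void remove(String block) {
    synchronized (pending) {
      pending.remove(block);
    }
  }

  // Without this synchronized block a concurrent reader has no happens-before
  // edge with the writers above and may see a stale (or, during a HashMap
  // resize, inconsistent) size.
  int size() {
    synchronized (pending) {
      return pending.size();
    }
  }

  public static void main(String[] args) {
    PendingCounterSketch c = new PendingCounterSketch();
    c.increment("blk_1");
    System.out.println(c.size());   // 1
  }
}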



hadoop git commit: HDFS-10617. PendingReconstructionBlocks.size() should be synchronized. Contributed by Eric Badger.

2016-07-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b95f1af8a -> dba8eee89


HDFS-10617. PendingReconstructionBlocks.size() should be synchronized. Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dba8eee8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dba8eee8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dba8eee8

Branch: refs/heads/branch-2
Commit: dba8eee8997f5f3665069cd4f1712c9a207391d8
Parents: b95f1af
Author: Kihwal Lee 
Authored: Wed Jul 13 22:03:48 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Jul 13 22:03:48 2016 -0500

--
 .../hdfs/server/blockmanagement/PendingReplicationBlocks.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dba8eee8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
index 88eaaca..1c73edf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
@@ -134,7 +134,9 @@ class PendingReplicationBlocks {
* The total number of blocks that are undergoing replication
*/
   int size() {
-return pendingReplications.size();
+synchronized(pendingReplications) {
+  return pendingReplications.size();
+}
   } 
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-13429. Dispose of unnecessary SASL servers. Contributed by Daryn Sharp.

2016-08-02 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7fc70c642 -> b3018e73c


HADOOP-13429. Dispose of unnecessary SASL servers. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3018e73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3018e73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3018e73

Branch: refs/heads/trunk
Commit: b3018e73ccae43484d9cb85eabae814eb7f050a6
Parents: 7fc70c6
Author: Kihwal Lee 
Authored: Tue Aug 2 10:40:28 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Aug 2 10:40:28 2016 -0500

--
 .../src/main/java/org/apache/hadoop/ipc/Server.java| 13 +
 .../test/java/org/apache/hadoop/ipc/TestSaslRPC.java   | 12 +++-
 2 files changed, 20 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3018e73/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 405549a..80eea84 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1598,7 +1598,10 @@ public abstract class Server {
 String qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP);
 // SASL wrapping is only used if the connection has a QOP, and
 // the value is not auth.  ex. auth-int & auth-priv
-useWrap = (qop != null && !"auth".equalsIgnoreCase(qop));
+useWrap = (qop != null && !"auth".equalsIgnoreCase(qop));
+if (!useWrap) {
+  disposeSasl();
+}
   }
 }
 
@@ -1692,9 +1695,9 @@ public abstract class Server {
 private void switchToSimple() {
   // disable SASL and blank out any SASL server
   authProtocol = AuthProtocol.NONE;
-  saslServer = null;
+  disposeSasl();
 }
-
+
 private RpcSaslProto buildSaslResponse(SaslState state, byte[] replyToken) 
{
   if (LOG.isDebugEnabled()) {
 LOG.debug("Will send " + state + " token of size "
@@ -1731,6 +1734,8 @@ public abstract class Server {
 try {
   saslServer.dispose();
 } catch (SaslException ignored) {
+} finally {
+  saslServer = null;
 }
   }
 }
@@ -1980,7 +1985,7 @@ public abstract class Server {
   .getProtocol() : null;
 
   UserGroupInformation protocolUser = ProtoUtil.getUgi(connectionContext);
-  if (saslServer == null) {
+  if (authProtocol == AuthProtocol.NONE) {
 user = protocolUser;
   } else {
 // user is authenticated

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3018e73/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index ec53e8c..72371a7 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Client.ConnectionId;
+import org.apache.hadoop.ipc.Server.Connection;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.*;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
@@ -270,7 +271,16 @@ public class TestSaslRPC extends TestRpcBase {
   assertEquals(TOKEN, authMethod);
   //QOP must be auth
   assertEquals(expectedQop.saslQop,
-   RPC.getConnectionIdForProxy(proxy).getSaslQop());   
 
+   RPC.getConnectionIdForProxy(proxy).getSaslQop());
+  int n = 0;
+  for (Connection connection : server.getConnections()) {
+// only qop auth should dispose of the sasl server
+boolean hasServer = (connection.saslServer != null);
+assertTrue("qop:" + expectedQop + " hasServer:" + hasServer,
+(expectedQop == QualityOfProtection.AUTHENTICATION) ^ hasServer);
+n++;
+  }
+  assertTrue(n > 0);
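
Two details in this change work together: disposeSasl() is now called as soon as negotiation shows no wrapping is needed, and it nulls the reference in a finally block, which is why the later user-resolution check switches from saslServer == null to authProtocol == AuthProtocol.NONE. A hedged sketch of the dispose-and-null idiom using only the JDK javax.security.sasl API (the enclosing class is illustrative, not the Hadoop Server.Connection):

import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;

class SaslConnectionSketch {
  private SaslServer saslServer;  // set during SASL negotiation

  // Release the negotiated SASL server once it is no longer needed, e.g.
  // when the QOP is plain "auth" and no wrapping will occur, and drop the
  // reference so it can be garbage collected.
  void disposeSasl() {
    if (saslServer != null) {
      try {
        saslServer.dispose();
      } catch (SaslException ignored) {
        // best-effort cleanup; nothing useful to do if disposal fails
      } finally {
        saslServer = null;
      }
    }
  }
}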

hadoop git commit: HADOOP-13429. Dispose of unnecessary SASL servers. Contributed by Daryn Sharp.

2016-08-02 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b63e6c802 -> 41882bd5f


HADOOP-13429. Dispose of unnecessary SASL servers. Contributed by Daryn Sharp.

(cherry picked from commit b3018e73ccae43484d9cb85eabae814eb7f050a6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41882bd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41882bd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41882bd5

Branch: refs/heads/branch-2
Commit: 41882bd5ff53829d9fd4bf9d6d50ba245d450a5f
Parents: b63e6c8
Author: Kihwal Lee 
Authored: Tue Aug 2 10:52:10 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Aug 2 10:52:10 2016 -0500

--
 .../src/main/java/org/apache/hadoop/ipc/Server.java| 13 +
 .../test/java/org/apache/hadoop/ipc/TestSaslRPC.java   | 12 +++-
 2 files changed, 20 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41882bd5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index ab35d2c..6eb93fd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1573,7 +1573,10 @@ public abstract class Server {
 String qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP);
 // SASL wrapping is only used if the connection has a QOP, and
 // the value is not auth.  ex. auth-int & auth-priv
-useWrap = (qop != null && !"auth".equalsIgnoreCase(qop));
+useWrap = (qop != null && !"auth".equalsIgnoreCase(qop));
+if (!useWrap) {
+  disposeSasl();
+}
   }
 }
 
@@ -1654,9 +1657,9 @@ public abstract class Server {
 private void switchToSimple() {
   // disable SASL and blank out any SASL server
   authProtocol = AuthProtocol.NONE;
-  saslServer = null;
+  disposeSasl();
 }
-
+
 private RpcSaslProto buildSaslResponse(SaslState state, byte[] replyToken) 
{
   if (LOG.isDebugEnabled()) {
 LOG.debug("Will send " + state + " token of size "
@@ -1693,6 +1696,8 @@ public abstract class Server {
 try {
   saslServer.dispose();
 } catch (SaslException ignored) {
+} finally {
+  saslServer = null;
 }
   }
 }
@@ -1911,7 +1916,7 @@ public abstract class Server {
   .getProtocol() : null;
 
   UserGroupInformation protocolUser = ProtoUtil.getUgi(connectionContext);
-  if (saslServer == null) {
+  if (authProtocol == AuthProtocol.NONE) {
 user = protocolUser;
   } else {
 // user is authenticated

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41882bd5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index ec53e8c..72371a7 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Client.ConnectionId;
+import org.apache.hadoop.ipc.Server.Connection;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.*;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
@@ -270,7 +271,16 @@ public class TestSaslRPC extends TestRpcBase {
   assertEquals(TOKEN, authMethod);
   //QOP must be auth
   assertEquals(expectedQop.saslQop,
-   RPC.getConnectionIdForProxy(proxy).getSaslQop());   
 
+   RPC.getConnectionIdForProxy(proxy).getSaslQop());
+  int n = 0;
+  for (Connection connection : server.getConnections()) {
+// only qop auth should dispose of the sasl server
+boolean hasServer = (connection.saslServer != null);
+assertTrue("qop:" + expectedQop + " hasServer:" + hasServer,
+(expectedQop == QualityOfProtection.AUTHENTICATION) ^ hasServer);
+n++;
+  }
+  assertTrue(n > 0);

hadoop git commit: HDFS-5805. TestCheckpoint.testCheckpoint fails intermittently on branch-2. Contributed by Eric Badger.

2016-08-02 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0458a2af6 -> 5e5b8793f


HDFS-5805. TestCheckpoint.testCheckpoint fails intermittently on branch-2. 
Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e5b8793
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e5b8793
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e5b8793

Branch: refs/heads/trunk
Commit: 5e5b8793fba8e25aeba7a74878da4cf8e806f061
Parents: 0458a2a
Author: Kihwal Lee 
Authored: Tue Aug 2 15:37:36 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Aug 2 15:37:36 2016 -0500

--
 .../apache/hadoop/hdfs/server/namenode/TestCheckpoint.java| 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e5b8793/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 7c9df29..e643d21 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -1030,6 +1030,7 @@ public class TestCheckpoint {
*/
   @Test
   public void testCheckpoint() throws IOException {
+Path tmpDir = new Path("/tmp_tmp");
 Path file1 = new Path("checkpoint.dat");
 Path file2 = new Path("checkpoint2.dat");
 Configuration conf = new HdfsConfiguration();
@@ -1057,6 +1058,11 @@ public class TestCheckpoint {
   replication, seed);
   checkFile(fileSys, file1, replication);
 
+  for(int i=0; i < 1000; i++) {
+fileSys.mkdirs(tmpDir);
+fileSys.delete(tmpDir, true);
+  }
+
   //
   // Take a checkpoint
   //
@@ -1081,7 +1087,6 @@ public class TestCheckpoint {
 //
 // Restart cluster and verify that file1 still exist.
 //
-Path tmpDir = new Path("/tmp_tmp");
 try {
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
   .format(false).build();
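
The tmpDir declaration is hoisted so the new loop can use it; cycling the directory a thousand times before the checkpoint generates a burst of namespace edits for the checkpoint to work through. A hedged sketch of the same churn loop against a generic Hadoop FileSystem (the local-filesystem setup and iteration count are illustrative, not the MiniDFSCluster wiring from the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EditChurnSketch {
  // Generate repeated namespace edits by creating and deleting a
  // throwaway directory.
  static void churn(FileSystem fs, Path tmpDir, int iterations) throws Exception {
    for (int i = 0; i < iterations; i++) {
      fs.mkdirs(tmpDir);
      fs.delete(tmpDir, true);
    }
  }

  public static void main(String[] args) throws Exception {
    // The local filesystem is enough to exercise the loop itself.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    churn(fs, new Path(System.getProperty("java.io.tmpdir"), "churn_tmp"), 10);
  }
}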


hadoop git commit: HDFS-5805. TestCheckpoint.testCheckpoint fails intermittently on branch-2. Contributed by Eric Badger.

2016-08-02 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ec522a190 -> dc0a17e82


HDFS-5805. TestCheckpoint.testCheckpoint fails intermittently on branch-2. 
Contributed by Eric Badger.

(cherry picked from commit 5e5b8793fba8e25aeba7a74878da4cf8e806f061)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc0a17e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc0a17e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc0a17e8

Branch: refs/heads/branch-2
Commit: dc0a17e828fa9ef59c4148b8b215d5cdc4605a59
Parents: ec522a1
Author: Kihwal Lee 
Authored: Tue Aug 2 15:39:03 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Aug 2 15:39:03 2016 -0500

--
 .../apache/hadoop/hdfs/server/namenode/TestCheckpoint.java| 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc0a17e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 0529391..3945dab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -1039,6 +1039,7 @@ public class TestCheckpoint {
*/
   @Test
   public void testCheckpoint() throws IOException {
+Path tmpDir = new Path("/tmp_tmp");
 Path file1 = new Path("checkpoint.dat");
 Path file2 = new Path("checkpoint2.dat");
 Configuration conf = new HdfsConfiguration();
@@ -1065,6 +1066,11 @@ public class TestCheckpoint {
   writeFile(fileSys, file1, replication);
   checkFile(fileSys, file1, replication);
 
+  for(int i=0; i < 1000; i++) {
+fileSys.mkdirs(tmpDir);
+fileSys.delete(tmpDir, true);
+  }
+
   //
   // Take a checkpoint
   //
@@ -1089,7 +1095,6 @@ public class TestCheckpoint {
 //
 // Restart cluster and verify that file1 still exist.
 //
-Path tmpDir = new Path("/tmp_tmp");
 try {
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
   .format(false).build();


hadoop git commit: HDFS-5805. TestCheckpoint.testCheckpoint fails intermittently on branch-2. Contributed by Eric Badger.

2016-08-02 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 06df0cd87 -> 288f9ccde


HDFS-5805. TestCheckpoint.testCheckpoint fails intermittently on branch-2. 
Contributed by Eric Badger.

(cherry picked from commit 5e5b8793fba8e25aeba7a74878da4cf8e806f061)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/288f9ccd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/288f9ccd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/288f9ccd

Branch: refs/heads/branch-2.8
Commit: 288f9ccde29d7fc61059f59fa001e88b20c35f7a
Parents: 06df0cd
Author: Kihwal Lee 
Authored: Tue Aug 2 15:43:11 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Aug 2 15:43:11 2016 -0500

--
 .../apache/hadoop/hdfs/server/namenode/TestCheckpoint.java| 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/288f9ccd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index ad10919..d914b09 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -1039,6 +1039,7 @@ public class TestCheckpoint {
*/
   @Test
   public void testCheckpoint() throws IOException {
+Path tmpDir = new Path("/tmp_tmp");
 Path file1 = new Path("checkpoint.dat");
 Path file2 = new Path("checkpoint2.dat");
 Configuration conf = new HdfsConfiguration();
@@ -1065,6 +1066,11 @@ public class TestCheckpoint {
   writeFile(fileSys, file1, replication);
   checkFile(fileSys, file1, replication);
 
+  for(int i=0; i < 1000; i++) {
+fileSys.mkdirs(tmpDir);
+fileSys.delete(tmpDir, true);
+  }
+
   //
   // Take a checkpoint
   //
@@ -1089,7 +1095,6 @@ public class TestCheckpoint {
 //
 // Restart cluster and verify that file1 still exist.
 //
-Path tmpDir = new Path("/tmp_tmp");
 try {
   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
   .format(false).build();


hadoop git commit: HADOOP-13426. More efficiently build IPC responses. Contributed by Daryn Sharp.

2016-08-03 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk d848184e9 -> 2d8227605


HADOOP-13426. More efficiently build IPC responses. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d822760
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d822760
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d822760

Branch: refs/heads/trunk
Commit: 2d8227605fe22c1c05f31729edc8939013763c05
Parents: d848184
Author: Kihwal Lee 
Authored: Wed Aug 3 09:30:24 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Aug 3 09:31:49 2016 -0500

--
 .../org/apache/hadoop/ipc/ResponseBuffer.java   |  98 +
 .../main/java/org/apache/hadoop/ipc/Server.java | 143 +--
 .../apache/hadoop/ipc/TestResponseBuffer.java   |  87 +++
 3 files changed, 250 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d822760/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ResponseBuffer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ResponseBuffer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ResponseBuffer.java
new file mode 100644
index 000..ac96a24
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ResponseBuffer.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+class ResponseBuffer extends DataOutputStream {
+  ResponseBuffer(int capacity) {
+super(new FramedBuffer(capacity));
+  }
+
+  // update framing bytes based on bytes written to stream.
+  private FramedBuffer getFramedBuffer() {
+FramedBuffer buf = (FramedBuffer)out;
+buf.setSize(written);
+return buf;
+  }
+
+  void writeTo(OutputStream out) throws IOException {
+getFramedBuffer().writeTo(out);
+  }
+
+  byte[] toByteArray() {
+return getFramedBuffer().toByteArray();
+  }
+
+  int capacity() {
+return ((FramedBuffer)out).capacity();
+  }
+
+  void setCapacity(int capacity) {
+((FramedBuffer)out).setCapacity(capacity);
+  }
+
+  void ensureCapacity(int capacity) {
+if (((FramedBuffer)out).capacity() < capacity) {
+  ((FramedBuffer)out).setCapacity(capacity);
+}
+  }
+
+  ResponseBuffer reset() {
+written = 0;
+((FramedBuffer)out).reset();
+return this;
+  }
+
+  private static class FramedBuffer extends ByteArrayOutputStream {
+private static final int FRAMING_BYTES = 4;
+FramedBuffer(int capacity) {
+  super(capacity + FRAMING_BYTES);
+  reset();
+}
+@Override
+public int size() {
+  return count - FRAMING_BYTES;
+}
+void setSize(int size) {
+  buf[0] = (byte)((size >>> 24) & 0xFF);
+  buf[1] = (byte)((size >>> 16) & 0xFF);
+  buf[2] = (byte)((size >>>  8) & 0xFF);
+  buf[3] = (byte)((size >>>  0) & 0xFF);
+}
+int capacity() {
+  return buf.length - FRAMING_BYTES;
+}
+void setCapacity(int capacity) {
+  buf = Arrays.copyOf(buf, capacity + FRAMING_BYTES);
+}
+@Override
+public void reset() {
+  count = FRAMING_BYTES;
+  setSize(0);
+}
+  };
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d822760/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.jav
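
The core of the new class is FramedBuffer: it reserves four bytes at the front of the backing array and rewrites them with the big-endian payload length, so one buffer doubles as the serialized response and its length-prefixed wire frame without an extra copy. A small standalone sketch of that framing idea (plain JDK code, not the Hadoop class itself):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FramingSketch {
  public static void main(String[] args) throws IOException {
    // Serialize some payload.
    ByteArrayOutputStream payload = new ByteArrayOutputStream();
    new DataOutputStream(payload).writeUTF("hello rpc");
    byte[] body = payload.toByteArray();

    // Prefix it with a 4-byte big-endian length, the same layout as the
    // framing bytes FramedBuffer keeps at the start of its buffer.
    int size = body.length;
    byte[] framed = new byte[4 + size];
    framed[0] = (byte) (size >>> 24);
    framed[1] = (byte) (size >>> 16);
    framed[2] = (byte) (size >>> 8);
    framed[3] = (byte) size;
    System.arraycopy(body, 0, framed, 4, size);

    System.out.println("payload=" + size + " bytes, framed=" + framed.length + " bytes");
  }
}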

hadoop git commit: HADOOP-13426. More efficiently build IPC responses. Contributed by Daryn Sharp.

2016-08-03 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1087745d7 -> dd21d242e


HADOOP-13426. More efficiently build IPC responses. Contributed by Daryn Sharp.

(cherry picked from commit 2d8227605fe22c1c05f31729edc8939013763c05)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd21d242
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd21d242
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd21d242

Branch: refs/heads/branch-2
Commit: dd21d242eb8b21f50c8ecdb74f01ea7dcd313ab0
Parents: 1087745
Author: Kihwal Lee 
Authored: Wed Aug 3 09:33:04 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Aug 3 09:33:04 2016 -0500

--
 .../org/apache/hadoop/ipc/ResponseBuffer.java   |  98 +
 .../main/java/org/apache/hadoop/ipc/Server.java | 143 +--
 .../apache/hadoop/ipc/TestResponseBuffer.java   |  87 +++
 3 files changed, 250 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd21d242/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ResponseBuffer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ResponseBuffer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ResponseBuffer.java
new file mode 100644
index 000..ac96a24
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ResponseBuffer.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+class ResponseBuffer extends DataOutputStream {
+  ResponseBuffer(int capacity) {
+super(new FramedBuffer(capacity));
+  }
+
+  // update framing bytes based on bytes written to stream.
+  private FramedBuffer getFramedBuffer() {
+FramedBuffer buf = (FramedBuffer)out;
+buf.setSize(written);
+return buf;
+  }
+
+  void writeTo(OutputStream out) throws IOException {
+getFramedBuffer().writeTo(out);
+  }
+
+  byte[] toByteArray() {
+return getFramedBuffer().toByteArray();
+  }
+
+  int capacity() {
+return ((FramedBuffer)out).capacity();
+  }
+
+  void setCapacity(int capacity) {
+((FramedBuffer)out).setCapacity(capacity);
+  }
+
+  void ensureCapacity(int capacity) {
+if (((FramedBuffer)out).capacity() < capacity) {
+  ((FramedBuffer)out).setCapacity(capacity);
+}
+  }
+
+  ResponseBuffer reset() {
+written = 0;
+((FramedBuffer)out).reset();
+return this;
+  }
+
+  private static class FramedBuffer extends ByteArrayOutputStream {
+private static final int FRAMING_BYTES = 4;
+FramedBuffer(int capacity) {
+  super(capacity + FRAMING_BYTES);
+  reset();
+}
+@Override
+public int size() {
+  return count - FRAMING_BYTES;
+}
+void setSize(int size) {
+  buf[0] = (byte)((size >>> 24) & 0xFF);
+  buf[1] = (byte)((size >>> 16) & 0xFF);
+  buf[2] = (byte)((size >>>  8) & 0xFF);
+  buf[3] = (byte)((size >>>  0) & 0xFF);
+}
+int capacity() {
+  return buf.length - FRAMING_BYTES;
+}
+void setCapacity(int capacity) {
+  buf = Arrays.copyOf(buf, capacity + FRAMING_BYTES);
+}
+@Override
+public void reset() {
+  count = FRAMING_BYTES;
+  setSize(0);
+}
+  };
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd21d242/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoo

hadoop git commit: HDFS-10656. Optimize conversion of byte arrays back to path string. Contributed by Daryn Sharp.

2016-08-03 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2d8227605 -> bebf10d24


HDFS-10656. Optimize conversion of byte arrays back to path string. Contributed 
by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bebf10d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bebf10d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bebf10d2

Branch: refs/heads/trunk
Commit: bebf10d2455cad1aa8985553417d4d74a61150ee
Parents: 2d82276
Author: Kihwal Lee 
Authored: Wed Aug 3 11:53:41 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Aug 3 11:53:41 2016 -0500

--
 .../java/org/apache/hadoop/hdfs/DFSUtil.java| 52 
 1 file changed, 32 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bebf10d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 5ab6978..3fe944e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -84,7 +84,6 @@ import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -267,27 +266,40 @@ public class DFSUtil {
   /**
* Given a list of path components returns a path as a UTF8 String
*/
-  public static String byteArray2PathString(byte[][] pathComponents,
-  int offset, int length) {
-if (pathComponents.length == 0) {
+  public static String byteArray2PathString(final byte[][] components,
+  final int offset, final int length) {
+// specifically not using StringBuilder to more efficiently build
+// string w/o excessive byte[] copies and charset conversions.
+final int range = offset + length;
+Preconditions.checkPositionIndexes(offset, range, components.length);
+if (length == 0) {
   return "";
 }
-Preconditions.checkArgument(offset >= 0 && offset < pathComponents.length);
-Preconditions.checkArgument(length >= 0 && offset + length <=
-pathComponents.length);
-if (offset == 0 && length == 1
-&& (pathComponents[0] == null || pathComponents[0].length == 0)) {
-  return Path.SEPARATOR;
-}
-StringBuilder result = new StringBuilder();
-int lastIndex = offset + length - 1;
-for (int i = offset; i <= lastIndex; i++) {
-  result.append(new String(pathComponents[i], Charsets.UTF_8));
-  if (i < lastIndex) {
-result.append(Path.SEPARATOR_CHAR);
-  }
-}
-return result.toString();
+// absolute paths start with either null or empty byte[]
+byte[] firstComponent = components[offset];
+boolean isAbsolute = (offset == 0 &&
+(firstComponent == null || firstComponent.length == 0));
+if (offset == 0 && length == 1) {
+  return isAbsolute ? Path.SEPARATOR : bytes2String(firstComponent);
+}
+// compute length of full byte[], seed with 1st component and delimiters
+int pos = isAbsolute ? 0 : firstComponent.length;
+int size = pos + length - 1;
+for (int i=offset + 1; i < range; i++) {
+  size += components[i].length;
+}
+final byte[] result = new byte[size];
+if (!isAbsolute) {
+  System.arraycopy(firstComponent, 0, result, 0, firstComponent.length);
+}
+// append remaining components as "/component".
+for (int i=offset + 1; i < range; i++) {
+  result[pos++] = (byte)Path.SEPARATOR_CHAR;
+  int len = components[i].length;
+  System.arraycopy(components[i], 0, result, pos, len);
+  pos += len;
+}
+return bytes2String(result);
   }
 
   public static String byteArray2PathString(byte[][] pathComponents) {
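
The rewritten method sizes the output byte[] up front and does one System.arraycopy per component, replacing the old version's per-component String allocation and charset conversion. A hedged standalone sketch of the same joining strategy (a hypothetical helper; the real code finishes through DFSUtil's bytes2String):

import java.nio.charset.StandardCharsets;

public class PathJoinSketch {
  // Join components {"", "user", "data"} into "/user/data" with a single
  // byte[] allocation and one UTF-8 decode at the end.
  static String join(byte[][] components) {
    boolean absolute = components[0] == null || components[0].length == 0;
    int size = absolute ? 0 : components[0].length;
    for (int i = 1; i < components.length; i++) {
      size += 1 + components[i].length;      // separator plus the component
    }
    byte[] result = new byte[size];
    int pos = 0;
    if (!absolute) {
      System.arraycopy(components[0], 0, result, 0, components[0].length);
      pos = components[0].length;
    }
    for (int i = 1; i < components.length; i++) {
      result[pos++] = (byte) '/';
      System.arraycopy(components[i], 0, result, pos, components[i].length);
      pos += components[i].length;
    }
    return new String(result, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    byte[][] parts = { new byte[0],
        "user".getBytes(StandardCharsets.UTF_8),
        "data".getBytes(StandardCharsets.UTF_8) };
    System.out.println(join(parts));  // prints /user/data
  }
}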

