hadoop git commit: HDFS-7058. Tests for truncate CLI. Contributed by Dasha Boudnik.

2015-02-09 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a1bf7aecf -> 2b722b904


HDFS-7058. Tests for truncate CLI. Contributed by Dasha Boudnik.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b722b90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b722b90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b722b90

Branch: refs/heads/branch-2
Commit: 2b722b904e49ab47ea9aaac5063ac2dfc6e42910
Parents: a1bf7ae
Author: Konstantin V Shvachko s...@apache.org
Authored: Mon Feb 9 21:31:02 2015 -0800
Committer: Konstantin V Shvachko s...@apache.org
Committed: Mon Feb 9 21:31:02 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../src/test/resources/testHDFSConf.xml | 36 
 2 files changed, 38 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b722b90/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6d2715c..629b018 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -310,6 +310,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7743. Code cleanup of BlockInfo and rename BlockInfo to
 BlockInfoContiguous. (jing9)
 
+HDFS-7058. Tests for truncate CLI. (Dasha Boudnik via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b722b90/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index aef15da..93bbeb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -17064,5 +17064,41 @@ $</expected-output>
         </comparator>
       </comparators>
     </test>
+    <test> <!-- TESTED -->
+      <description>truncate to 5 bytes after waiting for block recovery to complete</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes /user/USERNAME/dir0</command>
+        <command>-fs NAMENODE -truncate -w 5 /user/USERNAME/dir0/data120bytes</command>
+        <command>-fs NAMENODE -cat /user/USERNAME/dir0/data120bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    <test> <!-- TESTED -->
+      <description>truncate to greater length than length of file</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /user/USERNAME/dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /user/USERNAME/dir0</command>
+        <command>-fs NAMENODE -truncate -w 50 /user/USERNAME/dir0/data15bytes</command>
+        <command>-fs NAMENODE -cat /user/USERNAME/dir0/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
   </tests>
 </configuration>
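
The two new cases pin down the shell contract: -truncate -w shortens a file and waits for block recovery, while truncating beyond the current length is rejected and leaves the file unchanged. A minimal sketch of the same contract through the Java FileSystem API (path and configuration are illustrative, not part of the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TruncateSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path f = new Path("/user/alice/dir0/data120bytes");

        // Truncate to 5 bytes. truncate() returns false while block recovery
        // of the last block is still running; "-truncate -w" polls until done.
        boolean done = fs.truncate(f, 5);
        System.out.println("usable immediately: " + done);

        // Truncating to a length greater than the file's current length is
        // an error, which is why the second test expects unchanged content.
        // fs.truncate(f, 50);  // would throw on a shorter file
      }
    }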



hadoop git commit: HDFS-7058. Tests for truncate CLI. Contributed by Dasha Boudnik.

2015-02-09 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3d15728ff -> b73956fda


HDFS-7058. Tests for truncate CLI. Contributed by Dasha Boudnik.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b73956fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b73956fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b73956fd

Branch: refs/heads/trunk
Commit: b73956fdad87e03e9875a96465579a1a3b17c498
Parents: 3d15728
Author: Konstantin V Shvachko s...@apache.org
Authored: Mon Feb 9 21:26:59 2015 -0800
Committer: Konstantin V Shvachko s...@apache.org
Committed: Mon Feb 9 21:26:59 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../src/test/resources/testHDFSConf.xml | 36 
 2 files changed, 38 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b73956fd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1ca2263..59d498d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -606,6 +606,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7743. Code cleanup of BlockInfo and rename BlockInfo to
 BlockInfoContiguous. (jing9)
 
+HDFS-7058. Tests for truncate CLI. (Dasha Boudnik via shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b73956fd/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 8b221d6..05546a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -17266,5 +17266,41 @@ $</expected-output>
         </comparator>
       </comparators>
     </test>
+    <test> <!-- TESTED -->
+      <description>truncate to 5 bytes after waiting for block recovery to complete</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes /user/USERNAME/dir0</command>
+        <command>-fs NAMENODE -truncate -w 5 /user/USERNAME/dir0/data120bytes</command>
+        <command>-fs NAMENODE -cat /user/USERNAME/dir0/data120bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    <test> <!-- TESTED -->
+      <description>truncate to greater length than length of file</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /user/USERNAME/dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /user/USERNAME/dir0</command>
+        <command>-fs NAMENODE -truncate -w 50 /user/USERNAME/dir0/data15bytes</command>
+        <command>-fs NAMENODE -cat /user/USERNAME/dir0/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
   </tests>
 </configuration>



hadoop git commit: YARN-3155. Refactor the exception handling code for TimelineClientImpl's retryOn method (Li Lu via wangda)

2015-02-09 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c0e14d546 -> 4d4442cb3


YARN-3155. Refactor the exception handling code for TimelineClientImpl's 
retryOn method (Li Lu via wangda)

(cherry picked from commit 00a748d24a565bce0cc8cfa2bdcf165778cea395)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d4442cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d4442cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d4442cb

Branch: refs/heads/branch-2
Commit: 4d4442cb39a4bb76841e49492a706bf3e44c
Parents: c0e14d5
Author: Wangda Tan wan...@apache.org
Authored: Mon Feb 9 17:20:25 2015 -0800
Committer: Wangda Tan wan...@apache.org
Committed: Mon Feb 9 17:30:59 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt  |  3 +++
 .../yarn/client/api/impl/TimelineClientImpl.java | 15 +--
 2 files changed, 4 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4442cb/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 82fb766..7731840 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -221,6 +221,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3144. Configuration for making delegation token failures to timeline
 server not-fatal (Jonathan Eagles via jlowe)
 
+YARN-3155. Refactor the exception handling code for TimelineClientImpl's 
+retryOn method (Li Lu via wangda)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4442cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index 0b88632..af68492 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -178,20 +178,7 @@ public class TimelineClientImpl extends TimelineClient {
 try {
   // try perform the op, if fail, keep retrying
   return op.run();
-}  catch (IOException e) {
-  // We may only throw runtime and IO exceptions. After switching to
-  // Java 1.7, we can merge these two catch blocks into one.
-
-  // break if there's no retries left
-  if (leftRetries == 0) {
-break;
-  }
-  if (op.shouldRetryOn(e)) {
-logException(e, leftRetries);
-  } else {
-throw e;
-  }
-} catch (RuntimeException e) {
+} catch (IOException | RuntimeException e) {
   // break if there's no retries left
   if (leftRetries == 0) {
 break;
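
The cleanup relies on Java 7 multi-catch, which lets one handler cover both checked IOException and unchecked RuntimeException. A self-contained sketch of the same retry shape (the Op interface and names are illustrative, not the actual TimelineClientImpl types):

    import java.io.IOException;

    public class RetryOnSketch {
      interface Op<T> {
        T run() throws IOException;
        boolean shouldRetryOn(Exception e);
      }

      static <T> T retryOn(Op<T> op, int maxRetries) throws IOException {
        int leftRetries = maxRetries;
        while (true) {
          try {
            // try to perform the op; on failure, keep retrying
            return op.run();
          } catch (IOException | RuntimeException e) { // one block since Java 7
            if (leftRetries == 0 || !op.shouldRetryOn(e)) {
              throw e; // out of retries, or a non-retriable failure
            }
            leftRetries--; // the real client also logs before looping
          }
        }
      }
    }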



hadoop git commit: HADOOP-11510. Expose truncate API via FileContext. (yliu)

2015-02-09 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4d4442cb3 -> ae316705b


HADOOP-11510. Expose truncate API via FileContext. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae316705
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae316705
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae316705

Branch: refs/heads/branch-2
Commit: ae316705bb79479038a13b80bab6febbe8f3c75f
Parents: 4d4442c
Author: yliu y...@apache.org
Authored: Tue Feb 10 01:43:08 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 10 01:43:08 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../apache/hadoop/fs/AbstractFileSystem.java|  9 
 .../java/org/apache/hadoop/fs/ChecksumFs.java   |  5 +++
 .../apache/hadoop/fs/DelegateToFileSystem.java  |  6 +++
 .../java/org/apache/hadoop/fs/FileContext.java  | 43 
 .../java/org/apache/hadoop/fs/FilterFs.java |  8 
 .../org/apache/hadoop/fs/viewfs/ChRootedFs.java |  6 +++
 .../org/apache/hadoop/fs/viewfs/ViewFs.java | 17 +++-
 .../org/apache/hadoop/fs/TestAfsCheckPath.java  |  6 +++
 .../main/java/org/apache/hadoop/fs/Hdfs.java|  6 +++
 .../fs/TestHDFSFileContextMainOperations.java   | 32 ++-
 11 files changed, 138 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae316705/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8d1047f..a61c349 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -31,6 +31,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11045. Introducing a tool to detect flaky tests of hadoop jenkins 
testing
 job. (Yongjun Zhang and Todd Lipcon via ozawa)
 
+HADOOP-11510. Expose truncate API via FileContext. (yliu)
+
   IMPROVEMENTS
 
 HADOOP-11483. HardLink.java should use the jdk7 createLink method 
(aajisaka)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae316705/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 92d4eca..975cc3c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -639,6 +639,15 @@ public abstract class AbstractFileSystem {
 
   /**
* The specification of this method matches that of
+   * {@link FileContext#truncate(Path, long)} except that Path f must be for
+   * this file system.
+   */
+  public abstract boolean truncate(Path f, long newLength)
+  throws AccessControlException, FileNotFoundException,
+  UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
* {@link FileContext#setReplication(Path, short)} except that Path f must be
* for this file system.
*/
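
A hedged usage sketch of the public entry point this patch adds; the path is illustrative, and the boolean result mirrors the AbstractFileSystem contract above (true once the file is usable at the new length, false while block recovery is still in progress):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class FileContextTruncateSketch {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(new Configuration());
        boolean done = fc.truncate(new Path("/tmp/data"), 5L);
        System.out.println(done);
      }
    }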

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae316705/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index ab5cd13..7dc4a80 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -297,6 +297,11 @@ public abstract class ChecksumFs extends FilterFs {
 
   }
 
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+    throw new IOException("Not supported");
+  }
+
   /**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae316705/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
index 1cdcb27..09707c6 100644
--- 

[7/8] hadoop git commit: HADOOP-11541. Raw XOR coder

2015-02-09 Thread zhz
HADOOP-11541. Raw XOR coder


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c36a7a9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c36a7a9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c36a7a9e

Branch: refs/heads/HDFS-7285
Commit: c36a7a9e5bedb6fafef45358bb0a1e93de669c02
Parents: f9e1cc2
Author: Kai Zheng dran...@apache.org
Authored: Sun Feb 8 01:40:27 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 9 17:10:14 2015 -0800

--
 .../io/erasurecode/rawcoder/XorRawDecoder.java  |  81 ++
 .../io/erasurecode/rawcoder/XorRawEncoder.java  |  61 +
 .../hadoop/io/erasurecode/TestCoderBase.java| 262 +++
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  96 +++
 .../erasurecode/rawcoder/TestXorRawCoder.java   |  52 
 5 files changed, 552 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36a7a9e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
new file mode 100644
index 000..98307a7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw decoder in XOR code scheme in pure Java, adapted from HDFS-RAID.
+ */
+public class XorRawDecoder extends AbstractRawErasureDecoder {
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+      ByteBuffer[] outputs) {
+    assert(erasedIndexes.length == outputs.length);
+    assert(erasedIndexes.length <= 1);
+
+    int bufSize = inputs[0].remaining();
+    int erasedIdx = erasedIndexes[0];
+
+    // Set the output to zeros.
+    for (int j = 0; j < bufSize; j++) {
+      outputs[0].put(j, (byte) 0);
+    }
+
+    // Process the inputs.
+    for (int i = 0; i < inputs.length; i++) {
+      // Skip the erased location.
+      if (i == erasedIdx) {
+        continue;
+      }
+
+      for (int j = 0; j < bufSize; j++) {
+        outputs[0].put(j, (byte) (outputs[0].get(j) ^ inputs[i].get(j)));
+      }
+    }
+  }
+
+  @Override
+  protected void doDecode(byte[][] inputs, int[] erasedIndexes,
+      byte[][] outputs) {
+    assert(erasedIndexes.length == outputs.length);
+    assert(erasedIndexes.length <= 1);
+
+    int bufSize = inputs[0].length;
+    int erasedIdx = erasedIndexes[0];
+
+    // Set the output to zeros.
+    for (int j = 0; j < bufSize; j++) {
+      outputs[0][j] = 0;
+    }
+
+    // Process the inputs.
+    for (int i = 0; i < inputs.length; i++) {
+      // Skip the erased location.
+      if (i == erasedIdx) {
+        continue;
+      }
+
+      for (int j = 0; j < bufSize; j++) {
+        outputs[0][j] ^= inputs[i][j];
+      }
+    }
+  }
+
+}
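
The recovery loop above is the plain XOR identity: if parity = d0 ^ d1 ^ ... ^ dk, then any single erased unit equals the XOR of all surviving units. A tiny standalone illustration of the arithmetic (not the coder API):

    import java.util.Arrays;

    public class XorIdentityDemo {
      public static void main(String[] args) {
        byte[] d0 = {1, 2, 3};
        byte[] d1 = {4, 5, 6};
        byte[] parity = new byte[3];
        for (int j = 0; j < 3; j++) {
          parity[j] = (byte) (d0[j] ^ d1[j]);         // encode: parity = d0 ^ d1
        }
        byte[] recovered = new byte[3];               // pretend d1 was erased
        for (int j = 0; j < 3; j++) {
          recovered[j] = (byte) (parity[j] ^ d0[j]);  // decode: d1 = parity ^ d0
        }
        System.out.println(Arrays.equals(recovered, d1)); // true
      }
    }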

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36a7a9e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
new file mode 100644
index 000..99b20b9
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE 

[2/8] hadoop git commit: HDFS-7339. Allocating and persisting block groups in NameNode. Contributed by Zhe Zhang

2015-02-09 Thread zhz
HDFS-7339. Allocating and persisting block groups in NameNode. Contributed by 
Zhe Zhang

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae4e4d41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae4e4d41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae4e4d41

Branch: refs/heads/HDFS-7285
Commit: ae4e4d41b53559ff329f5e6b7c6b9b5164812700
Parents: 5c27789
Author: Zhe Zhang z...@apache.org
Authored: Fri Jan 30 16:16:26 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 9 16:59:34 2015 -0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |  4 +
 .../server/blockmanagement/BlockIdManager.java  |  8 +-
 .../SequentialBlockGroupIdGenerator.java| 82 +++
 .../SequentialBlockIdGenerator.java |  6 +-
 .../hdfs/server/namenode/FSDirectory.java   |  8 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 34 +---
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 11 +++
 .../hdfs/server/namenode/TestAddBlockgroup.java | 84 
 9 files changed, 223 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae4e4d41/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index beb3e38..04a631f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -217,6 +217,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
   public static final String  DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
   public static final int DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
+  public static final String  DFS_NAMENODE_STRIPE_MIN_KEY = "dfs.namenode.stripe.min";
+  public static final int DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1;
   public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.replication.pending.timeout-sec";
   public static final int DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae4e4d41/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 8b3dbd0..7d50360 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -180,4 +180,8 @@ public class HdfsConstants {
   public static final byte WARM_STORAGE_POLICY_ID = 5;
   public static final byte EC_STORAGE_POLICY_ID = 4;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
+
+  public static final byte NUM_DATA_BLOCKS = 3;
+  public static final byte NUM_PARITY_BLOCKS = 2;
+  public static final byte MAX_BLOCKS_IN_GROUP = 16;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae4e4d41/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 1c69203..c8b9d20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -53,10 +53,12 @@ public class BlockIdManager {
* The global block ID space for this file system.
*/
   private final 

[3/8] hadoop git commit: HDFS-7652. Process block reports for erasure coded blocks. Contributed by Zhe Zhang

2015-02-09 Thread zhz
HDFS-7652. Process block reports for erasure coded blocks. Contributed by Zhe 
Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb3132b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb3132b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb3132b4

Branch: refs/heads/HDFS-7285
Commit: eb3132b46f029ea6420d2787259edcdd121a9502
Parents: ae4e4d4
Author: Zhe Zhang z...@apache.org
Authored: Mon Feb 9 10:27:14 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 9 17:01:21 2015 -0800

--
 .../server/blockmanagement/BlockIdManager.java|  8 
 .../hdfs/server/blockmanagement/BlockManager.java | 18 +-
 2 files changed, 21 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb3132b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index c8b9d20..e7f8a05 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -211,4 +211,12 @@ public class BlockIdManager {
   .LAST_RESERVED_BLOCK_ID);
 generationStampV1Limit = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
   }
+
+  public static boolean isStripedBlockID(long id) {
+    return id < 0;
+  }
+
+  public static long convertToGroupID(long id) {
+    return id & (~(HdfsConstants.MAX_BLOCKS_IN_GROUP - 1));
+  }
 }
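
With MAX_BLOCKS_IN_GROUP = 16 the mask clears the low four bits, so the IDs of all blocks in one striped group collapse to a single (negative) group ID. A worked illustration with made-up IDs:

    public class GroupIdDemo {
      public static void main(String[] args) {
        long mask = ~(16L - 1);          // MAX_BLOCKS_IN_GROUP = 16
        System.out.println(-1L & mask);  // -16
        System.out.println(-16L & mask); // -16: IDs -1 .. -16 share one group
        System.out.println(-17L & mask); // -32: the next group
      }
    }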

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb3132b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3fe47af..8610b79 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1872,7 +1872,7 @@ public class BlockManager {
   break;
 }
 
-BlockInfoContiguous bi = blocksMap.getStoredBlock(b);
+BlockInfoContiguous bi = getStoredBlock(b);
 if (bi == null) {
   if (LOG.isDebugEnabled()) {
 LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
@@ -1977,7 +1977,7 @@ public class BlockManager {
 continue;
   }
   
-  BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(iblk);
+  BlockInfoContiguous storedBlock = getStoredBlock(iblk);
   // If block does not belong to any file, we are done.
   if (storedBlock == null) continue;
   
@@ -2119,7 +2119,7 @@ public class BlockManager {
 }
 
 // find block by blockId
-BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(block);
+BlockInfoContiguous storedBlock = getStoredBlock(block);
 if(storedBlock == null) {
   // If blocksMap does not contain reported block id,
   // the replica should be removed from the data-node.
@@ -2410,7 +2410,7 @@ public class BlockManager {
 DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 if (block instanceof BlockInfoContiguousUnderConstruction) {
   //refresh our copy in case the block got completed in another thread
-  storedBlock = blocksMap.getStoredBlock(block);
+  storedBlock = getStoredBlock(block);
 } else {
   storedBlock = block;
 }
@@ -3356,7 +3356,15 @@ public class BlockManager {
   }
 
   public BlockInfoContiguous getStoredBlock(Block block) {
-    return blocksMap.getStoredBlock(block);
+    BlockInfoContiguous info = null;
+    if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+      info = blocksMap.getStoredBlock(
+          new Block(BlockIdManager.convertToGroupID(block.getBlockId())));
+    }
+    if (info == null) {
+      info = blocksMap.getStoredBlock(block);
+    }
+    return info;
   }
 
   /** updates a block in under replication queue */



[5/8] hadoop git commit: HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding (Kai Zheng via umamahesh)

2015-02-09 Thread zhz
HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding (Kai 
Zheng via umamahesh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ae52c8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ae52c8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ae52c8e

Branch: refs/heads/HDFS-7285
Commit: 0ae52c8e3a087d7c5d510504e4a30e192ad42679
Parents: 2477b02
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Thu Jan 29 14:15:13 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 9 17:09:47 2015 -0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  4 +
 .../apache/hadoop/io/erasurecode/ECChunk.java   | 82 +
 .../rawcoder/AbstractRawErasureCoder.java   | 63 +
 .../rawcoder/AbstractRawErasureDecoder.java | 93 
 .../rawcoder/AbstractRawErasureEncoder.java | 93 
 .../erasurecode/rawcoder/RawErasureCoder.java   | 78 
 .../erasurecode/rawcoder/RawErasureDecoder.java | 55 
 .../erasurecode/rawcoder/RawErasureEncoder.java | 54 
 8 files changed, 522 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ae52c8e/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
new file mode 100644
index 000..8ce5a89
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -0,0 +1,4 @@
+  BREAKDOWN OF HADOOP-11264 SUBTASKS AND RELATED JIRAS (Common part of 
HDFS-7285)
+
+HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding
+(Kai Zheng via umamahesh)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ae52c8e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
new file mode 100644
index 000..f84eb11
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A wrapper for ByteBuffer or bytes array for an erasure code chunk.
+ */
+public class ECChunk {
+
+  private ByteBuffer chunkBuffer;
+
+  /**
+   * Wrapping a ByteBuffer
+   * @param buffer
+   */
+  public ECChunk(ByteBuffer buffer) {
+this.chunkBuffer = buffer;
+  }
+
+  /**
+   * Wrapping a bytes array
+   * @param buffer
+   */
+  public ECChunk(byte[] buffer) {
+this.chunkBuffer = ByteBuffer.wrap(buffer);
+  }
+
+  /**
+   * Convert to ByteBuffer
+   * @return ByteBuffer
+   */
+  public ByteBuffer getBuffer() {
+return chunkBuffer;
+  }
+
+  /**
+   * Convert an array of this chunks to an array of ByteBuffers
+   * @param chunks
+   * @return an array of ByteBuffers
+   */
+  public static ByteBuffer[] toBuffers(ECChunk[] chunks) {
+ByteBuffer[] buffers = new ByteBuffer[chunks.length];
+
+    for (int i = 0; i < chunks.length; i++) {
+  buffers[i] = chunks[i].getBuffer();
+}
+
+return buffers;
+  }
+
+  /**
+   * Convert an array of this chunks to an array of byte array
+   * @param chunks
+   * @return an array of byte array
+   */
+  public static byte[][] toArray(ECChunk[] chunks) {
+byte[][] bytesArr = new byte[chunks.length][];
+
+    for (int i = 0; i < chunks.length; i++) {
+  bytesArr[i] = chunks[i].getBuffer().array();
+}
+
+return bytesArr;
+  }
+}
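
A minimal usage sketch of the wrapper, using only the constructors and converters shown above (buffer sizes are arbitrary):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.io.erasurecode.ECChunk;

    public class ECChunkSketch {
      public static void main(String[] args) {
        ECChunk a = new ECChunk(ByteBuffer.allocate(64)); // wrap a ByteBuffer
        ECChunk b = new ECChunk(new byte[64]);            // wrap a byte array
        ByteBuffer[] buffers = ECChunk.toBuffers(new ECChunk[] {a, b});
        byte[][] arrays = ECChunk.toArray(new ECChunk[] {a, b});
        System.out.println(buffers.length + " " + arrays.length); // 2 2
      }
    }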


hadoop git commit: HADOOP-11510. Expose truncate API via FileContext. (yliu)

2015-02-09 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 00a748d24 -> 1b56d1ce3


HADOOP-11510. Expose truncate API via FileContext. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b56d1ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b56d1ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b56d1ce

Branch: refs/heads/trunk
Commit: 1b56d1ce324165688d40c238858e1e19a1e60f7e
Parents: 00a748d
Author: yliu y...@apache.org
Authored: Tue Feb 10 01:45:29 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 10 01:45:29 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../apache/hadoop/fs/AbstractFileSystem.java|  9 
 .../java/org/apache/hadoop/fs/ChecksumFs.java   |  5 +++
 .../apache/hadoop/fs/DelegateToFileSystem.java  |  6 +++
 .../java/org/apache/hadoop/fs/FileContext.java  | 43 
 .../java/org/apache/hadoop/fs/FilterFs.java |  8 
 .../org/apache/hadoop/fs/viewfs/ChRootedFs.java |  6 +++
 .../org/apache/hadoop/fs/viewfs/ViewFs.java | 17 +++-
 .../org/apache/hadoop/fs/TestAfsCheckPath.java  |  6 +++
 .../main/java/org/apache/hadoop/fs/Hdfs.java|  6 +++
 .../fs/TestHDFSFileContextMainOperations.java   | 32 ++-
 11 files changed, 138 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b56d1ce/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 55baf8a..aa86360 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -411,6 +411,8 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11045. Introducing a tool to detect flaky tests of hadoop jenkins 
testing
 job. (Yongjun Zhang and Todd Lipcon via ozawa)
 
+HADOOP-11510. Expose truncate API via FileContext. (yliu)
+
   IMPROVEMENTS
 
 HADOOP-11483. HardLink.java should use the jdk7 createLink method 
(aajisaka)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b56d1ce/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 92d4eca..975cc3c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -639,6 +639,15 @@ public abstract class AbstractFileSystem {
 
   /**
* The specification of this method matches that of
+   * {@link FileContext#truncate(Path, long)} except that Path f must be for
+   * this file system.
+   */
+  public abstract boolean truncate(Path f, long newLength)
+  throws AccessControlException, FileNotFoundException,
+  UnresolvedLinkException, IOException;
+
+  /**
+   * The specification of this method matches that of
* {@link FileContext#setReplication(Path, short)} except that Path f must be
* for this file system.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b56d1ce/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index ab5cd13..7dc4a80 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -297,6 +297,11 @@ public abstract class ChecksumFs extends FilterFs {
 
   }
 
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+    throw new IOException("Not supported");
+  }
+
   /**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b56d1ce/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
index 1cdcb27..09707c6 100644
--- 

[1/8] hadoop git commit: HDFS-7347. Configurable erasure coding policy for individual files and directories ( Contributed by Zhe Zhang )

2015-02-09 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 [created] 93fc299d3


HDFS-7347. Configurable erasure coding policy for individual files and 
directories ( Contributed by Zhe Zhang )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c277894
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c277894
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c277894

Branch: refs/heads/HDFS-7285
Commit: 5c277894ec6ce5ac3bef56e7e8414ea8884680cd
Parents: af08425
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Nov 6 10:03:26 2014 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 9 16:54:21 2015 -0800

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  4 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  2 +
 .../BlockStoragePolicySuite.java|  5 ++
 .../hadoop/hdfs/TestBlockStoragePolicy.java | 12 +++-
 .../TestBlockInitialEncoding.java   | 75 
 5 files changed, 95 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c277894/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
new file mode 100644
index 000..2ef8527
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -0,0 +1,4 @@
+  BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
+
+HDFS-7347. Configurable erasure coding policy for individual files and
+directories ( Zhe Zhang via vinayakumarb )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c277894/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 54da8eb..8b3dbd0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -170,6 +170,7 @@ public class HdfsConstants {
   public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
   public static final String HOT_STORAGE_POLICY_NAME = "HOT";
   public static final String WARM_STORAGE_POLICY_NAME = "WARM";
+  public static final String EC_STORAGE_POLICY_NAME = "EC";
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
 
   public static final byte MEMORY_STORAGE_POLICY_ID = 15;
@@ -177,5 +178,6 @@ public class HdfsConstants {
   public static final byte ONESSD_STORAGE_POLICY_ID = 10;
   public static final byte HOT_STORAGE_POLICY_ID = 7;
   public static final byte WARM_STORAGE_POLICY_ID = 5;
+  public static final byte EC_STORAGE_POLICY_ID = 4;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c277894/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index ce87b06..c81dc5b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -77,6 +77,11 @@ public class BlockStoragePolicySuite {
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
+final byte ecId = HdfsConstants.EC_STORAGE_POLICY_ID;
+policies[ecId] = new BlockStoragePolicy(ecId,
+HdfsConstants.EC_STORAGE_POLICY_NAME,
+new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
+new StorageType[]{StorageType.ARCHIVE});
 final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
 policies[coldId] = new BlockStoragePolicy(coldId,
 HdfsConstants.COLD_STORAGE_POLICY_NAME,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c277894/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java

[4/8] hadoop git commit: Fix Compilation Error in TestAddBlockgroup.java after the merge

2015-02-09 Thread zhz
Fix Compilation Error in TestAddBlockgroup.java after the merge


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2477b02d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2477b02d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2477b02d

Branch: refs/heads/HDFS-7285
Commit: 2477b02d49ee4716d0256a4291b3f10368f8241a
Parents: eb3132b
Author: Jing Zhao ji...@apache.org
Authored: Sun Feb 8 16:01:03 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 9 17:03:02 2015 -0800

--
 .../apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2477b02d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
index 95133ce..06dfade 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -75,7 +75,7 @@ public class TestAddBlockgroup {
 final Path file1 = new Path("/file1");
 DFSTestUtil.createFile(fs, file1, BLOCKSIZE * 2, REPLICATION, 0L);
 INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
-BlockInfo[] file1Blocks = file1Node.getBlocks();
+BlockInfoContiguous[] file1Blocks = file1Node.getBlocks();
 assertEquals(2, file1Blocks.length);
 assertEquals(GROUP_SIZE, file1Blocks[0].numNodes());
 assertEquals(HdfsConstants.MAX_BLOCKS_IN_GROUP,



[6/8] hadoop git commit: HADOOP-11534. Minor improvements for raw erasure coders ( Contributed by Kai Zheng )

2015-02-09 Thread zhz
HADOOP-11534. Minor improvements for raw erasure coders ( Contributed by Kai 
Zheng )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9e1cc22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9e1cc22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9e1cc22

Branch: refs/heads/HDFS-7285
Commit: f9e1cc2233b2ad9e23a018c3bb62b61bcfec0ae1
Parents: 0ae52c8
Author: Vinayakumar B vinayakuma...@intel.com
Authored: Mon Feb 2 14:39:53 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 9 17:10:04 2015 -0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt   |  5 -
 .../org/apache/hadoop/io/erasurecode/ECChunk.java| 15 +--
 .../rawcoder/AbstractRawErasureCoder.java| 12 ++--
 3 files changed, 23 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9e1cc22/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 8ce5a89..2124800 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -1,4 +1,7 @@
   BREAKDOWN OF HADOOP-11264 SUBTASKS AND RELATED JIRAS (Common part of 
HDFS-7285)
 
 HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding
-(Kai Zheng via umamahesh)
\ No newline at end of file
+(Kai Zheng via umamahesh)
+
+HADOOP-11534. Minor improvements for raw erasure coders
+( Kai Zheng via vinayakumarb )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9e1cc22/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
index f84eb11..01e8f35 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -66,15 +66,26 @@ public class ECChunk {
   }
 
   /**
-   * Convert an array of this chunks to an array of byte array
+   * Convert an array of this chunks to an array of byte array.
+   * Note the chunk buffers are not affected.
* @param chunks
* @return an array of byte array
*/
   public static byte[][] toArray(ECChunk[] chunks) {
 byte[][] bytesArr = new byte[chunks.length][];
 
+ByteBuffer buffer;
     for (int i = 0; i < chunks.length; i++) {
-  bytesArr[i] = chunks[i].getBuffer().array();
+  buffer = chunks[i].getBuffer();
+  if (buffer.hasArray()) {
+bytesArr[i] = buffer.array();
+  } else {
+bytesArr[i] = new byte[buffer.remaining()];
+// Avoid affecting the original one
+buffer.mark();
+buffer.get(bytesArr[i]);
+buffer.reset();
+  }
 }
 
 return bytesArr;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9e1cc22/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index 474542b..74d2ab6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
@@ -24,26 +24,26 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
  */
 public abstract class AbstractRawErasureCoder implements RawErasureCoder {
 
-  private int dataSize;
-  private int paritySize;
+  private int numDataUnits;
+  private int numParityUnits;
   private int chunkSize;
 
   @Override
   public void initialize(int numDataUnits, int numParityUnits,
  int chunkSize) {
-this.dataSize = numDataUnits;
-this.paritySize = numParityUnits;
+this.numDataUnits = numDataUnits;
+this.numParityUnits = numParityUnits;
 this.chunkSize = chunkSize;
   }
 
   @Override
   public int getNumDataUnits() {
-return dataSize;
+return numDataUnits;
   }
 
   @Override
   public int 

[8/8] hadoop git commit: Added the missed entry for commit of HADOOP-11541

2015-02-09 Thread zhz
Added the missed entry for commit of HADOOP-11541


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93fc299d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93fc299d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93fc299d

Branch: refs/heads/HDFS-7285
Commit: 93fc299d30929e3a874e596b267c0cf4edad18b4
Parents: c36a7a9
Author: drankye dran...@gmail.com
Authored: Mon Feb 9 22:04:08 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 9 17:10:28 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93fc299d/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 2124800..9728f97 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -4,4 +4,7 @@
 (Kai Zheng via umamahesh)
 
 HADOOP-11534. Minor improvements for raw erasure coders
-( Kai Zheng via vinayakumarb )
\ No newline at end of file
+( Kai Zheng via vinayakumarb )
+
+HADOOP-11541. Raw XOR coder
+( Kai Zheng )



hadoop git commit: HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted Yu via yliu)

2015-02-09 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1b56d1ce3 -> 260b5e32c


HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted Yu 
via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/260b5e32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/260b5e32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/260b5e32

Branch: refs/heads/trunk
Commit: 260b5e32c427d54c8c74b9f84432700317d1f282
Parents: 1b56d1c
Author: yliu y...@apache.org
Authored: Tue Feb 10 01:57:51 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 10 01:57:51 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/protocol/DatanodeInfoWithStorage.java  | 63 
 .../hadoop/hdfs/protocol/LocatedBlock.java  |  4 +-
 .../protocol/DatanodeInfoWithStorage.java   | 59 --
 .../blockmanagement/TestDatanodeManager.java|  8 +--
 5 files changed, 72 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/260b5e32/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a77829..a841c7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -887,6 +887,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos
 but not StorageIDs. (Milan Desai via Arpit Agarwal)
 
+HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted
+Yu via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/260b5e32/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
new file mode 100644
index 000..db2c2e7
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DatanodeInfoWithStorage extends DatanodeInfo {
+  private final String storageID;
+  private final StorageType storageType;
+
+  public DatanodeInfoWithStorage(DatanodeInfo from, String storageID,
+ StorageType storageType) {
+super(from);
+this.storageID = storageID;
+this.storageType = storageType;
+  }
+
+  public String getStorageID() {
+return storageID;
+  }
+
+  public StorageType getStorageType() {
+return storageType;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+// allows this class to be used interchangeably with DatanodeInfo
+return super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+// allows this class to be used interchangeably with DatanodeInfo
+return super.hashCode();
+  }
+
+  @Override
+  public String toString() {
+    return "DatanodeInfoWithStorage[" + super.toString() + "," + storageID +
+        "," + storageType + "]";
+  }
+}
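
The effect of the restored signature is that callers keep receiving plain DatanodeInfo[] from LocatedBlock#getLocations(), while each element remains a DatanodeInfoWithStorage underneath. A hedged caller-side sketch (the LocatedBlock value is assumed to come from a real lookup):

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    public class LocationsSketch {
      static void printStorageIds(LocatedBlock block) {
        for (DatanodeInfo d : block.getLocations()) {   // restored signature
          if (d instanceof DatanodeInfoWithStorage) {   // storage info intact
            System.out.println(((DatanodeInfoWithStorage) d).getStorageID());
          }
        }
      }
    }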

http://git-wip-us.apache.org/repos/asf/hadoop/blob/260b5e32/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 

hadoop git commit: HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted Yu via yliu)

2015-02-09 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ae316705b -> cc0668ebe


HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted Yu 
via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc0668eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc0668eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc0668eb

Branch: refs/heads/branch-2
Commit: cc0668ebe67078878d51d8158ac948383a3e351e
Parents: ae31670
Author: yliu y...@apache.org
Authored: Tue Feb 10 02:00:32 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Feb 10 02:00:32 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/protocol/DatanodeInfoWithStorage.java  | 63 
 .../hadoop/hdfs/protocol/LocatedBlock.java  |  4 +-
 .../protocol/DatanodeInfoWithStorage.java   | 59 --
 .../blockmanagement/TestDatanodeManager.java|  8 +--
 5 files changed, 72 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc0668eb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3278dd8..8d49f3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -587,6 +587,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos
 but not StorageIDs. (Milan Desai via Arpit Agarwal)
 
+HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted
+Yu via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc0668eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
new file mode 100644
index 000..db2c2e7
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DatanodeInfoWithStorage extends DatanodeInfo {
+  private final String storageID;
+  private final StorageType storageType;
+
+  public DatanodeInfoWithStorage(DatanodeInfo from, String storageID,
+ StorageType storageType) {
+super(from);
+this.storageID = storageID;
+this.storageType = storageType;
+  }
+
+  public String getStorageID() {
+return storageID;
+  }
+
+  public StorageType getStorageType() {
+return storageType;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+// allows this class to be used interchangeably with DatanodeInfo
+return super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+// allows this class to be used interchangeably with DatanodeInfo
+return super.hashCode();
+  }
+
+  @Override
+  public String toString() {
+return "DatanodeInfoWithStorage[" + super.toString() + "," + storageID +
+"," + storageType + "]";
+  }
+}
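The equals() and hashCode() delegation above is what makes the wrapper a drop-in
replacement wherever a plain DatanodeInfo is expected. A minimal sketch of the
interchangeability this buys (hypothetical code, not part of the commit; it
assumes only the constructor shown above):

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;

public class StorageWrapperSketch {
  static boolean interchangeable(DatanodeInfo plain, String storageID) {
    // Wrap the node with storage details, as the sorting code now does.
    DatanodeInfoWithStorage wrapped =
        new DatanodeInfoWithStorage(plain, storageID, StorageType.DISK);
    Set<DatanodeInfo> nodes = new HashSet<DatanodeInfo>();
    nodes.add(wrapped);
    // true: equality deliberately ignores the extra storage fields.
    return nodes.contains(plain);
  }
}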

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc0668eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 

hadoop git commit: HDFS-7744. Fix potential NPE in DFSInputStream after setDropBehind or setReadahead is called (cmccabe)

2015-02-09 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cc0668ebe -> 1465393d1


HDFS-7744. Fix potential NPE in DFSInputStream after setDropBehind or 
setReadahead is called (cmccabe)

(cherry picked from commit a9dc5cd7069f721e8c55794b877026ba02537167)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1465393d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1465393d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1465393d

Branch: refs/heads/branch-2
Commit: 1465393d14b6d41c81d9913929d3f4a549cd3293
Parents: cc0668e
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Feb 9 20:16:41 2015 -0800
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Feb 9 20:19:48 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 11 ++-
 .../server/datanode/TestCachingStrategy.java| 30 
 3 files changed, 36 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1465393d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8d49f3c..eb1637c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -590,6 +590,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted
 Yu via yliu)
 
+HDFS-7744. Fix potential NPE in DFSInputStream after setDropBehind or
+setReadahead is called (cmccabe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1465393d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 9e75333..618f040 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -580,10 +580,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 }
 
 // Will be getting a new BlockReader.
-if (blockReader != null) {
-  blockReader.close();
-  blockReader = null;
-}
+closeCurrentBlockReader();
 
 //
 // Connect to best DataNode for desired Block, with potential offset
@@ -686,10 +683,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
  "unreleased ByteBuffers allocated by read().  " +
  "Please release " + builder.toString() + ".");
 }
-if (blockReader != null) {
-  blockReader.close();
-  blockReader = null;
-}
+closeCurrentBlockReader();
 super.close();
   }
 
@@ -1649,6 +1643,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   DFSClient.LOG.error("error closing blockReader", e);
 }
 blockReader = null;
+blockEnd = -1;
   }
 
   @Override

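The DFSInputStream hunks are the substance of the fix: both close paths now
funnel through closeCurrentBlockReader(), which also invalidates blockEnd. A
reduced sketch of the failure mode (simplified names and logic, not the actual
DFSInputStream code):

class CachedReaderSketch {
  private Object blockReader;  // stands in for the real BlockReader
  private long blockEnd = -1;  // offset of the last byte the reader covers
  private long pos;            // current read position

  void closeCurrentBlockReader() {
    blockReader = null;
    blockEnd = -1;             // the invalidation this patch adds
  }

  int read() {
    // Before the fix, a stale blockEnd could still satisfy pos <= blockEnd
    // after setDropBehind()/setReadahead() had nulled the reader, so this
    // guard was skipped and the null reader was dereferenced below.
    if (pos > blockEnd) {
      blockReader = openReaderAt(pos);
      blockEnd = pos + 1024;   // pretend block boundary for the sketch
    }
    return blockReader.hashCode(); // NPE here if the guard was skipped
  }

  private Object openReaderAt(long p) {
    return new Object();
  }
}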
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1465393d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
index b1df8ad..709554a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
@@ -369,4 +369,34 @@ public class TestCachingStrategy {
   }
 }
   }
+
+  @Test(timeout=120000)
+  public void testSeekAfterSetDropBehind() throws Exception {
+// start a cluster
+LOG.info("testSeekAfterSetDropBehind");
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+String TEST_PATH = "/test";
+int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
+try {
+  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+  .build();
+  cluster.waitActive();
+  FileSystem fs = cluster.getFileSystem();
+  createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
+  // verify that we can seek after setDropBehind
+  FSDataInputStream fis = fs.open(new Path(TEST_PATH));
+  try {
+  

hadoop git commit: MAPREDUCE-6237. Multiple mappers with DBInputFormat don't work because of reusing connections. Contributed by Kannan Rajah.

2015-02-09 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7e42088ab -> 241336ca2


MAPREDUCE-6237. Multiple mappers with DBInputFormat don't work because of 
reusing connections. Contributed by Kannan Rajah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/241336ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/241336ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/241336ca

Branch: refs/heads/trunk
Commit: 241336ca2b7cf97d7e0bd84dbe0542b72f304dc9
Parents: 7e42088
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Feb 10 03:52:42 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Feb 10 03:52:42 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt| 15 ++
 .../hadoop/mapreduce/lib/db/DBInputFormat.java  | 31 
 .../lib/db/DataDrivenDBInputFormat.java |  5 ++--
 .../lib/db/OracleDataDrivenDBInputFormat.java   |  2 +-
 4 files changed, 37 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/241336ca/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 583c6c1..c71fee8 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -346,6 +346,21 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6233. org.apache.hadoop.mapreduce.TestLargeSort.testLargeSort
 failed in trunk (zxu via rkanter)
 
+Release 2.6.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+MAPREDUCE-6237. Multiple mappers with DBInputFormat don't work because of
+reusing connections. (Kannan Rajah via ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/241336ca/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
index c0530c2..00fbeda 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
@@ -159,7 +159,7 @@ public class DBInputFormat<T extends DBWritable>
 dbConf = new DBConfiguration(conf);
 
 try {
-  getConnection();
+  this.connection = createConnection();
 
   DatabaseMetaData dbMeta = connection.getMetaData();
   this.dbProductName = dbMeta.getDatabaseProductName().toUpperCase();
@@ -182,18 +182,25 @@ public class DBInputFormat<T extends DBWritable>
   }
 
   public Connection getConnection() {
+// TODO Remove this code that handles backward compatibility.
+if (this.connection == null) {
+  this.connection = createConnection();
+}
+
+return this.connection;
+  }
+
+  public Connection createConnection() {
 try {
-  if (null == this.connection) {
-// The connection was closed; reinstantiate it.
-this.connection = dbConf.getConnection();
-this.connection.setAutoCommit(false);
-this.connection.setTransactionIsolation(
-Connection.TRANSACTION_SERIALIZABLE);
-  }
+  Connection newConnection = dbConf.getConnection();
+  newConnection.setAutoCommit(false);
+  newConnection.setTransactionIsolation(
+  Connection.TRANSACTION_SERIALIZABLE);
+
+  return newConnection;
 } catch (Exception e) {
   throw new RuntimeException(e);
 }
-return connection;
   }
 
   public String getDBProductName() {
@@ -210,17 +217,17 @@ public class DBInputFormat<T extends DBWritable>
   if (dbProductName.startsWith("ORACLE")) {
 // use Oracle-specific db reader.
 return new OracleDBRecordReader<T>(split, inputClass,
-conf, getConnection(), getDBConf(), conditions, fieldNames,
+conf, createConnection(), getDBConf(), conditions, fieldNames,
 tableName);
   } else if (dbProductName.startsWith("MYSQL")) {
 // use MySQL-specific db reader.
 return new MySQLDBRecordReader<T>(split, inputClass,
-conf, getConnection(), getDBConf(), conditions, fieldNames,
+conf, createConnection(), getDBConf(), 

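The shape of the fix: getConnection() used to hand every record reader the same
cached JDBC connection, so when one reader closed it, every other mapper in the
same JVM lost it too. createConnection() now gives each reader a fresh
connection configured the same way, and getConnection() survives only for
backward compatibility. A hedged stand-alone sketch of the per-reader pattern
(illustrative names; jdbcUrl stands in for what DBConfiguration would supply):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

class PerReaderConnectionSketch {
  private final String jdbcUrl;  // stand-in for the configured URL

  PerReaderConnectionSketch(String jdbcUrl) {
    this.jdbcUrl = jdbcUrl;
  }

  // One fresh connection per record reader, configured like the patch does.
  Connection createConnection() throws SQLException {
    Connection c = DriverManager.getConnection(jdbcUrl);
    c.setAutoCommit(false);
    c.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
    return c;
  }

  ReaderLike openReader() throws SQLException {
    return new ReaderLike(createConnection());  // no shared state to break
  }

  static class ReaderLike implements AutoCloseable {
    private final Connection connection;

    ReaderLike(Connection connection) {
      this.connection = connection;
    }

    @Override
    public void close() throws SQLException {
      connection.close();  // closing one reader leaves the others alive
    }
  }
}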
hadoop git commit: MAPREDUCE-6237. Multiple mappers with DBInputFormat don't work because of reusing connections. Contributed by Kannan Rajah.

2015-02-09 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 ef212fbe7 -> 5b3d9bf63


MAPREDUCE-6237. Multiple mappers with DBInputFormat don't work because of 
reusing connections. Contributed by Kannan Rajah.

(cherry picked from commit 241336ca2b7cf97d7e0bd84dbe0542b72f304dc9)

Conflicts:
hadoop-mapreduce-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b3d9bf6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b3d9bf6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b3d9bf6

Branch: refs/heads/branch-2.6
Commit: 5b3d9bf6366a6462c24de48aece4a52020eadd64
Parents: ef212fb
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Feb 10 03:52:42 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Feb 10 03:54:54 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt| 15 ++
 .../hadoop/mapreduce/lib/db/DBInputFormat.java  | 31 
 .../lib/db/DataDrivenDBInputFormat.java |  5 ++--
 .../lib/db/OracleDataDrivenDBInputFormat.java   |  2 +-
 4 files changed, 37 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b3d9bf6/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 219e06c..ae06c85 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1,5 +1,20 @@
 Hadoop MapReduce Change Log
 
+Release 2.6.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+MAPREDUCE-6237. Multiple mappers with DBInputFormat don't work because of
+reusing connections. (Kannan Rajah via ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b3d9bf6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
index c0530c2..00fbeda 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
@@ -159,7 +159,7 @@ public class DBInputFormat<T extends DBWritable>
 dbConf = new DBConfiguration(conf);
 
 try {
-  getConnection();
+  this.connection = createConnection();
 
   DatabaseMetaData dbMeta = connection.getMetaData();
   this.dbProductName = dbMeta.getDatabaseProductName().toUpperCase();
@@ -182,18 +182,25 @@ public class DBInputFormat<T extends DBWritable>
   }
 
   public Connection getConnection() {
+// TODO Remove this code that handles backward compatibility.
+if (this.connection == null) {
+  this.connection = createConnection();
+}
+
+return this.connection;
+  }
+
+  public Connection createConnection() {
 try {
-  if (null == this.connection) {
-// The connection was closed; reinstantiate it.
-this.connection = dbConf.getConnection();
-this.connection.setAutoCommit(false);
-this.connection.setTransactionIsolation(
-Connection.TRANSACTION_SERIALIZABLE);
-  }
+  Connection newConnection = dbConf.getConnection();
+  newConnection.setAutoCommit(false);
+  newConnection.setTransactionIsolation(
+  Connection.TRANSACTION_SERIALIZABLE);
+
+  return newConnection;
 } catch (Exception e) {
   throw new RuntimeException(e);
 }
-return connection;
   }
 
   public String getDBProductName() {
@@ -210,17 +217,17 @@ public class DBInputFormat<T extends DBWritable>
   if (dbProductName.startsWith("ORACLE")) {
 // use Oracle-specific db reader.
 return new OracleDBRecordReader<T>(split, inputClass,
-conf, getConnection(), getDBConf(), conditions, fieldNames,
+conf, createConnection(), getDBConf(), conditions, fieldNames,
 tableName);
   } else if (dbProductName.startsWith("MYSQL")) {
 // use MySQL-specific db reader.
 return new MySQLDBRecordReader<T>(split, inputClass,
-conf, getConnection(), getDBConf(), conditions, fieldNames,
+conf, createConnection(), 

hadoop git commit: HADOOP-11512. Use getTrimmedStrings when reading serialization keys. Contributed by Ryan P.

2015-02-09 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2b722b904 -> 442bc776d


HADOOP-11512. Use getTrimmedStrings when reading serialization keys. 
Contributed by Ryan P.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/442bc776
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/442bc776
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/442bc776

Branch: refs/heads/branch-2
Commit: 442bc776db7642eae02ee83231ecdbec78d3cb78
Parents: 2b722b9
Author: Harsh J ha...@cloudera.com
Authored: Mon Feb 9 11:10:45 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Feb 10 12:52:41 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../hadoop/io/serializer/SerializationFactory.java   |  2 +-
 .../hadoop/io/serializer/TestSerializationFactory.java   | 11 +++
 3 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/442bc776/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a61c349..fec5613 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -202,6 +202,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-11512. Use getTrimmedStrings when reading serialization keys
+(Ryan P via harsh)
+
 HADOOP-11488. Difference in default connection timeout for S3A FS
 (Daisuke Kobayashi via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/442bc776/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
index 52a0a25..aa3c86a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
@@ -55,7 +55,7 @@ public class SerializationFactory extends Configured {
*/
   public SerializationFactory(Configuration conf) {
 super(conf);
-for (String serializerName : conf.getStrings(
+for (String serializerName : conf.getTrimmedStrings(
   CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
   new String[]{WritableSerialization.class.getName(),
 AvroSpecificSerialization.class.getName(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/442bc776/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
index 18c2637..b3c8bee 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.io.serializer;
 
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.junit.Test;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertNotNull;
@@ -41,4 +43,13 @@ public class TestSerializationFactory {
 assertNull("A null should be returned if there are no deserializers found",
 factory.getDeserializer(TestSerializationFactory.class));
   }
+
+  @Test
+  public void testSerializationKeyIsTrimmed() {
+Configuration conf = new Configuration();
+conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, " org.apache.hadoop.io.serializer.WritableSerialization ");
+SerializationFactory factory = new SerializationFactory(conf);
+assertNotNull("Valid class must be returned",
+  factory.getSerializer(LongWritable.class));
+   }
 }
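A small sketch of the behavioral difference behind the one-line fix (the key
and values here are invented for illustration; only the two Configuration
methods are real): getStrings() splits on commas but keeps surrounding
whitespace, so a padded class name later fails the class lookup, while
getTrimmedStrings() strips it.

import org.apache.hadoop.conf.Configuration;

public class TrimmedStringsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("example.serializations", " com.example.Foo , com.example.Bar ");
    for (String s : conf.getStrings("example.serializations")) {
      System.out.println("raw: [" + s + "]");      // spaces survive the split
    }
    for (String s : conf.getTrimmedStrings("example.serializations")) {
      System.out.println("trimmed: [" + s + "]");  // safe for class lookup
    }
  }
}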



hadoop git commit: HADOOP-11512. Use getTrimmedStrings when reading serialization keys. Contributed by Ryan P.

2015-02-09 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk b73956fda -> e0ec0718d


HADOOP-11512. Use getTrimmedStrings when reading serialization keys. 
Contributed by Ryan P.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0ec0718
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0ec0718
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0ec0718

Branch: refs/heads/trunk
Commit: e0ec0718d033e84bda2ebeab7beb00b7dbd990c0
Parents: b73956f
Author: Harsh J ha...@cloudera.com
Authored: Mon Feb 9 10:41:25 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Feb 10 12:51:56 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../apache/hadoop/io/serializer/SerializationFactory.java |  2 +-
 .../hadoop/io/serializer/TestSerializationFactory.java| 10 ++
 3 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ec0718/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index aa86360..8b80998 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -588,6 +588,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-11512. Use getTrimmedStrings when reading serialization keys
+(Ryan P via harsh)
+
 HADOOP-11488. Difference in default connection timeout for S3A FS
 (Daisuke Kobayashi via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ec0718/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
index d6c6588..3f177f8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
@@ -60,7 +60,7 @@ public class SerializationFactory extends Configured {
   + CommonConfigurationKeys.IO_SERIALIZATIONS_KEY
   + " properly to have serialization support (it is currently not set).");
 } else {
-  for (String serializerName : conf.getStrings(
+  for (String serializerName : conf.getTrimmedStrings(
   CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, new String[] {
   WritableSerialization.class.getName(),
   AvroSpecificSerialization.class.getName(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ec0718/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
index c5805be..6774155 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.io.serializer;
 
+import org.apache.hadoop.io.LongWritable;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import static org.junit.Assert.assertNull;
@@ -76,4 +77,13 @@ public class TestSerializationFactory {
 assertNull("A null should be returned if there are no deserializers found",
 factory.getDeserializer(TestSerializationFactory.class));
   }
+
+  @Test
+  public void testSerializationKeyIsTrimmed() {
+Configuration conf = new Configuration();
+conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, " org.apache.hadoop.io.serializer.WritableSerialization ");
+SerializationFactory factory = new SerializationFactory(conf);
+assertNotNull("Valid class must be returned",
+ factory.getSerializer(LongWritable.class));
+   }
 }



hadoop git commit: HDFS-7652. Process block reports for erasure coded blocks. Contributed by Zhe Zhang

2015-02-09 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-EC 71a67e955 -> c4362d568


HDFS-7652. Process block reports for erasure coded blocks. Contributed by Zhe 
Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4362d56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4362d56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4362d56

Branch: refs/heads/HDFS-EC
Commit: c4362d568855e886e2b44c73106304d769b63c96
Parents: 71a67e9
Author: Zhe Zhang z...@apache.org
Authored: Mon Feb 9 10:27:14 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Feb 9 10:27:14 2015 -0800

--
 .../server/blockmanagement/BlockIdManager.java|  8 
 .../hdfs/server/blockmanagement/BlockManager.java | 18 +-
 2 files changed, 21 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4362d56/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index c8b9d20..e7f8a05 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -211,4 +211,12 @@ public class BlockIdManager {
   .LAST_RESERVED_BLOCK_ID);
 generationStampV1Limit = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
   }
+
+  public static boolean isStripedBlockID(long id) {
+return id < 0;
+  }
+
+  public static long convertToGroupID(long id) {
+return id & (~(HdfsConstants.MAX_BLOCKS_IN_GROUP - 1));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4362d56/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3fe47af..8610b79 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1872,7 +1872,7 @@ public class BlockManager {
   break;
 }
 
-BlockInfoContiguous bi = blocksMap.getStoredBlock(b);
+BlockInfoContiguous bi = getStoredBlock(b);
 if (bi == null) {
   if (LOG.isDebugEnabled()) {
 LOG.debug(BLOCK* rescanPostponedMisreplicatedBlocks:  +
@@ -1977,7 +1977,7 @@ public class BlockManager {
 continue;
   }
   
-  BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(iblk);
+  BlockInfoContiguous storedBlock = getStoredBlock(iblk);
   // If block does not belong to any file, we are done.
   if (storedBlock == null) continue;
   
@@ -2119,7 +2119,7 @@ public class BlockManager {
 }
 
 // find block by blockId
-BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(block);
+BlockInfoContiguous storedBlock = getStoredBlock(block);
 if(storedBlock == null) {
   // If blocksMap does not contain reported block id,
   // the replica should be removed from the data-node.
@@ -2410,7 +2410,7 @@ public class BlockManager {
 DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 if (block instanceof BlockInfoContiguousUnderConstruction) {
   //refresh our copy in case the block got completed in another thread
-  storedBlock = blocksMap.getStoredBlock(block);
+  storedBlock = getStoredBlock(block);
 } else {
   storedBlock = block;
 }
@@ -3356,7 +3356,15 @@ public class BlockManager {
   }
 
   public BlockInfoContiguous getStoredBlock(Block block) {
-return blocksMap.getStoredBlock(block);
+BlockInfoContiguous info = null;
+if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  info = blocksMap.getStoredBlock(
+  new Block(BlockIdManager.convertToGroupID(block.getBlockId())));
+}
+if (info == null) {
+  info = blocksMap.getStoredBlock(block);
+}
+return info;
   }
 
   /** updates a block in under replication queue */



hadoop git commit: HDFS-7670. HDFS Quota guide has typos, incomplete command lines (Brahma Reddy Battula via aw)

2015-02-09 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 30b797ee9 -> 84cc071a7


HDFS-7670. HDFS Quota guide has typos, incomplete command lines (Brahma Reddy 
Battula via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84cc071a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84cc071a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84cc071a

Branch: refs/heads/trunk
Commit: 84cc071a7bfc9ba7ec1674fd41a38a33fd9cdf12
Parents: 30b797e
Author: Allen Wittenauer a...@apache.org
Authored: Mon Feb 9 12:54:03 2015 -0800
Committer: Allen Wittenauer a...@apache.org
Committed: Mon Feb 9 12:54:03 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm   | 10 +-
 2 files changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cc071a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4396e3d..0d9000a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -287,6 +287,9 @@ Trunk (Unreleased)
 HDFS-7751. Fix TestHDFSCLI for quota with storage type.  (Xiaoyu Yao
 via szetszwo)
 
+HDFS-7670. HDFS Quota guide has typos, incomplete command lines
+(Brahma Reddy Battula via aw)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cc071a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm
index 0821946..222cfbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm
@@ -71,20 +71,20 @@ HDFS Quotas Guide
Quotas are managed by a set of commands available only to the
administrator.
 
- * dfsadmin -setQuota <N> <directory>...<directory>
+ * hdfs dfsadmin -setQuota <N> <directory>...<directory>
 
Set the name quota to be N for each directory. Best effort for each
directory, with faults reported if N is not a positive long
integer, the directory does not exist or it is a file, or the
directory would immediately exceed the new quota.
 
- * dfsadmin -clrQuota <directory>...<directory>
+ * hdfs dfsadmin -clrQuota <directory>...<directory>
 
Remove any name quota for each directory. Best effort for each
directory, with faults reported if the directory does not exist or
it is a file. It is not a fault if the directory has no quota.
 
- * dfsadmin -setSpaceQuota <N> <directory>...<directory>
+ * hdfs dfsadmin -setSpaceQuota <N> <directory>...<directory>
 
Set the space quota to be N bytes for each directory. This is a
hard limit on total size of all the files under the directory tree.
@@ -96,7 +96,7 @@ HDFS Quotas Guide
integer, the directory does not exist or it is a file, or the
directory would immediately exceed the new quota.
 
- * dfsadmin -clrSpaceQuota <directory>...<director>
+ * hdfs dfsadmin -clrSpaceQuota <directory>...<directory>
 
Remove any space quota for each directory. Best effort for each
directory, with faults reported if the directory does not exist or
@@ -107,7 +107,7 @@ HDFS Quotas Guide
An extension to the count command of the HDFS shell reports quota
values and the current count of names and bytes in use.
 
- * fs -count -q <directory>...<directory>
+ * hadoop fs -count -q <directory>...<directory>
 
With the -q option, also report the name quota value set for each
directory, the available name quota remaining, the space quota



hadoop git commit: HADOOP-8934. Shell command ls should include sort options (Jonathan Allen via aw)

2015-02-09 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk ab934e859 -> 30b797ee9


HADOOP-8934. Shell command ls should include sort options (Jonathan Allen via 
aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30b797ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30b797ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30b797ee

Branch: refs/heads/trunk
Commit: 30b797ee9df30260314eeadffc7d51492871b352
Parents: ab934e8
Author: Allen Wittenauer a...@apache.org
Authored: Mon Feb 9 12:50:44 2015 -0800
Committer: Allen Wittenauer a...@apache.org
Committed: Mon Feb 9 12:50:44 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/fs/shell/Ls.java | 187 ---
 .../src/site/apt/FileSystemShell.apt.vm |  22 ++-
 .../src/test/resources/testConf.xml |  34 +++-
 .../src/test/resources/testHDFSConf.xml | 148 +++
 5 files changed, 361 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30b797ee/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b02e695..55baf8a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -24,6 +24,9 @@ Trunk (Unreleased)
 
 HADOOP-11485. Pluggable shell integration (aw)
 
+HADOOP-8934. Shell command ls should include sort options (Jonathan Allen
+via aw)
+
   IMPROVEMENTS
 
 HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30b797ee/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index c7e80b6..0e46700 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.fs.shell;
 
 import java.io.IOException;
 import java.text.SimpleDateFormat;
+import java.util.Arrays;
+import java.util.Comparator;
 import java.util.Date;
 import java.util.LinkedList;
 import org.apache.hadoop.util.StringUtils;
@@ -40,29 +42,59 @@ class Ls extends FsCommand {
 factory.addClass(Ls.class, "-ls");
 factory.addClass(Lsr.class, "-lsr");
   }
-  
+
+  private static final String OPTION_DIRECTORY = "d";
+  private static final String OPTION_HUMAN = "h";
+  private static final String OPTION_RECURSIVE = "R";
+  private static final String OPTION_REVERSE = "r";
+  private static final String OPTION_MTIME = "t";
+  private static final String OPTION_ATIME = "u";
+  private static final String OPTION_SIZE = "S";
+
   public static final String NAME = "ls";
-  public static final String USAGE = "[-d] [-h] [-R] [<path> ...]";
+  public static final String USAGE = "[-" + OPTION_DIRECTORY + "] [-"
+  + OPTION_HUMAN + "] " + "[-" + OPTION_RECURSIVE + "] [-" + OPTION_MTIME
+  + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE + "] " + "[-"
+  + OPTION_ATIME + "] [<path> ...]";
+
   public static final String DESCRIPTION =
-   "List the contents that match the specified file pattern. If " +
-   "path is not specified, the contents of /user/<currentUser> " +
-   "will be listed. Directory entries are of the form:\n" +
-   "\tpermissions - userId groupId sizeOfDirectory(in bytes) modificationDate(yyyy-MM-dd HH:mm) directoryName\n\n" +
-   "and file entries are of the form:\n" +
-   "\tpermissions numberOfReplicas userId groupId sizeOfFile(in bytes) modificationDate(yyyy-MM-dd HH:mm) fileName\n" +
-   "-d:  Directories are listed as plain files.\n" +
-   "-h:  Formats the sizes of files in a human-readable fashion " +
-   "rather than a number of bytes.\n" +
-   "-R:  Recursively list the contents of directories.";
- 
-  
-
-  protected final SimpleDateFormat dateFormat =
+  "List the contents that match the specified file pattern. If " +
+  "path is not specified, the contents of /user/<currentUser> " +
+  "will be listed. For a directory a list of its direct children " +
+  "is returned (unless -" + OPTION_DIRECTORY +
+  " option is specified).\n\n" +
+  "Directory entries are of the form:\n" +
+  
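The truncated DESCRIPTION aside, the new flags boil down to comparators over
FileStatus. A hedged sketch of the ordering that -t, -S and -r describe
(illustrative only, not the committed Ls code, and it uses Java 8 comparators
for brevity, which the 2015 codebase itself would not have used):

import java.util.Arrays;
import java.util.Comparator;

import org.apache.hadoop.fs.FileStatus;

public class LsSortSketch {
  static Comparator<FileStatus> comparatorFor(boolean bySize, boolean byTime,
      boolean reverse) {
    Comparator<FileStatus> c;
    if (bySize) {
      // -S: largest files first
      c = Comparator.comparingLong(FileStatus::getLen).reversed();
    } else if (byTime) {
      // -t: most recently modified first
      c = Comparator.comparingLong(FileStatus::getModificationTime).reversed();
    } else {
      // default: name order
      c = Comparator.comparing((FileStatus s) -> s.getPath().getName());
    }
    // -r: flip whichever order is active
    return reverse ? c.reversed() : c;
  }

  static void sort(FileStatus[] entries, boolean bySize, boolean byTime,
      boolean reverse) {
    Arrays.sort(entries, comparatorFor(bySize, byTime, reverse));
  }
}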

[2/2] hadoop git commit: HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos but not StorageIDs. (Contributed by Milan Desai)

2015-02-09 Thread arp
HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos but not 
StorageIDs. (Contributed by Milan Desai)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff900eb6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff900eb6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff900eb6

Branch: refs/heads/branch-2
Commit: ff900eb64a59e216af6073c769f9acdff8d4f812
Parents: b1aad1d
Author: Arpit Agarwal a...@apache.org
Authored: Mon Feb 9 12:17:40 2015 -0800
Committer: Arpit Agarwal a...@apache.org
Committed: Mon Feb 9 12:17:53 2015 -0800

--
 .../org/apache/hadoop/net/NetworkTopology.java  |  2 +-
 .../net/NetworkTopologyWithNodeGroup.java   |  2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hadoop/hdfs/protocol/LocatedBlock.java  | 77 ++
 .../server/blockmanagement/DatanodeManager.java |  2 +
 .../protocol/DatanodeInfoWithStorage.java   | 59 ++
 .../apache/hadoop/hdfs/TestDecommission.java| 10 ++-
 .../blockmanagement/TestDatanodeManager.java| 84 
 8 files changed, 218 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff900eb6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index a11ba9c..02b0005 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -860,7 +860,7 @@ public class NetworkTopology {
 // Start off by initializing to off rack
 int weight = 2;
 if (reader != null) {
-  if (reader == node) {
+  if (reader.equals(node)) {
 weight = 0;
   } else if (isOnSameRack(reader, node)) {
 weight = 1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff900eb6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
index 13160eb..3de49dc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
@@ -254,7 +254,7 @@ public class NetworkTopologyWithNodeGroup extends 
NetworkTopology {
 // Start off by initializing to off rack
 int weight = 3;
 if (reader != null) {
-  if (reader == node) {
+  if (reader.equals(node)) {
 weight = 0;
   } else if (isOnSameNodeGroup(reader, node)) {
 weight = 1;
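The small-looking NetworkTopology change (== to equals()) is what keeps the
weights correct once a reader can arrive as a DatanodeInfoWithStorage wrapper:
the reader is then equal to, but no longer identical with, the object stored in
the topology. A stand-alone sketch of that identity-versus-equality bug (a
stand-in class, not the real DatanodeInfo):

public class EqualsVsIdentitySketch {
  static class Node {
    final String uuid;
    Node(String uuid) { this.uuid = uuid; }
    @Override public boolean equals(Object o) {
      return o instanceof Node && ((Node) o).uuid.equals(uuid);
    }
    @Override public int hashCode() { return uuid.hashCode(); }
  }

  static int weight(Node reader, Node node) {
    if (reader.equals(node)) {  // was: reader == node
      return 0;                 // same node
    }
    return 2;                   // off rack (rack check elided in the sketch)
  }

  public static void main(String[] args) {
    Node stored = new Node("dn-1");
    Node wrapper = new Node("dn-1");  // e.g. a storage-aware view of dn-1
    // Prints 0 with equals(); with == the reader never matched and got 2.
    System.out.println(weight(wrapper, stored));
  }
}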

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff900eb6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 86a43e7..3278dd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -584,6 +584,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7741. Remove unnecessary synchronized in FSDataInputStream and
 HdfsDataInputStream. (yliu)
 
+HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos
+but not StorageIDs. (Milan Desai via Arpit Agarwal)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff900eb6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 30368f6..7fb2e30 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import 

[1/2] hadoop git commit: HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos but not StorageIDs. (Contributed by Milan Desai)

2015-02-09 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b1aad1d94 -> ff900eb64
  refs/heads/trunk 241336ca2 -> ab934e859


HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos but not 
StorageIDs. (Contributed by Milan Desai)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab934e85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab934e85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab934e85

Branch: refs/heads/trunk
Commit: ab934e85947dcf2092050023909dd81ae274ff45
Parents: 241336c
Author: Arpit Agarwal a...@apache.org
Authored: Mon Feb 9 12:17:40 2015 -0800
Committer: Arpit Agarwal a...@apache.org
Committed: Mon Feb 9 12:17:40 2015 -0800

--
 .../org/apache/hadoop/net/NetworkTopology.java  |  2 +-
 .../net/NetworkTopologyWithNodeGroup.java   |  2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hadoop/hdfs/protocol/LocatedBlock.java  | 77 ++
 .../server/blockmanagement/DatanodeManager.java |  2 +
 .../protocol/DatanodeInfoWithStorage.java   | 59 ++
 .../apache/hadoop/hdfs/TestDecommission.java| 10 ++-
 .../blockmanagement/TestDatanodeManager.java| 84 
 8 files changed, 218 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab934e85/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index aaa5ae3..fc8bf52 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -859,7 +859,7 @@ public class NetworkTopology {
 // Start off by initializing to off rack
 int weight = 2;
 if (reader != null) {
-  if (reader == node) {
+  if (reader.equals(node)) {
 weight = 0;
   } else if (isOnSameRack(reader, node)) {
 weight = 1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab934e85/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
index 13160eb..3de49dc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
@@ -254,7 +254,7 @@ public class NetworkTopologyWithNodeGroup extends 
NetworkTopology {
 // Start off by initializing to off rack
 int weight = 3;
 if (reader != null) {
-  if (reader == node) {
+  if (reader.equals(node)) {
 weight = 0;
   } else if (isOnSameNodeGroup(reader, node)) {
 weight = 1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab934e85/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index eda3744..4396e3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -872,6 +872,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7741. Remove unnecessary synchronized in FSDataInputStream and
 HdfsDataInputStream. (yliu)
 
+HDFS-7647. DatanodeManager.sortLocatedBlocks sorts DatanodeInfos
+but not StorageIDs. (Milan Desai via Arpit Agarwal)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab934e85/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 30368f6..7fb2e30 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import 

hadoop git commit: HADOOP-8934. Shell command ls should include sort options (Jonathan Allen via aw) (missed file)

2015-02-09 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 84cc071a7 -> 576459801


HADOOP-8934. Shell command ls should include sort options (Jonathan Allen via 
aw) (missed file)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57645980
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57645980
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57645980

Branch: refs/heads/trunk
Commit: 576459801c4e21effc4e3bca796527896b6e4f4b
Parents: 84cc071
Author: Allen Wittenauer a...@apache.org
Authored: Mon Feb 9 12:54:25 2015 -0800
Committer: Allen Wittenauer a...@apache.org
Committed: Mon Feb 9 12:54:25 2015 -0800

--
 .../java/org/apache/hadoop/fs/shell/TestLs.java | 1308 ++
 1 file changed, 1308 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57645980/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
new file mode 100644
index 000..66403db
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
@@ -0,0 +1,1308 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell;
+
+import static org.junit.Assert.*;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.*;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.URI;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.LinkedList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FilterFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.InOrder;
+
+/**
+ * JUnit test class for {@link org.apache.hadoop.fs.shell.Ls}
+ *
+ */
+public class TestLs {
+  private static Configuration conf;
+  private static FileSystem mockFs;
+
+  private static final Date NOW = new Date();
+
+  @BeforeClass
+  public static void setup() throws IOException {
+conf = new Configuration();
+conf.set("fs.defaultFS", "mockfs:///");
+conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
+mockFs = mock(FileSystem.class);
+  }
+
+  @Before
+  public void resetMock() throws IOException {
+reset(mockFs);
+AclStatus mockAclStatus = mock(AclStatus.class);
+when(mockAclStatus.getEntries()).thenReturn(new ArrayList<AclEntry>());
+when(mockFs.getAclStatus(any(Path.class))).thenReturn(mockAclStatus);
+  }
+
+  // check that default options are correct
+  @Test
+  public void processOptionsNone() throws IOException {
+LinkedList<String> options = new LinkedList<String>();
+Ls ls = new Ls();
+ls.processOptions(options);
+assertTrue(ls.isDirRecurse());
+assertFalse(ls.isHumanReadable());
+assertFalse(ls.isRecursive());
+assertFalse(ls.isOrderReverse());
+assertFalse(ls.isOrderSize());
+assertFalse(ls.isOrderTime());
+assertFalse(ls.isUseAtime());
+  }
+
+  // check the -d option is recognised
+  @Test
+  public void processOptionsDirectory() throws IOException {
+LinkedList<String> options = new LinkedList<String>();
+options.add(-d);
+Ls ls = new Ls();
+ls.processOptions(options);
+assertFalse(ls.isDirRecurse());
+assertFalse(ls.isHumanReadable());
+assertFalse(ls.isRecursive());
+assertFalse(ls.isOrderReverse());
+assertFalse(ls.isOrderSize());
+assertFalse(ls.isOrderTime());
+assertFalse(ls.isUseAtime());
+  }
+
+  // check the -h option 

hadoop git commit: MAPREDUCE-4413. MR lib dir contains jdiff (which is gpl) (Nemon Lou via aw)

2015-02-09 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 63613c79c -> aab459c90


MAPREDUCE-4413. MR lib dir contains jdiff (which is gpl) (Nemon Lou via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aab459c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aab459c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aab459c9

Branch: refs/heads/trunk
Commit: aab459c904bf2007c5b230af8c058793935faf89
Parents: 63613c7
Author: Allen Wittenauer a...@apache.org
Authored: Mon Feb 9 14:02:47 2015 -0800
Committer: Allen Wittenauer a...@apache.org
Committed: Mon Feb 9 14:02:47 2015 -0800

--
 .../src/main/resources/assemblies/hadoop-mapreduce-dist.xml| 1 +
 hadoop-mapreduce-project/CHANGES.txt   | 2 ++
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aab459c9/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
index 247b09c..df08c6c 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
@@ -195,6 +195,7 @@
 <exclude>org.slf4j:slf4j-api</exclude>
 <exclude>org.slf4j:slf4j-log4j12</exclude>
 <exclude>org.hsqldb:hsqldb</exclude>
+<exclude>jdiff:jdiff:jar</exclude>
   </excludes>
 </dependencySet>
 <dependencySet>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aab459c9/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index c71fee8..246f18d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -168,6 +168,8 @@ Trunk (Unreleased)
 MAPREDUCE-6161. mapred hsadmin command missing from trunk (Allen Wittenauer
 via jlowe)
 
+MAPREDUCE-4413. MR lib dir contains jdiff (which is gpl) (Nemon Lou via aw)
+
   BREAKDOWN OF MAPREDUCE-2841 (NATIVE TASK) SUBTASKS
 
 MAPREDUCE-5985. native-task: Fix build on macosx. Contributed by



hadoop git commit: YARN-2971. RM uses conf instead of token service address to renew timeline delegation tokens (jeagles)

2015-02-09 Thread jeagles
Repository: hadoop
Updated Branches:
  refs/heads/trunk aab459c90 -> af0842589


YARN-2971. RM uses conf instead of token service address to renew timeline 
delegation tokens (jeagles)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af084258
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af084258
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af084258

Branch: refs/heads/trunk
Commit: af0842589359ad800427337ad2c84fac09907f72
Parents: aab459c
Author: Jonathan Eagles jeag...@gmail.com
Authored: Mon Feb 9 17:56:05 2015 -0600
Committer: Jonathan Eagles jeag...@gmail.com
Committed: Mon Feb 9 17:56:05 2015 -0600

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../client/api/impl/TimelineClientImpl.java | 22 ++--
 .../client/api/impl/TestTimelineClient.java | 14 +++--
 3 files changed, 31 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af084258/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 578a8cc..634a0e7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -519,6 +519,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3094. Reset timer for liveness monitors after RM recovery. (Jun Gong
 via jianhe)
 
+YARN-2971. RM uses conf instead of token service address to renew timeline
+delegation tokens (jeagles)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af084258/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index de9d8da..0b88632 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.net.ConnectException;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
@@ -45,6 +46,7 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
@@ -373,12 +375,14 @@ public class TimelineClientImpl extends TimelineClient {
 == UserGroupInformation.AuthenticationMethod.PROXY;
 final String doAsUser = isProxyAccess ?
 UserGroupInformation.getCurrentUser().getShortUserName() : null;
+      boolean useHttps = YarnConfiguration.useHttps(this.getConfig());
+      final String scheme = useHttps ? "https" : "http";
+      final InetSocketAddress address =
+          SecurityUtil.getTokenServiceAddr(timelineDT);
       PrivilegedExceptionAction<Long> renewDTAction =
           new PrivilegedExceptionAction<Long>() {
 
   @Override
-  public Long run()
-  throws Exception {
+  public Long run() throws Exception {
             // If the timeline DT to renew is different than cached, replace it.
             // Token to set every time for retry, because when exception happens,
             // DelegationTokenAuthenticatedURL will reset it to null;
@@ -388,8 +392,10 @@ public class TimelineClientImpl extends TimelineClient {
 DelegationTokenAuthenticatedURL authUrl =
 new DelegationTokenAuthenticatedURL(authenticator,
 connConfigurator);
+            final URI serviceURI = new URI(scheme, null, address.getHostName(),
+                address.getPort(), RESOURCE_URI_STR, null, null);
             return authUrl
-                .renewDelegationToken(resURI.toURL(), token, doAsUser);
+                .renewDelegationToken(serviceURI.toURL(), token, doAsUser);
   }
 };
 return (Long) operateDelegationToken(renewDTAction);
@@ -405,12 +411,14 @@ public class 

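A minimal standalone sketch of the idea in the diff above: the renewal
endpoint is derived from the token's own service address rather than the
client's configured address, so renewal goes to whichever timeline server
actually issued the token. TIMELINE_RESOURCE_PATH below is a hypothetical
stand-in for the RESOURCE_URI_STR constant in TimelineClientImpl.

    import java.net.InetSocketAddress;
    import java.net.URI;

    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.token.Token;

    public final class TimelineRenewUri {
      // Hypothetical stand-in for TimelineClientImpl.RESOURCE_URI_STR.
      private static final String TIMELINE_RESOURCE_PATH = "/ws/v1/timeline/";

      static URI renewUriFor(Token<?> timelineDT, boolean useHttps)
          throws Exception {
        // The token's service field records the "host:port" it was issued for.
        InetSocketAddress addr = SecurityUtil.getTokenServiceAddr(timelineDT);
        String scheme = useHttps ? "https" : "http";
        return new URI(scheme, null, addr.getHostName(), addr.getPort(),
            TIMELINE_RESOURCE_PATH, null, null);
      }
    }
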
hadoop git commit: HDFS-7744. Fix potential NPE in DFSInputStream after setDropBehind or setReadahead is called (cmccabe)

2015-02-09 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 260b5e32c - a9dc5cd70


HDFS-7744. Fix potential NPE in DFSInputStream after setDropBehind or 
setReadahead is called (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9dc5cd7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9dc5cd7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9dc5cd7

Branch: refs/heads/trunk
Commit: a9dc5cd7069f721e8c55794b877026ba02537167
Parents: 260b5e3
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Feb 9 20:16:41 2015 -0800
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Feb 9 20:16:41 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 11 ++-
 .../server/datanode/TestCachingStrategy.java| 30 
 3 files changed, 36 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9dc5cd7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a841c7e..446c6a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -890,6 +890,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7756. Restore method signature for LocatedBlock#getLocations(). (Ted
 Yu via yliu)
 
+HDFS-7744. Fix potential NPE in DFSInputStream after setDropBehind or
+setReadahead is called (cmccabe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9dc5cd7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 9e75333..618f040 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -580,10 +580,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 }
 
 // Will be getting a new BlockReader.
-if (blockReader != null) {
-  blockReader.close();
-  blockReader = null;
-}
+closeCurrentBlockReader();
 
 //
 // Connect to best DataNode for desired Block, with potential offset
@@ -686,10 +683,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
          "unreleased ByteBuffers allocated by read().  " +
          "Please release " + builder.toString() + ".");
 }
-if (blockReader != null) {
-  blockReader.close();
-  blockReader = null;
-}
+closeCurrentBlockReader();
 super.close();
   }
 
@@ -1649,6 +1643,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
      DFSClient.LOG.error("error closing blockReader", e);
 }
 blockReader = null;
+blockEnd = -1;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9dc5cd7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
index b1df8ad..709554a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
@@ -369,4 +369,34 @@ public class TestCachingStrategy {
   }
 }
   }
+
+  @Test(timeout=120000)
+  public void testSeekAfterSetDropBehind() throws Exception {
+    // start a cluster
+    LOG.info("testSeekAfterSetDropBehind");
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    String TEST_PATH = "/test";
+    int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+          .build();
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+      createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
+      // verify that we can seek after setDropBehind
+      FSDataInputStream fis = fs.open(new Path(TEST_PATH));
+      try {
+        Assert.assertTrue(fis.read() != -1); // create BlockReader
+

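The test above is cut off in the archive, but the fix it exercises is
small enough to sketch in isolation: a cached end-of-block offset must be
invalidated together with the reader it describes, otherwise a later seek
that falls inside the stale [pos, blockEnd] range skips reconnection and
dereferences the null reader. A minimal sketch with simplified types, not
the actual DFSInputStream code:

    // Stand-in for the reader/offset pair DFSInputStream caches.
    class BlockReaderHolder {
      private AutoCloseable blockReader; // simplified stand-in for BlockReader
      private long blockEnd = -1;        // last readable offset of current block

      synchronized void closeCurrentBlockReader() {
        if (blockReader == null) {
          return;
        }
        try {
          blockReader.close();
        } catch (Exception e) {
          System.err.println("error closing blockReader: " + e);
        }
        blockReader = null;
        blockEnd = -1; // the HDFS-7744 fix: no stale offset without a live reader
      }
    }
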
hadoop git commit: HDFS-7718. Store KeyProvider in ClientContext to avoid leaking key provider threads when using FileContext (Arun Suresh via Colin P. McCabe)

2015-02-09 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk a9dc5cd70 - 02340a24f


HDFS-7718. Store KeyProvider in ClientContext to avoid leaking key provider 
threads when using FileContext (Arun Suresh via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02340a24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02340a24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02340a24

Branch: refs/heads/trunk
Commit: 02340a24f211212b91dc7380c1e5b54ddb5e82eb
Parents: a9dc5cd
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Feb 9 20:23:23 2015 -0800
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Feb 9 20:23:23 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/ClientContext.java   |  14 ++-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  45 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 ++
 .../apache/hadoop/hdfs/KeyProviderCache.java| 109 
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  10 +-
 .../hadoop/hdfs/TestEncryptionZonesWithHA.java  |   2 +-
 .../hadoop/hdfs/TestEncryptionZonesWithKMS.java |   2 +-
 .../hadoop/hdfs/TestKeyProviderCache.java   | 124 +++
 .../hadoop/hdfs/TestReservedRawPaths.java   |   4 +-
 apache.hadoop.crypto.key.KeyProviderFactory |  16 +++
 11 files changed, 304 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02340a24/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 446c6a3..4a6bc11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -893,6 +893,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7744. Fix potential NPE in DFSInputStream after setDropBehind or
 setReadahead is called (cmccabe)
 
+HDFS-7718. Store KeyProvider in ClientContext to avoid leaking key provider
+threads when using FileContext (Arun Suresh via Colin P. McCabe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02340a24/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
index e106fca..af7c095 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.cache.Cache;
 
 /**
  * ClientContext contains context information for a client.
@@ -72,6 +73,10 @@ public class ClientContext {
   private final DomainSocketFactory domainSocketFactory;
 
   /**
+   * Caches key Providers for the DFSClient
+   */
+  private final KeyProviderCache keyProviderCache;
+  /**
* True if we should use the legacy BlockReaderLocal.
*/
   private final boolean useLegacyBlockReaderLocal;
@@ -107,6 +112,7 @@ public class ClientContext {
 conf.shortCircuitSharedMemoryWatcherInterruptCheckMs);
 this.peerCache =
   new PeerCache(conf.socketCacheCapacity, conf.socketCacheExpiry);
+    this.keyProviderCache = new KeyProviderCache(conf.keyProviderCacheExpiryMs);
 this.useLegacyBlockReaderLocal = conf.useLegacyBlockReaderLocal;
 this.domainSocketFactory = new DomainSocketFactory(conf);
 
@@ -138,7 +144,9 @@ public class ClientContext {
       append(", domainSocketDataTraffic = ").
       append(conf.domainSocketDataTraffic).
       append(", shortCircuitSharedMemoryWatcherInterruptCheckMs = ").
-      append(conf.shortCircuitSharedMemoryWatcherInterruptCheckMs);
+      append(conf.shortCircuitSharedMemoryWatcherInterruptCheckMs).
+      append(", keyProviderCacheExpiryMs = ").
+      append(conf.keyProviderCacheExpiryMs);
 
 return builder.toString();
   }
@@ -195,6 +203,10 @@ public class ClientContext {
 return peerCache;
   }
 
+  public KeyProviderCache getKeyProviderCache() {
+return keyProviderCache;
+  }
+
   public boolean getUseLegacyBlockReaderLocal() {
 return useLegacyBlockReaderLocal;
   }


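The diffstat shows the new KeyProviderCache; its shape is a small Guava
cache keyed by provider URI, so repeated FileContext instantiations share
one provider instead of each leaking its own. A hedged sketch of that
pattern only, with illustrative types (the real class caches KeyProvider
instances and closes them on removal):

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    public class KeyProviderCacheSketch {
      // Values would be KeyProvider in HDFS; Object keeps the sketch
      // self-contained.
      private final Cache<String, Object> cache;

      public KeyProviderCacheSketch(long expiryMs) {
        cache = CacheBuilder.newBuilder()
            .expireAfterAccess(expiryMs, TimeUnit.MILLISECONDS)
            .build();
      }

      public Object get(String providerUri, Callable<Object> loader)
          throws ExecutionException {
        // Concurrent callers for the same URI share one load, so each
        // FileContext no longer spins up its own provider (and threads).
        return cache.get(providerUri, loader);
      }
    }
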
hadoop git commit: HDFS-7718. Store KeyProvider in ClientContext to avoid leaking key provider threads when using FileContext (Arun Suresh via Colin P. McCabe)

2015-02-09 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1465393d1 - 643a8892d


HDFS-7718. Store KeyProvider in ClientContext to avoid leaking key provider 
threads when using FileContext (Arun Suresh via Colin P. McCabe)

(cherry picked from commit 02340a24f211212b91dc7380c1e5b54ddb5e82eb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/643a8892
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/643a8892
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/643a8892

Branch: refs/heads/branch-2
Commit: 643a8892d1a5a15d2f9de273d4df09dc0c84bde0
Parents: 1465393
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Feb 9 20:23:23 2015 -0800
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Feb 9 20:25:31 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/ClientContext.java   |  14 ++-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  45 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 ++
 .../apache/hadoop/hdfs/KeyProviderCache.java| 109 
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  10 +-
 .../hadoop/hdfs/TestEncryptionZonesWithHA.java  |   2 +-
 .../hadoop/hdfs/TestEncryptionZonesWithKMS.java |   2 +-
 .../hadoop/hdfs/TestKeyProviderCache.java   | 124 +++
 .../hadoop/hdfs/TestReservedRawPaths.java   |   4 +-
 apache.hadoop.crypto.key.KeyProviderFactory |  16 +++
 11 files changed, 304 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/643a8892/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index eb1637c..06e40bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -593,6 +593,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7744. Fix potential NPE in DFSInputStream after setDropBehind or
 setReadahead is called (cmccabe)
 
+HDFS-7718. Store KeyProvider in ClientContext to avoid leaking key provider
+threads when using FileContext (Arun Suresh via Colin P. McCabe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/643a8892/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
index e106fca..af7c095 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.cache.Cache;
 
 /**
  * ClientContext contains context information for a client.
@@ -72,6 +73,10 @@ public class ClientContext {
   private final DomainSocketFactory domainSocketFactory;
 
   /**
+   * Caches key Providers for the DFSClient
+   */
+  private final KeyProviderCache keyProviderCache;
+  /**
* True if we should use the legacy BlockReaderLocal.
*/
   private final boolean useLegacyBlockReaderLocal;
@@ -107,6 +112,7 @@ public class ClientContext {
 conf.shortCircuitSharedMemoryWatcherInterruptCheckMs);
 this.peerCache =
   new PeerCache(conf.socketCacheCapacity, conf.socketCacheExpiry);
+    this.keyProviderCache = new KeyProviderCache(conf.keyProviderCacheExpiryMs);
 this.useLegacyBlockReaderLocal = conf.useLegacyBlockReaderLocal;
 this.domainSocketFactory = new DomainSocketFactory(conf);
 
@@ -138,7 +144,9 @@ public class ClientContext {
       append(", domainSocketDataTraffic = ").
       append(conf.domainSocketDataTraffic).
       append(", shortCircuitSharedMemoryWatcherInterruptCheckMs = ").
-      append(conf.shortCircuitSharedMemoryWatcherInterruptCheckMs);
+      append(conf.shortCircuitSharedMemoryWatcherInterruptCheckMs).
+      append(", keyProviderCacheExpiryMs = ").
+      append(conf.keyProviderCacheExpiryMs);
 
 return builder.toString();
   }
@@ -195,6 +203,10 @@ public class ClientContext {
 return peerCache;
   }
 
+  public KeyProviderCache getKeyProviderCache() {
+return keyProviderCache;
+  }
+
   public boolean getUseLegacyBlockReaderLocal() {
 return useLegacyBlockReaderLocal;
   }


hadoop git commit: YARN-3100. Made YARN authorization pluggable. Contributed by Jian He.

2015-02-09 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 02340a24f - 23bf6c720


YARN-3100. Made YARN authorization pluggable. Contributed by Jian He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23bf6c72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23bf6c72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23bf6c72

Branch: refs/heads/trunk
Commit: 23bf6c72071782e3fd5a628e21495d6b974c7a9e
Parents: 02340a2
Author: Zhijie Shen zjs...@apache.org
Authored: Mon Feb 9 20:34:56 2015 -0800
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Feb 9 20:34:56 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   2 +
 .../apache/hadoop/yarn/security/AccessType.java |  33 ++
 .../hadoop/yarn/security/AdminACLsManager.java  |  31 -
 .../yarn/security/ConfiguredYarnAuthorizer.java |  97 
 .../hadoop/yarn/security/PrivilegedEntity.java  |  83 ++
 .../security/YarnAuthorizationProvider.java | 112 +++
 .../org/apache/hadoop/yarn/webapp/WebApps.java  |   7 +-
 .../server/resourcemanager/AdminService.java|  34 +++---
 .../server/resourcemanager/RMServerUtils.java   |  25 +++--
 .../nodelabels/RMNodeLabelsManager.java |  12 +-
 .../scheduler/SchedulerUtils.java   |  13 +++
 .../scheduler/capacity/AbstractCSQueue.java |  43 +++
 .../scheduler/capacity/CapacityScheduler.java   |  20 +++-
 .../CapacitySchedulerConfiguration.java |  10 +-
 .../scheduler/capacity/LeafQueue.java   |   7 +-
 .../scheduler/capacity/ParentQueue.java |   7 +-
 .../scheduler/capacity/TestParentQueue.java |   5 +-
 .../SCMAdminProtocolService.java|  10 +-
 19 files changed, 445 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23bf6c72/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2f0ef7a..fbeca6a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -258,6 +258,8 @@ Release 2.7.0 - UNRELEASED
 YARN-3155. Refactor the exception handling code for TimelineClientImpl's 
 retryOn method (Li Lu via wangda)
 
+YARN-3100. Made YARN authorization pluggable. (Jian He via zjshen)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23bf6c72/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index d6f6dee..6904543 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -435,6 +435,8 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
      "org.apache.hadoop.yarn.LocalConfigurationProvider";
 
+  public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
+      + "authorization-provider";
   private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =
   Collections.unmodifiableList(Arrays.asList(
   RM_ADDRESS,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23bf6c72/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AccessType.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AccessType.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AccessType.java
new file mode 100644
index 000..32459b9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AccessType.java
@@ -0,0 +1,33 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the 

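The email is truncated here, but the wiring the new
yarn.authorization-provider key implies is the standard Hadoop pattern:
resolve a class name from configuration and instantiate it reflectively,
defaulting to the built-in ConfiguredYarnAuthorizer. A hedged sketch of
that pattern only; the exact YarnAuthorizationProvider API appears in the
diff in outline only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ReflectionUtils;
    import org.apache.hadoop.yarn.security.ConfiguredYarnAuthorizer;

    public final class AuthorizerLoader {
      public static Object createAuthorizer(Configuration conf) {
        // YARN_AUTHORIZATION_PROVIDER resolves to "yarn.authorization-provider".
        Class<?> clazz = conf.getClass("yarn.authorization-provider",
            ConfiguredYarnAuthorizer.class);
        // ReflectionUtils wires the Configuration into Configurable classes.
        return ReflectionUtils.newInstance(clazz, conf);
      }
    }
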
hadoop git commit: YARN-3100. Made YARN authorization pluggable. Contributed by Jian He.

2015-02-09 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 643a8892d - 24d4d933f


YARN-3100. Made YARN authorization pluggable. Contributed by Jian He.

(cherry picked from commit 23bf6c72071782e3fd5a628e21495d6b974c7a9e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24d4d933
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24d4d933
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24d4d933

Branch: refs/heads/branch-2
Commit: 24d4d933f72c4c3c3f2f34b1de73575c65913bd2
Parents: 643a889
Author: Zhijie Shen zjs...@apache.org
Authored: Mon Feb 9 20:34:56 2015 -0800
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Feb 9 20:37:58 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   2 +
 .../apache/hadoop/yarn/security/AccessType.java |  33 ++
 .../hadoop/yarn/security/AdminACLsManager.java  |  31 -
 .../yarn/security/ConfiguredYarnAuthorizer.java |  97 
 .../hadoop/yarn/security/PrivilegedEntity.java  |  83 ++
 .../security/YarnAuthorizationProvider.java | 112 +++
 .../org/apache/hadoop/yarn/webapp/WebApps.java  |   7 +-
 .../server/resourcemanager/AdminService.java|  34 +++---
 .../server/resourcemanager/RMServerUtils.java   |  25 +++--
 .../nodelabels/RMNodeLabelsManager.java |  12 +-
 .../scheduler/SchedulerUtils.java   |  13 +++
 .../scheduler/capacity/AbstractCSQueue.java |  43 +++
 .../scheduler/capacity/CapacityScheduler.java   |  20 +++-
 .../CapacitySchedulerConfiguration.java |  10 +-
 .../scheduler/capacity/LeafQueue.java   |   7 +-
 .../scheduler/capacity/ParentQueue.java |   7 +-
 .../scheduler/capacity/TestParentQueue.java |   5 +-
 .../SCMAdminProtocolService.java|  10 +-
 19 files changed, 445 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24d4d933/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7731840..a5dfe24 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -224,6 +224,8 @@ Release 2.7.0 - UNRELEASED
 YARN-3155. Refactor the exception handling code for TimelineClientImpl's 
 retryOn method (Li Lu via wangda)
 
+YARN-3100. Made YARN authorization pluggable. (Jian He via zjshen)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24d4d933/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a2a2529..d50a700 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -435,6 +435,8 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
      "org.apache.hadoop.yarn.LocalConfigurationProvider";
 
+  public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
+      + "authorization-provider";
   private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =
   Collections.unmodifiableList(Arrays.asList(
   RM_ADDRESS,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24d4d933/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AccessType.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AccessType.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AccessType.java
new file mode 100644
index 000..32459b9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AccessType.java
@@ -0,0 +1,33 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 

hadoop git commit: HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause DataNode to register successfully with only one NameNode.(Contributed by Vinayakumar B)

2015-02-09 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 23bf6c720 - 3d15728ff


HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause DataNode 
to register successfully with only one NameNode. (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d15728f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d15728f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d15728f

Branch: refs/heads/trunk
Commit: 3d15728ff5301296801e541d9b23bd1687c4adad
Parents: 23bf6c7
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Feb 10 10:43:08 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Feb 10 10:43:08 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java  | 5 +
 2 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d15728f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4a6bc11..1ca2263 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -896,6 +896,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7718. Store KeyProvider in ClientContext to avoid leaking key provider
 threads when using FileContext (Arun Suresh via Colin P. McCabe)
 
+HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause
+DataNode to register successfully with only one NameNode.(vinayakumarb)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d15728f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index e396727..917b5dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.util.Time.now;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
@@ -797,6 +798,10 @@ class BPServiceActor implements Runnable {
 // Use returned registration from namenode with updated fields
 bpRegistration = bpNamenode.registerDatanode(bpRegistration);
 break;
+  } catch(EOFException e) {  // namenode might have just restarted
+        LOG.info("Problem connecting to server: " + nnAddr + " :"
+            + e.getLocalizedMessage());
+        sleepAndLogInterrupts(1000, "connecting to server");
       } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + nnAddr);
         sleepAndLogInterrupts(1000, "connecting to server");



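The shape of the fix: the registration loop already retried on
SocketTimeoutException ("namenode is busy"), but an EOFException from a
NameNode that had just restarted escaped the loop, so the DataNode ended
up registered with only one of the HA NameNodes. A minimal sketch of the
retry shape, with register() as a hypothetical stand-in for
bpNamenode.registerDatanode(...):

    import java.io.EOFException;
    import java.net.SocketTimeoutException;

    abstract class RegistrationLoop {
      abstract void register() throws Exception;

      void retryRegister() throws Exception {
        while (true) {
          try {
            register();
            break;                             // registered successfully
          } catch (EOFException e) {           // NN might have just restarted
            Thread.sleep(1000);
          } catch (SocketTimeoutException e) { // NN is busy
            Thread.sleep(1000);
          }
        }
      }
    }
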
hadoop git commit: HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause DataNode to register successfully with only one NameNode.(Contributed by Vinayakumar B)

2015-02-09 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 24d4d933f - a1bf7aecf


HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause DataNode 
to register successfully with only one NameNode. (Contributed by Vinayakumar B)

(cherry picked from commit 3d15728ff5301296801e541d9b23bd1687c4adad)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1bf7aec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1bf7aec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1bf7aec

Branch: refs/heads/branch-2
Commit: a1bf7aecf7d018c5305fa3bd7a9e3ef9af3155c1
Parents: 24d4d93
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Feb 10 10:43:08 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Feb 10 10:45:14 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java  | 5 +
 2 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1bf7aec/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 06e40bd..6d2715c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -596,6 +596,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7718. Store KeyProvider in ClientContext to avoid leaking key provider
 threads when using FileContext (Arun Suresh via Colin P. McCabe)
 
+HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause
+DataNode to register successfully with only one NameNode.(vinayakumarb)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1bf7aec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index e6409ab..c344027 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.util.Time.now;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
@@ -803,6 +804,10 @@ class BPServiceActor implements Runnable {
 // Use returned registration from namenode with updated fields
 bpRegistration = bpNamenode.registerDatanode(bpRegistration);
 break;
+  } catch(EOFException e) {  // namenode might have just restarted
+        LOG.info("Problem connecting to server: " + nnAddr + " :"
+            + e.getLocalizedMessage());
+        sleepAndLogInterrupts(1000, "connecting to server");
       } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + nnAddr);
         sleepAndLogInterrupts(1000, "connecting to server");