hadoop git commit: HDFS-8920. Erasure Coding: when recovering lost blocks, logs can be too verbose and hurt performance. Contributed by Rui Li

2015-09-21 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 a9e6681ed -> 6fc942455


HDFS-8920. Erasure Coding: when recovering lost blocks, logs can be too verbose and hurt performance. Contributed by Rui Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fc94245
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fc94245
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fc94245

Branch: refs/heads/HDFS-7285
Commit: 6fc942455274e0c35008b2b8b689aed49b6719bb
Parents: a9e6681
Author: Kai Zheng 
Authored: Wed Sep 23 14:13:26 2015 +0800
Committer: Kai Zheng 
Committed: Wed Sep 23 14:13:26 2015 +0800

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 ++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 15 +++--
 .../hadoop/hdfs/DFSStripedInputStream.java  | 34 
 3 files changed, 49 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc94245/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 0e21d22..b79ce64 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -444,3 +444,6 @@
 
 HDFS-9091. Erasure Coding: Provide DistributedFilesystem API to 
 getAllErasureCodingPolicies. (Rakesh R via zhz)
+
+HDFS-8920. Erasure Coding: when recovering lost blocks, logs can be too
+verbose and hurt performance. (Rui Li via Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc94245/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index a5911cc..385ba4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1057,9 +1057,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   }
 }
 if (chosenNode == null) {
-  DFSClient.LOG.warn("No live nodes contain block " + block.getBlock() +
-  " after checking nodes = " + Arrays.toString(nodes) +
-  ", ignoredNodes = " + ignoredNodes);
+  reportLostBlock(block, ignoredNodes);
   return null;
 }
 final String dnAddr =
@@ -1071,6 +1069,17 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
 return new DNAddrPair(chosenNode, targetAddr, storageType);
   }
 
+  /**
+   * Warn the user of a lost block
+   */
+  protected void reportLostBlock(LocatedBlock lostBlock,
+      Collection<DatanodeInfo> ignoredNodes) {
+    DatanodeInfo[] nodes = lostBlock.getLocations();
+    DFSClient.LOG.warn("No live nodes contain block " + lostBlock.getBlock() +
+        " after checking nodes = " + Arrays.toString(nodes) +
+        ", ignoredNodes = " + ignoredNodes);
+  }
+
   private static String getBestNodeDNAddrPairErrorString(
      DatanodeInfo nodes[], AbstractMap<DatanodeInfo, DatanodeInfo> deadNodes, Collection<DatanodeInfo> ignoredNodes) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc94245/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index b7c22c4..131a552 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -45,8 +45,11 @@ import java.io.EOFException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
+import java.util.List;
 import java.util.Set;
 import java.util.Collection;
 import java.util.Map;
@@ -154,6 +157,17 @@ public class DFSStripedInputStream extends DFSInputStream {
   private StripeRange curStripeRange;
  private final CompletionService<Void> readingService;
 
+  /**
+   * When warning the user of a lost block in striping mode, we remember the
+   * dead nodes we've logged. All other striping blocks on these nodes can be
+ 
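
The message is truncated above just as the new javadoc begins. The idea it describes: remember which dead nodes have already been logged, so a striped read warns once per dead node instead of once per lost internal block. A minimal sketch of that pattern; the names (DeadNodeWarner, warnOnce) are illustrative, not the committed API:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of the de-duplicated warning the truncated javadoc describes.
class DeadNodeWarner {
  // Nodes already warned about: a striped read touches many internal blocks
  // on the same dead node, so warn only once per node, not per block.
  private final Set<String> warnedNodes = ConcurrentHashMap.newKeySet();

  void warnOnce(String nodeId, String blockId) {
    if (warnedNodes.add(nodeId)) {
      System.err.println("No live nodes contain block " + blockId
          + "; node " + nodeId + " appears dead");
    }
    // Repeat failures on an already-logged node stay quiet, keeping log
    // volume proportional to dead nodes rather than lost blocks.
  }
}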

hadoop git commit: HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai Zheng

2015-03-04 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 f1b28c19d -> 74e174689


HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74e17468
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74e17468
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74e17468

Branch: refs/heads/HDFS-7285
Commit: 74e1746893d09ac20eea54372fd4f7a7309f551e
Parents: f1b28c1
Author: drankye 
Authored: Thu Mar 5 22:51:52 2015 +0800
Committer: drankye 
Committed: Thu Mar 5 22:51:52 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   4 +
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 203 +++
 .../hadoop/io/erasurecode/TestECSchema.java |  54 +
 3 files changed, 261 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e17468/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 7bbacf7..ee42c84 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -12,3 +12,7 @@
 HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng
 ( Kai Zheng )
 
+HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai Zheng
+( Kai Zheng )
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e17468/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
new file mode 100644
index 000..8dc3f45
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * Erasure coding schema to housekeeper relevant information.
+ */
+public class ECSchema {
+  public static final String NUM_DATA_UNITS_KEY = "k";
+  public static final String NUM_PARITY_UNITS_KEY = "m";
+  public static final String CODEC_NAME_KEY = "codec";
+  public static final String CHUNK_SIZE_KEY = "chunkSize";
+  public static final int DEFAULT_CHUNK_SIZE = 64 * 1024; // 64K
+
+  private String schemaName;
+  private String codecName;
+  private Map<String, String> options;
+  private int numDataUnits;
+  private int numParityUnits;
+  private int chunkSize;
+
+  /**
+   * Constructor with schema name and provided options. Note the options may
+   * contain additional information for the erasure codec to interpret further.
+   * @param schemaName schema name
+   * @param options schema options
+   */
+  public ECSchema(String schemaName, Map<String, String> options) {
+assert (schemaName != null && ! schemaName.isEmpty());
+
+this.schemaName = schemaName;
+
+if (options == null || options.isEmpty()) {
+  throw new IllegalArgumentException("No schema options are provided");
+}
+
+String codecName = options.get(CODEC_NAME_KEY);
+if (codecName == null || codecName.isEmpty()) {
+  throw new IllegalArgumentException("No codec option is provided");
+}
+
+int dataUnits = 0, parityUnits = 0;
+try {
+  if (options.containsKey(NUM_DATA_UNITS_KEY)) {
+dataUnits = Integer.parseInt(options.get(NUM_DATA_UNITS_KEY));
+  }
+} catch (NumberFormatException e) {
+  throw new IllegalArgumentException("Option value " +
+  options.get(CHUNK_SIZE_
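
The constructor excerpt is cut off above, but the keys it declares are enough for a hedged usage sketch. The schema name "RS-6-3" and the 6+3 layout are illustrative values, not mandated by the class:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class ECSchemaUsage {
  public static void main(String[] args) {
    Map<String, String> options = new HashMap<>();
    options.put(ECSchema.NUM_DATA_UNITS_KEY, "6");    // "k"
    options.put(ECSchema.NUM_PARITY_UNITS_KEY, "3");  // "m"
    options.put(ECSchema.CODEC_NAME_KEY, "RS");       // "codec"
    // CHUNK_SIZE_KEY is optional; DEFAULT_CHUNK_SIZE (64K here) applies if absent.

    // The constructor validates the map: a missing codec option or a
    // non-integer k/m value raises IllegalArgumentException, per the code above.
    ECSchema schema = new ECSchema("RS-6-3", options);
    System.out.println(schema);
  }
}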

hadoop git commit: HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng

2015-03-12 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 1e3231953 -> 8991bab74


HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8991bab7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8991bab7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8991bab7

Branch: refs/heads/HDFS-7285
Commit: 8991bab746d7a088d96f4d02581434cde914e1a9
Parents: 1e32319
Author: drankye 
Authored: Thu Mar 12 23:35:22 2015 +0800
Committer: drankye 
Committed: Thu Mar 12 23:35:22 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  4 +++
 .../erasurecode/coder/AbstractErasureCoder.java |  5 ++-
 .../rawcoder/AbstractRawErasureCoder.java   |  5 ++-
 .../hadoop/io/erasurecode/TestCoderBase.java|  6 
 .../erasurecode/coder/TestErasureCoderBase.java | 36 +---
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 13 +--
 6 files changed, 60 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8991bab7/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index c17a1bd..a97dc34 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -18,3 +18,7 @@
 HADOOP-11646. Erasure Coder API for encoding and decoding of block group
 ( Kai Zheng via vinayakumarb )
 
+HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng
+( Kai Zheng )
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8991bab7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
index f2cc041..8d3bc34 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.io.erasurecode.coder;
 
+import org.apache.hadoop.conf.Configured;
+
 /**
  * A common class of basic facilities to be shared by encoder and decoder
  *
  * It implements the {@link ErasureCoder} interface.
  */
-public abstract class AbstractErasureCoder implements ErasureCoder {
+public abstract class AbstractErasureCoder
+extends Configured implements ErasureCoder {
 
   private int numDataUnits;
   private int numParityUnits;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8991bab7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index 74d2ab6..e6f3d92 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
+import org.apache.hadoop.conf.Configured;
+
 /**
  * A common class of basic facilities to be shared by encoder and decoder
  *
  * It implements the {@link RawErasureCoder} interface.
  */
-public abstract class AbstractRawErasureCoder implements RawErasureCoder {
+public abstract class AbstractRawErasureCoder
+extends Configured implements RawErasureCoder {
 
   private int numDataUnits;
   private int numParityUnits;
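
Both abstract coders now inherit setConf()/getConf() from org.apache.hadoop.conf.Configured, so a Configuration can be injected without coder-specific plumbing. A minimal sketch of what that buys; the class and the configuration key here are made up for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;

// Extending Configured supplies the standard Hadoop setConf()/getConf() pair.
class ConfigurableCoderSketch extends Configured {
  boolean timingEnabled() {
    Configuration conf = getConf();
    // "io.erasurecode.coder.timing.enabled" is a hypothetical key.
    return conf != null
        && conf.getBoolean("io.erasurecode.coder.timing.enabled", false);
  }
}

class Demo {
  public static void main(String[] args) {
    ConfigurableCoderSketch coder = new ConfigurableCoderSketch();
    coder.setConf(new Configuration());  // as the updated tests now do
    System.out.println(coder.timingEnabled());
  }
}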

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8991bab7/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
index 3c4288c..194413a 100644
---

hadoop git commit: Fixed a compiling issue introduced by HADOOP-11705.

2015-03-12 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 8991bab74 -> 5ff0e73ce


Fixed a compiling issue introduced by HADOOP-11705.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ff0e73c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ff0e73c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ff0e73c

Branch: refs/heads/HDFS-7285
Commit: 5ff0e73ceb5f717e3cb33b53e89260109d75e36f
Parents: 8991bab
Author: Kai Zheng 
Authored: Fri Mar 13 00:13:06 2015 +0800
Committer: Kai Zheng 
Committed: Fri Mar 13 00:13:06 2015 +0800

--
 .../apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ff0e73c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
index 36e061a..d911db9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
@@ -162,7 +162,7 @@ public abstract class TestErasureCoderBase extends TestCoderBase {
 }
 
 encoder.initialize(numDataUnits, numParityUnits, chunkSize);
-encoder.setConf(conf);
+((AbstractErasureCoder)encoder).setConf(conf);
 return encoder;
   }
 
@@ -179,7 +179,7 @@ public abstract class TestErasureCoderBase extends TestCoderBase {
 }
 
 decoder.initialize(numDataUnits, numParityUnits, chunkSize);
-decoder.setConf(conf);
+((AbstractErasureCoder)decoder).setConf(conf);
 return decoder;
   }
 



[2/2] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt accordingly

2015-03-17 Thread drankye
Updated CHANGES-HDFS-EC-7285.txt accordingly


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d604386
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d604386
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d604386

Branch: refs/heads/HDFS-7285
Commit: 7d6043869a970f8be6bf56ce0fbe14d4956a35b3
Parents: 902c9a7
Author: Kai Zheng 
Authored: Wed Mar 18 19:24:24 2015 +0800
Committer: Kai Zheng 
Committed: Wed Mar 18 19:24:24 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d604386/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index a97dc34..e27ff5c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -19,6 +19,9 @@
 ( Kai Zheng via vinayakumarb )
 
 HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng
-( Kai Zheng )
+( Kai Zheng )
+
+HADOOP-11706. Refine a little bit erasure coder API. Contributed by Kai Zheng
+( Kai Zheng )
 
 



[1/2] hadoop git commit: HADOOP-11706 Refine a little bit erasure coder API

2015-03-17 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 050018b11 -> 7d6043869


HADOOP-11706 Refine a little bit erasure coder API


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/902c9a73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/902c9a73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/902c9a73

Branch: refs/heads/HDFS-7285
Commit: 902c9a73f593337c8c87c8434fa167d3076f6453
Parents: 050018b
Author: Kai Zheng 
Authored: Wed Mar 18 19:21:37 2015 +0800
Committer: Kai Zheng 
Committed: Wed Mar 18 19:21:37 2015 +0800

--
 .../io/erasurecode/coder/ErasureCoder.java  |  4 +++-
 .../erasurecode/rawcoder/RawErasureCoder.java   |  4 +++-
 .../hadoop/io/erasurecode/TestCoderBase.java| 17 +---
 .../erasurecode/coder/TestErasureCoderBase.java | 21 +++-
 .../erasurecode/rawcoder/TestJRSRawCoder.java   | 12 +--
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  2 ++
 6 files changed, 31 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/902c9a73/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
index 68875c0..c5922f3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode.coder;
 
+import org.apache.hadoop.conf.Configurable;
+
 /**
  * An erasure coder to perform encoding or decoding given a group. Generally it
  * involves calculating necessary internal steps according to codec logic. For
@@ -31,7 +33,7 @@ package org.apache.hadoop.io.erasurecode.coder;
  * of multiple coding steps.
  *
  */
-public interface ErasureCoder {
+public interface ErasureCoder extends Configurable {
 
   /**
* Initialize with the important parameters for the code.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902c9a73/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
index 91a9abf..9af5b6c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
+import org.apache.hadoop.conf.Configurable;
+
 /**
  * RawErasureCoder is a common interface for {@link RawErasureEncoder} and
  * {@link RawErasureDecoder} as both encoder and decoder share some properties.
@@ -31,7 +33,7 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
  * low level constructs, since it only takes care of the math calculation with
  * a group of byte buffers.
  */
-public interface RawErasureCoder {
+public interface RawErasureCoder extends Configurable {
 
   /**
* Initialize with the important parameters for the code.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902c9a73/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
index 194413a..22fd98d 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
+import org.apache.hadoop.conf.Configuration;
+
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Random;
 
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -31,6 +32,7 @@ import static org.junit.Assert.assertTrue;
 public abstract class TestCoderBase {
   protected static 

hadoop git commit: HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai Zheng

2015-03-19 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 5c6a774ff -> a34ecf7c7


HADOOP-11707. Add factory to create raw erasure coder.  Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a34ecf7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a34ecf7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a34ecf7c

Branch: refs/heads/HDFS-7285
Commit: a34ecf7c73f61853ad889ee589bf89be64aa8a40
Parents: 5c6a774
Author: Kai Zheng 
Authored: Fri Mar 20 15:07:00 2015 +0800
Committer: Kai Zheng 
Committed: Fri Mar 20 15:07:00 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +-
 .../rawcoder/JRSRawErasureCoderFactory.java | 34 ++
 .../rawcoder/RawErasureCoderFactory.java| 38 
 .../rawcoder/XorRawErasureCoderFactory.java | 34 ++
 4 files changed, 108 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a34ecf7c/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index e27ff5c..f566f0e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -24,4 +24,5 @@
 HADOOP-11706. Refine a little bit erasure coder API. Contributed by Kai Zheng
 ( Kai Zheng )
 
-
+HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai Zheng
+( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a34ecf7c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
new file mode 100644
index 000..d6b40aa
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+/**
+ * A raw coder factory for raw Reed-Solomon coder in Java.
+ */
+public class JRSRawErasureCoderFactory implements RawErasureCoderFactory {
+
+  @Override
+  public RawErasureEncoder createEncoder() {
+return new JRSRawEncoder();
+  }
+
+  @Override
+  public RawErasureDecoder createDecoder() {
+return new JRSRawDecoder();
+  }
+}
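
A hedged consumer-side sketch of the new factory: the factory is the single name a caller needs, and it guarantees the encoder and decoder it hands out belong to the same codec. The initialize(numDataUnits, numParityUnits, chunkSize) call follows the coder API shown elsewhere in this digest; the 6+3/64K parameters are illustrative:

import org.apache.hadoop.io.erasurecode.rawcoder.JRSRawErasureCoderFactory;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

public class FactoryUsage {
  public static void main(String[] args) {
    // Swap in XorRawErasureCoderFactory here and nothing else changes.
    RawErasureCoderFactory factory = new JRSRawErasureCoderFactory();
    RawErasureEncoder encoder = factory.createEncoder();
    RawErasureDecoder decoder = factory.createDecoder();
    encoder.initialize(6, 3, 64 * 1024);
    decoder.initialize(6, 3, 64 * 1024);
  }
}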

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a34ecf7c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
new file mode 100644
index 000..95a1cfe
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+

hadoop git commit: HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng

2015-03-19 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 a34ecf7c7 -> 180c54ca6


HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/180c54ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/180c54ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/180c54ca

Branch: refs/heads/HDFS-7285
Commit: 180c54ca6003acf47405b5952856cf9a0fe17e75
Parents: a34ecf7
Author: Kai Zheng 
Authored: Fri Mar 20 19:15:52 2015 +0800
Committer: Kai Zheng 
Committed: Fri Mar 20 19:15:52 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +
 .../hadoop/fs/CommonConfigurationKeys.java  | 15 
 .../erasurecode/coder/AbstractErasureCoder.java | 65 ++
 .../coder/AbstractErasureDecoder.java   |  6 +-
 .../coder/AbstractErasureEncoder.java   |  6 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  | 83 ++
 .../io/erasurecode/coder/RSErasureEncoder.java  | 47 ++
 .../io/erasurecode/coder/XorErasureDecoder.java |  2 +-
 .../io/erasurecode/coder/XorErasureEncoder.java |  2 +-
 .../erasurecode/coder/TestRSErasureCoder.java   | 92 
 10 files changed, 315 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/180c54ca/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index f566f0e..b69e69a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -26,3 +26,6 @@
 
 HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai Zheng
 ( Kai Zheng )
+
+HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng
+( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/180c54ca/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 7575496..70fea01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -135,6 +135,21 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   false;
 
   /**
+   * Erasure Coding configuration family
+   */
+
+  /** Supported erasure codec classes */
+  public static final String IO_ERASURECODE_CODECS_KEY =
+      "io.erasurecode.codecs";
+
+  /** Use XOR raw coder when possible for the RS codec */
+  public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
+  "io.erasurecode.codec.rs.usexor";
+
+  /** Raw coder factory for the RS codec */
+  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
+  "io.erasurecode.codec.rs.rawcoder";
+
+  /**
* Service Authorization
*/
   public static final String 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/180c54ca/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
index 8d3bc34..0e4de89 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.io.erasurecode.coder;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 
 /**
  * A common class of basic facilities to be shared by encoder and decoder
@@ -31,6 +36,66 @@ public abstract class AbstractErasureCoder
   private int numParityUnits;
   private int chunkSize;
 
+  /**
+   * Create

hadoop git commit: Minor, fixed the ec schema file property key

2015-03-29 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 45bf18e2a -> 27cb5701e


Minor, fixed the ec schema file property key


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27cb5701
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27cb5701
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27cb5701

Branch: refs/heads/HDFS-7285
Commit: 27cb5701ee5d418a66d17ec68b7c8ab52d4a2c70
Parents: 45bf18e
Author: Kai Zheng 
Authored: Mon Mar 30 07:21:05 2015 +0800
Committer: Drankye 
Committed: Mon Mar 30 07:21:05 2015 +0800

--
 .../main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27cb5701/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index cbf0e61..af32674 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -142,7 +142,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String IO_ERASURECODE_CODECS_KEY =
       "io.erasurecode.codecs";
 
   public static final String IO_ERASURECODE_SCHEMA_FILE_KEY =
-  "hadoop.io.erasurecode.";
+  "io.erasurecode.schema.file";
   public static final String IO_ERASURECODE_SCHEMA_FILE_DEFAULT =
   "ecschema-def.xml";
 



[2/2] hadoop git commit: Update CHANGES-HDFS-EC-7285.txt

2015-04-01 Thread drankye
Update CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/455b89d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/455b89d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/455b89d0

Branch: refs/heads/HDFS-7285
Commit: 455b89d069684e9ac76b2a88106ad60666cac5e4
Parents: 2f9119a
Author: Kai Zheng 
Authored: Thu Apr 2 05:15:58 2015 +0800
Committer: Kai Zheng 
Committed: Thu Apr 2 05:15:58 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/455b89d0/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index b69e69a..01280db 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -29,3 +29,6 @@
 
 HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng
 ( Kai Zheng )
+
+HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by Xinwei Qin
+( Xinwei Qin via Kai Zheng )



[1/2] hadoop git commit: HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by Xinwei Qin

2015-04-01 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 b3e2fc1ed -> 455b89d06


HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by Xinwei Qin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f9119a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f9119a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f9119a8

Branch: refs/heads/HDFS-7285
Commit: 2f9119a8b1e73fa8c1efa9f11e446a91ea01b449
Parents: b3e2fc1
Author: Kai Zheng 
Authored: Thu Apr 2 05:12:35 2015 +0800
Committer: Kai Zheng 
Committed: Thu Apr 2 05:12:35 2015 +0800

--
 .../src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f9119a8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 8dc3f45..27be00e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -64,7 +64,7 @@ public class ECSchema {
   }
 } catch (NumberFormatException e) {
   throw new IllegalArgumentException("Option value " +
-  options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+  options.get(NUM_DATA_UNITS_KEY) + " for " + NUM_DATA_UNITS_KEY +
   " is found. It should be an integer");
 }
 
@@ -74,7 +74,7 @@ public class ECSchema {
   }
 } catch (NumberFormatException e) {
   throw new IllegalArgumentException("Option value " +
-  options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+  options.get(NUM_PARITY_UNITS_KEY) + " for " + NUM_PARITY_UNITS_KEY +
   " is found. It should be an integer");
 }
 



[2/2] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-04-07 Thread drankye
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ceda439
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ceda439
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ceda439

Branch: refs/heads/HDFS-7285
Commit: 1ceda439494e62865f2e87271cfcc1d0052e3240
Parents: a948cb7
Author: Kai Zheng 
Authored: Wed Apr 8 01:31:46 2015 +0800
Committer: Kai Zheng 
Committed: Wed Apr 8 01:31:46 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ceda439/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 01280db..68d1d32 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -32,3 +32,6 @@
 
 HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by Xinwei Qin
 ( Xinwei Qin via Kai Zheng )
+
+HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng
+( Kai Zheng )



[1/2] hadoop git commit: HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng

2015-04-07 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 a84274f90 -> 1ceda4394


HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a948cb76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a948cb76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a948cb76

Branch: refs/heads/HDFS-7285
Commit: a948cb76e4c9c9dabcf268040ba191b1b0be1fd7
Parents: a84274f
Author: Kai Zheng 
Authored: Wed Apr 8 01:26:40 2015 +0800
Committer: Kai Zheng 
Committed: Wed Apr 8 01:26:40 2015 +0800

--
 .../io/erasurecode/coder/RSErasureDecoder.java  |  8 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  4 +-
 .../io/erasurecode/coder/XORErasureDecoder.java | 78 
 .../io/erasurecode/coder/XORErasureEncoder.java | 45 ++
 .../io/erasurecode/coder/XorErasureDecoder.java | 78 
 .../io/erasurecode/coder/XorErasureEncoder.java | 45 --
 .../io/erasurecode/rawcoder/JRSRawDecoder.java  | 69 ---
 .../io/erasurecode/rawcoder/JRSRawEncoder.java  | 78 
 .../rawcoder/JRSRawErasureCoderFactory.java | 34 ---
 .../io/erasurecode/rawcoder/RSRawDecoder.java   | 69 +++
 .../io/erasurecode/rawcoder/RSRawEncoder.java   | 78 
 .../rawcoder/RSRawErasureCoderFactory.java  | 34 +++
 .../io/erasurecode/rawcoder/XORRawDecoder.java  | 81 +
 .../io/erasurecode/rawcoder/XORRawEncoder.java  | 61 +
 .../rawcoder/XORRawErasureCoderFactory.java | 34 +++
 .../io/erasurecode/rawcoder/XorRawDecoder.java  | 81 -
 .../io/erasurecode/rawcoder/XorRawEncoder.java  | 61 -
 .../rawcoder/XorRawErasureCoderFactory.java | 34 ---
 .../erasurecode/coder/TestRSErasureCoder.java   |  4 +-
 .../io/erasurecode/coder/TestXORCoder.java  | 50 +++
 .../io/erasurecode/coder/TestXorCoder.java  | 50 ---
 .../erasurecode/rawcoder/TestJRSRawCoder.java   | 93 
 .../io/erasurecode/rawcoder/TestRSRawCoder.java | 93 
 .../erasurecode/rawcoder/TestXORRawCoder.java   | 49 +++
 .../erasurecode/rawcoder/TestXorRawCoder.java   | 51 ---
 25 files changed, 680 insertions(+), 682 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a948cb76/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index ba32f04..e2c5051 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -4,9 +4,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.rawcoder.JRSRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
-import org.apache.hadoop.io.erasurecode.rawcoder.XorRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;
 
 /**
  * Reed-Solomon erasure decoder that decodes a block group.
@@ -56,7 +56,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
   rsRawDecoder = createRawDecoder(
   CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY);
   if (rsRawDecoder == null) {
-rsRawDecoder = new JRSRawDecoder();
+rsRawDecoder = new RSRawDecoder();
   }
   rsRawDecoder.initialize(getNumDataUnits(),
   getNumParityUnits(), getChunkSize());
@@ -66,7 +66,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
 
   private RawErasureDecoder checkCreateXorRawDecoder() {
 if (xorRawDecoder == null) {
-  xorRawDecoder = new XorRawDecoder();
+  xorRawDecoder = new XORRawDecoder();
   xorRawDecoder.initialize(getNumDataUnits(), 1, getChunkSize());
 }
 return xorRawDecoder;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a948cb76/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/i

[1/2] hadoop git commit: HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng

2015-04-08 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 ead48867a -> d022be287


HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ca56197
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ca56197
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ca56197

Branch: refs/heads/HDFS-7285
Commit: 7ca56197d6b7f3074800c7b701f81b70b442a223
Parents: ead4886
Author: Kai Zheng 
Authored: Thu Apr 9 01:30:02 2015 +0800
Committer: Kai Zheng 
Committed: Thu Apr 9 01:30:02 2015 +0800

--
 .../src/main/conf/ecschema-def.xml  |  5 --
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 57 +-
 .../hdfs/server/namenode/ECSchemaManager.java   | 62 
 3 files changed, 117 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca56197/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
index e619485..e36d386 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
@@ -27,11 +27,6 @@ You can modify and remove those not used yet, or add new ones.
 -->
 
 
-  <schema name="RS-6-3">
-    <k>6</k>
-    <m>3</m>
-    <codec>RS</codec>
-  </schema>
   <schema name="RS-10-4">
     <k>10</k>
     <m>4</m>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca56197/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 27be00e..8c3310e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -23,12 +23,12 @@ import java.util.Map;
 /**
  * Erasure coding schema to housekeeper relevant information.
  */
-public class ECSchema {
+public final class ECSchema {
   public static final String NUM_DATA_UNITS_KEY = "k";
   public static final String NUM_PARITY_UNITS_KEY = "m";
   public static final String CODEC_NAME_KEY = "codec";
   public static final String CHUNK_SIZE_KEY = "chunkSize";
-  public static final int DEFAULT_CHUNK_SIZE = 64 * 1024; // 64K
+  public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
   private String schemaName;
   private String codecName;
@@ -82,6 +82,18 @@ public class ECSchema {
   }
 
   /**
+   * Constructor with key parameters provided.
+   * @param schemaName
+   * @param codecName
+   * @param numDataUnits
+   * @param numParityUnits
+   */
+  public ECSchema(String schemaName, String codecName,
+  int numDataUnits, int numParityUnits) {
+this(schemaName, codecName, numDataUnits, numParityUnits, null);
+  }
+
+  /**
* Constructor with key parameters provided. Note the options may contain
* additional information for the erasure codec to interpret further.
* @param schemaName
@@ -200,4 +212,45 @@ public class ECSchema {
 
 return sb.toString();
   }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != o.getClass()) {
+  return false;
+}
+
+ECSchema ecSchema = (ECSchema) o;
+
+if (numDataUnits != ecSchema.numDataUnits) {
+  return false;
+}
+if (numParityUnits != ecSchema.numParityUnits) {
+  return false;
+}
+if (chunkSize != ecSchema.chunkSize) {
+  return false;
+}
+if (!schemaName.equals(ecSchema.schemaName)) {
+  return false;
+}
+if (!codecName.equals(ecSchema.codecName)) {
+  return false;
+}
+return options.equals(ecSchema.options);
+  }
+
+  @Override
+  public int hashCode() {
+int result = schemaName.hashCode();
+result = 31 * result + codecName.hashCode();
+result = 31 * result + options.hashCode();
+result = 31 * result + numDataUnits;
+result = 31 * result + numParityUnits;
+result = 31 * result + chunkSize;
+
+return result;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca56197/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ECSchemaManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ECSchemaMana

[2/2] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-04-08 Thread drankye
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d022be28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d022be28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d022be28

Branch: refs/heads/HDFS-7285
Commit: d022be28718e7c2f3fe76bdb76b7e15c8d9eff0a
Parents: 7ca5619
Author: Kai Zheng 
Authored: Thu Apr 9 01:31:52 2015 +0800
Committer: Kai Zheng 
Committed: Thu Apr 9 01:31:52 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d022be28/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 7423033..5078a15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -52,4 +52,6 @@
 manage EC zones (Zhe Zhang)
 
 HDFS-8023. Erasure Coding: retrieve eraure coding schema for a file from
-NameNode (vinayakumarb)
\ No newline at end of file
+NameNode (vinayakumarb)
+
+HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
\ No newline at end of file



[2/2] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-04-09 Thread drankye
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19cc05b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19cc05b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19cc05b5

Branch: refs/heads/HDFS-7285
Commit: 19cc05b5220f4a9246348f2eb623613d04065679
Parents: f3885a7
Author: Kai Zheng 
Authored: Fri Apr 10 04:34:24 2015 +0800
Committer: Kai Zheng 
Committed: Fri Apr 10 04:34:24 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19cc05b5/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index c72394e..b850e11 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -40,3 +40,5 @@
 
 HADOOP-11645. Erasure Codec API covering the essential aspects for an erasure code
 ( Kai Zheng via vinayakumarb )
+  
+HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via Kai Zheng)



[1/2] hadoop git commit: HDFS-8104 Make hard-coded values consistent with the system default schema first before remove them. Contributed by Kai Zheng

2015-04-09 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 d022be287 -> de3621c0c


HDFS-8104 Make hard-coded values consistent with the system default schema first before remove them. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5635a44c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5635a44c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5635a44c

Branch: refs/heads/HDFS-7285
Commit: 5635a44cf84201aa5626685b93d54af4b36b68bb
Parents: d022be2
Author: Kai Zheng 
Authored: Fri Apr 10 00:16:28 2015 +0800
Committer: Kai Zheng 
Committed: Fri Apr 10 00:16:28 2015 +0800

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  12 +-
 .../hadoop/hdfs/TestPlanReadPortions.java   | 142 +++
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 112 ---
 3 files changed, 151 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5635a44c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index a888aa4..11c5260 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -180,11 +180,17 @@ public class HdfsConstants {
   public static final byte WARM_STORAGE_POLICY_ID = 5;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
 
-  public static final byte NUM_DATA_BLOCKS = 3;
-  public static final byte NUM_PARITY_BLOCKS = 2;
+
   public static final long BLOCK_GROUP_INDEX_MASK = 15;
   public static final byte MAX_BLOCKS_IN_GROUP = 16;
 
+  /*
+   * These values correspond to the values used by the system default schema.
+   * TODO: to be removed once all places use schema.
+   */
+
+  public static final byte NUM_DATA_BLOCKS = 6;
+  public static final byte NUM_PARITY_BLOCKS = 3;
   // The chunk size for striped block which is used by erasure coding
-  public static final int BLOCK_STRIPED_CELL_SIZE = 128 * 1024;
+  public static final int BLOCK_STRIPED_CELL_SIZE = 256 * 1024;
 }
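
With these constants (6 data blocks + 3 parity blocks, 256 KB striping cells laid out round-robin), a logical file offset maps to an internal block by simple division. A worked sketch under those assumptions; the offset is arbitrary:

public class StripeMathExample {
  static final int DATA_BLOCKS = 6;          // NUM_DATA_BLOCKS above
  static final int CELL_SIZE = 256 * 1024;   // BLOCK_STRIPED_CELL_SIZE above

  public static void main(String[] args) {
    long fileOffset = 3_000_000L;            // arbitrary logical offset

    long cellIndex = fileOffset / CELL_SIZE;           // which cell overall
    int blockIndex = (int) (cellIndex % DATA_BLOCKS);  // which internal block
    long stripeIndex = cellIndex / DATA_BLOCKS;        // full rows before it
    long offsetInCell = fileOffset % CELL_SIZE;
    long offsetInBlock = stripeIndex * CELL_SIZE + offsetInCell;

    // 3_000_000 / 262144 = cell 11, so block 11 % 6 = 5, stripe 1,
    // offset in block = 1 * 262144 + (3_000_000 - 11 * 262144) = 378560.
    System.out.printf("block=%d offsetInBlock=%d%n", blockIndex, offsetInBlock);
  }
}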

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5635a44c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
new file mode 100644
index 000..cf84b30
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.junit.Test;
+
+import static org.apache.hadoop.hdfs.DFSStripedInputStream.ReadPortion;
+import static org.junit.Assert.*;
+
+public class TestPlanReadPortions {
+
+  // We only support this as num of data blocks. It might be good enough for now
+  // for the purpose, even not flexible yet for any number in a schema.
+  private final short GROUP_SIZE = 3;
+  private final int CELLSIZE = 128 * 1024;
+
+  private void testPlanReadPortions(int startInBlk, int length,
+  int bufferOffset, int[] readLengths, int[] offsetsInBlock,
+  int[][] bufferOffsets, int[][] bufferLengths) {
+ReadPortion[] results = DFSStripedInputStream.planReadPortions(GROUP_SIZE,
+CELLSIZE, startInBlk, length, bufferOffset);
+assertEquals(GROUP_SIZE, results.length);
+
+for (int i = 0; i < GROUP_SIZE; i++) {
+  assertEquals(readLengths[i], results[i].getReadLength());
+  assertEquals(offs

[1/2] hadoop git commit: HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R

2015-04-09 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 de3621c0c -> 19cc05b52


HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3885a71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3885a71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3885a71

Branch: refs/heads/HDFS-7285
Commit: f3885a7131930802b71adc33b29ef7165a59962e
Parents: de3621c
Author: Kai Zheng 
Authored: Fri Apr 10 04:31:48 2015 +0800
Committer: Kai Zheng 
Committed: Fri Apr 10 04:31:48 2015 +0800

--
 .../hadoop/io/erasurecode/SchemaLoader.java  | 12 ++--
 .../io/erasurecode/coder/RSErasureDecoder.java   | 19 ++-
 .../io/erasurecode/coder/RSErasureEncoder.java   | 19 ++-
 .../io/erasurecode/coder/XORErasureDecoder.java  |  2 +-
 .../io/erasurecode/rawcoder/util/RSUtil.java | 17 +
 5 files changed, 60 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3885a71/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
index c51ed37..75dd03a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.w3c.dom.*;
@@ -36,7 +36,7 @@ import java.util.*;
  * A EC schema loading utility that loads predefined EC schemas from XML file
  */
 public class SchemaLoader {
-  private static final Log LOG = LogFactory.getLog(SchemaLoader.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(SchemaLoader.class.getName());
 
   /**
* Load predefined ec schemas from configuration file. This file is
@@ -63,7 +63,7 @@ public class SchemaLoader {
  private List<ECSchema> loadSchema(File schemaFile)
   throws ParserConfigurationException, IOException, SAXException {
 
-LOG.info("Loading predefined EC schema file " + schemaFile);
+LOG.info("Loading predefined EC schema file {}", schemaFile);
 
 // Read and parse the schema file.
 DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
@@ -87,7 +87,7 @@ public class SchemaLoader {
   ECSchema schema = loadSchema(element);
 schemas.add(schema);
 } else {
-  LOG.warn("Bad element in EC schema configuration file: " +
+  LOG.warn("Bad element in EC schema configuration file: {}",
   element.getTagName());
 }
   }
@@ -109,7 +109,7 @@ public class SchemaLoader {
   URL url = Thread.currentThread().getContextClassLoader()
   .getResource(schemaFilePath);
   if (url == null) {
-LOG.warn(schemaFilePath + " not found on the classpath.");
+LOG.warn("{} not found on the classpath.", schemaFilePath);
 schemaFile = null;
   } else if (! url.getProtocol().equalsIgnoreCase("file")) {
 throw new RuntimeException(
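
The point of the {} placeholders adopted above is that SLF4J defers message formatting until after the level check, so no string is built when the level is disabled. A small standalone illustration (SLF4J is a real API; this snippet is not part of the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogDemo {
  private static final Logger LOG = LoggerFactory.getLogger(LogDemo.class);

  public static void main(String[] args) {
    Object costly = new Object();
    // Concatenation builds the string even when INFO is disabled:
    //   LOG.info("Loaded " + costly);
    // The parameterized form formats only if the level check passes:
    LOG.info("Loaded {}", costly);
  }
}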

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3885a71/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index e2c5051..fc664a5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the Licens

[2/2] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-04-09 Thread drankye
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de3621c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de3621c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de3621c0

Branch: refs/heads/HDFS-7285
Commit: de3621c0cdee724217b636c13b35de6ef64cd89f
Parents: 5635a44
Author: Kai Zheng 
Authored: Fri Apr 10 00:18:14 2015 +0800
Committer: Kai Zheng 
Committed: Fri Apr 10 00:18:14 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3621c0/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 5078a15..1e695c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -54,4 +54,6 @@
 HDFS-8023. Erasure Coding: retrieve eraure coding schema for a file from
 NameNode (vinayakumarb)
 
-HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
\ No newline at end of file
+HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
+
+HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
\ No newline at end of file



hadoop git commit: HADOOP-11541. Raw XOR coder

2015-02-07 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-EC 2fc501aad -> e031adecc


HADOOP-11541. Raw XOR coder


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e031adec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e031adec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e031adec

Branch: refs/heads/HDFS-EC
Commit: e031adecc5cb7414395c6708e8d6fb12dcf6f8d8
Parents: 2fc501a
Author: Kai Zheng 
Authored: Sun Feb 8 01:40:27 2015 +0800
Committer: drankye 
Committed: Sun Feb 8 01:40:27 2015 +0800

--
 .../io/erasurecode/rawcoder/XorRawDecoder.java  |  81 ++
 .../io/erasurecode/rawcoder/XorRawEncoder.java  |  61 +
 .../hadoop/io/erasurecode/TestCoderBase.java| 262 +++
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  96 +++
 .../erasurecode/rawcoder/TestXorRawCoder.java   |  52 
 5 files changed, 552 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e031adec/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
new file mode 100644
index 000..98307a7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw decoder in XOR code scheme in pure Java, adapted from HDFS-RAID.
+ */
+public class XorRawDecoder extends AbstractRawErasureDecoder {
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+  ByteBuffer[] outputs) {
+assert(erasedIndexes.length == outputs.length);
+assert(erasedIndexes.length <= 1);
+
+int bufSize = inputs[0].remaining();
+int erasedIdx = erasedIndexes[0];
+
+// Set the output to zeros.
+for (int j = 0; j < bufSize; j++) {
+  outputs[0].put(j, (byte) 0);
+}
+
+// Process the inputs.
+for (int i = 0; i < inputs.length; i++) {
+  // Skip the erased location.
+  if (i == erasedIdx) {
+continue;
+  }
+
+  for (int j = 0; j < bufSize; j++) {
+outputs[0].put(j, (byte) (outputs[0].get(j) ^ inputs[i].get(j)));
+  }
+}
+  }
+
+  @Override
+  protected void doDecode(byte[][] inputs, int[] erasedIndexes,
+  byte[][] outputs) {
+assert(erasedIndexes.length == outputs.length);
+assert(erasedIndexes.length <= 1);
+
+int bufSize = inputs[0].length;
+int erasedIdx = erasedIndexes[0];
+
+// Set the output to zeros.
+for (int j = 0; j < bufSize; j++) {
+  outputs[0][j] = 0;
+}
+
+// Process the inputs.
+for (int i = 0; i < inputs.length; i++) {
+  // Skip the erased location.
+  if (i == erasedIdx) {
+continue;
+  }
+
+  for (int j = 0; j < bufSize; j++) {
+outputs[0][j] ^= inputs[i][j];
+  }
+}
+  }
+
+}
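
The decoder above works because XOR parity satisfies parity = d0 ^ d1 ^ ... ^ dn-1, so any single erased unit equals the XOR of all surviving units. A standalone check of that invariant (illustrative only, not the Hadoop API):

public class XorDemo {
  public static void main(String[] args) {
    byte[][] data = { {1, 2}, {3, 4}, {5, 6} };
    byte[] parity = new byte[2];
    for (byte[] unit : data)
      for (int j = 0; j < parity.length; j++) parity[j] ^= unit[j];

    // "Lose" data[1]; recover it from the other units plus parity.
    byte[] recovered = parity.clone();
    for (int i = 0; i < data.length; i++) {
      if (i == 1) continue;                 // skip the erased unit
      for (int j = 0; j < recovered.length; j++) recovered[j] ^= data[i][j];
    }
    System.out.println(java.util.Arrays.equals(recovered, data[1])); // true
  }
}

This is also why the asserts limit erasedIndexes.length to 1: a single parity unit can recover at most one erasure.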

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e031adec/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
new file mode 100644
index 000..99b20b9
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to

hadoop git commit: Added the missed entry for commit of HADOOP-11541

2015-02-08 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-EC b5625a72a -> 71a67e955


Added the missed entry for commit of HADOOP-11541


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71a67e95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71a67e95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71a67e95

Branch: refs/heads/HDFS-EC
Commit: 71a67e95517f34ac86bcdbc924cb81f5f883fd91
Parents: b5625a7
Author: drankye 
Authored: Mon Feb 9 22:04:08 2015 +0800
Committer: drankye 
Committed: Mon Feb 9 22:04:08 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71a67e95/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 2124800..9728f97 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -4,4 +4,7 @@
 (Kai Zheng via umamahesh)
 
 HADOOP-11534. Minor improvements for raw erasure coders
-( Kai Zheng via vinayakumarb )
\ No newline at end of file
+( Kai Zheng via vinayakumarb )
+
+HADOOP-11541. Raw XOR coder
+( Kai Zheng )



[1/2] hadoop git commit: HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng

2015-02-11 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-EC 5bb5e3c8f -> 41921ce96


HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21c2076b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21c2076b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21c2076b

Branch: refs/heads/HDFS-EC
Commit: 21c2076b9d311ddab6ec3f8044c3af81066cb82b
Parents: 5bb5e3c
Author: drankye 
Authored: Thu Feb 12 19:57:57 2015 +0800
Committer: drankye 
Committed: Thu Feb 12 19:57:57 2015 +0800

--
 .../io/erasurecode/rawcoder/JRSRawDecoder.java  |  69 +++
 .../io/erasurecode/rawcoder/JRSRawEncoder.java  |  78 +++
 .../erasurecode/rawcoder/RawErasureCoder.java   |   2 +-
 .../erasurecode/rawcoder/util/GaloisField.java  | 497 +++
 .../io/erasurecode/rawcoder/util/RSUtil.java|  22 +
 .../hadoop/io/erasurecode/TestCoderBase.java|  28 +-
 .../erasurecode/rawcoder/TestJRSRawCoder.java   |  93 
 .../erasurecode/rawcoder/TestRawCoderBase.java  |   5 +-
 .../erasurecode/rawcoder/TestXorRawCoder.java   |   1 -
 9 files changed, 782 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21c2076b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
new file mode 100644
index 000..dbb689e
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.io.erasurecode.rawcoder.util.RSUtil;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw erasure decoder in RS code scheme in pure Java in case native one
+ * isn't available in some environment. Please always use native 
implementations
+ * when possible.
+ */
+public class JRSRawDecoder extends AbstractRawErasureDecoder {
+  // To describe and calculate the needed Vandermonde matrix
+  private int[] errSignature;
+  private int[] primitivePower;
+
+  @Override
+  public void initialize(int numDataUnits, int numParityUnits, int chunkSize) {
+super.initialize(numDataUnits, numParityUnits, chunkSize);
+assert (getNumDataUnits() + getNumParityUnits() < RSUtil.GF.getFieldSize());
+
+this.errSignature = new int[getNumParityUnits()];
+this.primitivePower = RSUtil.getPrimitivePower(getNumDataUnits(),
+getNumParityUnits());
+  }
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+  ByteBuffer[] outputs) {
+for (int i = 0; i < erasedIndexes.length; i++) {
+  errSignature[i] = primitivePower[erasedIndexes[i]];
+  RSUtil.GF.substitute(inputs, outputs[i], primitivePower[i]);
+}
+
+int dataLen = inputs[0].remaining();
+RSUtil.GF.solveVandermondeSystem(errSignature, outputs,
+erasedIndexes.length, dataLen);
+  }
+
+  @Override
+  protected void doDecode(byte[][] inputs, int[] erasedIndexes,
+  byte[][] outputs) {
+for (int i = 0; i < erasedIndexes.length; i++) {
+  errSignature[i] = primitivePower[erasedIndexes[i]];
+  RSUtil.GF.substitute(inputs, outputs[i], primitivePower[i]);
+}
+
+int dataLen = inputs[0].length;
+RSUtil.GF.solveVandermondeSystem(errSignature, outputs,
+erasedIndexes.length, dataLen);
+  }
+}
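
Reed-Solomon coding does all of its arithmetic in a Galois field, which is what the GaloisField helper introduced by this commit encapsulates. As a compact illustration of that arithmetic, here is GF(2^8) multiplication by carry-less multiply with reduction; the reduction polynomial 0x11B is an assumption chosen for illustration (it is the AES polynomial), not necessarily the one Hadoop's GaloisField uses:

public class GfDemo {
  // Multiply a and b in GF(2^8), reducing by x^8 + x^4 + x^3 + x + 1 (0x11B).
  static int gfMul(int a, int b) {
    int p = 0;
    for (int i = 0; i < 8; i++) {
      if ((b & 1) != 0) p ^= a;         // "add" (XOR) a for this bit of b
      boolean carry = (a & 0x80) != 0;
      a = (a << 1) & 0xFF;
      if (carry) a ^= 0x1B;             // reduce modulo the field polynomial
      b >>= 1;
    }
    return p;
  }

  public static void main(String[] args) {
    System.out.printf("%02x%n", gfMul(0x57, 0x83)); // c1, a standard test vector
  }
}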

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21c2076b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawEncoder.java
--

[2/2] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt for HADOOP-11542.

2015-02-11 Thread drankye
Updated CHANGES-HDFS-EC-7285.txt for HADOOP-11542.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41921ce9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41921ce9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41921ce9

Branch: refs/heads/HDFS-EC
Commit: 41921ce96185971cfd593581f2443b0cfe2ccc21
Parents: 21c2076
Author: drankye 
Authored: Thu Feb 12 20:03:33 2015 +0800
Committer: drankye 
Committed: Thu Feb 12 20:03:33 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41921ce9/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 9728f97..6fa1c75 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -8,3 +8,7 @@
 
 HADOOP-11541. Raw XOR coder
 ( Kai Zheng )
+
+HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng
+( Kai Zheng )
+



hadoop git commit: HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng

2015-02-11 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 2516efd8a -> e74644311


HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7464431
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7464431
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7464431

Branch: refs/heads/HDFS-7285
Commit: e746443111b89690dbefd0595d8ab346e38cc101
Parents: 2516efd
Author: drankye 
Authored: Thu Feb 12 21:12:44 2015 +0800
Committer: drankye 
Committed: Thu Feb 12 21:12:44 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   4 +
 .../io/erasurecode/rawcoder/JRSRawDecoder.java  |  69 +++
 .../io/erasurecode/rawcoder/JRSRawEncoder.java  |  78 +++
 .../erasurecode/rawcoder/RawErasureCoder.java   |   2 +-
 .../erasurecode/rawcoder/util/GaloisField.java  | 497 +++
 .../io/erasurecode/rawcoder/util/RSUtil.java|  22 +
 .../hadoop/io/erasurecode/TestCoderBase.java|  28 +-
 .../erasurecode/rawcoder/TestJRSRawCoder.java   |  93 
 .../erasurecode/rawcoder/TestRawCoderBase.java  |   5 +-
 .../erasurecode/rawcoder/TestXorRawCoder.java   |   1 -
 10 files changed, 786 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7464431/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 9728f97..7bbacf7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -8,3 +8,7 @@
 
 HADOOP-11541. Raw XOR coder
 ( Kai Zheng )
+
+HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng
+( Kai Zheng )
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7464431/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
new file mode 100644
index 000..dbb689e
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.io.erasurecode.rawcoder.util.RSUtil;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw erasure decoder in RS code scheme in pure Java in case native one
+ * isn't available in some environment. Please always use native implementations
+ * when possible.
+ */
+public class JRSRawDecoder extends AbstractRawErasureDecoder {
+  // To describe and calculate the needed Vandermonde matrix
+  private int[] errSignature;
+  private int[] primitivePower;
+
+  @Override
+  public void initialize(int numDataUnits, int numParityUnits, int chunkSize) {
+super.initialize(numDataUnits, numParityUnits, chunkSize);
+assert (getNumDataUnits() + getNumParityUnits() < RSUtil.GF.getFieldSize());
+
+this.errSignature = new int[getNumParityUnits()];
+this.primitivePower = RSUtil.getPrimitivePower(getNumDataUnits(),
+getNumParityUnits());
+  }
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+  ByteBuffer[] outputs) {
+for (int i = 0; i < erasedIndexes.length; i++) {
+  errSignature[i] = primitivePower[erasedIndexes[i]];
+  RSUtil.GF.substitute(inputs, outputs[i], primitivePower[i]);
+}
+
+int dataLen = inputs[0].remaining();
+RSUtil.GF.solveVandermondeSystem(errSignature, outp

hadoop git commit: HDFS-8136. Client gets and uses EC schema when it reads and writes a striped file. Contributed by Kai Sasaki

2015-04-23 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 2b093df01 -> 9d3e3569f


HDFS-8136. Client gets and uses EC schema when it reads and writes a striped 
file. Contributed by Kai Sasaki


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d3e3569
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d3e3569
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d3e3569

Branch: refs/heads/HDFS-7285
Commit: 9d3e3569fe0b19fc0ee13ae5ecf385f533467dfa
Parents: 2b093df
Author: Kai Zheng 
Authored: Fri Apr 24 00:19:12 2015 +0800
Committer: Kai Zheng 
Committed: Fri Apr 24 00:19:12 2015 +0800

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/DFSStripedInputStream.java  |  17 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java |  24 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 175 +++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   4 +-
 .../apache/hadoop/hdfs/TestReadStripedFile.java |   1 -
 7 files changed, 210 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3e3569/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index b2faac0..8977c46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -119,3 +119,6 @@
 
 HDFS-8156. Add/implement necessary APIs even we just have the system default schema. (Kai Zheng via Zhe Zhang)
 schema. (Kai Zheng via Zhe Zhang)
+
+HDFS-8136. Client gets and uses EC schema when it reads and writes a striped
+file. (Kai Sasaki via Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3e3569/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index d597407..d0e2b68 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -21,9 +21,9 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.ECInfo;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.NetUtils;
@@ -125,13 +125,19 @@ public class DFSStripedInputStream extends DFSInputStream {
 return results;
   }
 
-  private int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
-  private final short dataBlkNum = HdfsConstants.NUM_DATA_BLOCKS;
-  private final short parityBlkNum = HdfsConstants.NUM_PARITY_BLOCKS;
+  private final int cellSize;
+  private final short dataBlkNum;
+  private final short parityBlkNum;
+  private final ECInfo ecInfo;
 
  DFSStripedInputStream(DFSClient dfsClient, String src, boolean verifyChecksum)
   throws IOException {
 super(dfsClient, src, verifyChecksum);
+// ECInfo is restored from NN just before reading striped file.
+ecInfo = dfsClient.getErasureCodingInfo(src);
+cellSize = ecInfo.getSchema().getChunkSize();
+dataBlkNum = (short)ecInfo.getSchema().getNumDataUnits();
+parityBlkNum = (short)ecInfo.getSchema().getNumParityUnits();
 DFSClient.LOG.debug("Creating an striped input stream for file " + src);
   }
 
@@ -279,9 +285,6 @@ public class DFSStripedInputStream extends DFSInputStream {
 throw new InterruptedException("let's retry");
   }
 
-  public void setCellSize(int cellSize) {
-this.cellSize = cellSize;
-  }
 
   /**
* This class represents the portion of I/O associated with each block in the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3e3569/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/had
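
The pattern this hunk introduces is that striping parameters come from the file's ECSchema rather than global constants. A hypothetical sketch of that pattern, where the getter names (getChunkSize, getNumDataUnits, getNumParityUnits) are taken from the diff itself and SimpleSchema is an illustrative stand-in for ECSchema:

class SimpleSchema {
  int getChunkSize()      { return 128 * 1024; }
  int getNumDataUnits()   { return 6; }
  int getNumParityUnits() { return 3; }
}

public class SchemaDemo {
  public static void main(String[] args) {
    SimpleSchema schema = new SimpleSchema();
    // Derive the striping parameters per file, as the constructor above does.
    int   cellSize     = schema.getChunkSize();
    short dataBlkNum   = (short) schema.getNumDataUnits();
    short parityBlkNum = (short) schema.getNumParityUnits();
    System.out.println(cellSize + " / " + dataBlkNum + " / " + parityBlkNum);
  }
}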

hadoop git commit: HADOOP-11921. Enhance tests for erasure coders. Contributed by Kai Zheng

2015-05-06 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 850d7fac9 -> 3140bc0f0


HADOOP-11921. Enhance tests for erasure coders. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3140bc0f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3140bc0f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3140bc0f

Branch: refs/heads/HDFS-7285
Commit: 3140bc0f00cc12633ab5ff31613942c1ce9c90d2
Parents: 850d7fa
Author: Kai Zheng 
Authored: Thu May 7 06:07:51 2015 +0800
Committer: Kai Zheng 
Committed: Thu May 7 06:07:51 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  2 +
 .../hadoop/fs/CommonConfigurationKeys.java  |  4 --
 .../apache/hadoop/io/erasurecode/ECChunk.java   | 20 ++--
 .../erasurecode/coder/AbstractErasureCoder.java |  6 +--
 .../io/erasurecode/coder/RSErasureDecoder.java  | 40 +--
 .../rawcoder/AbstractRawErasureCoder.java   | 35 +-
 .../rawcoder/AbstractRawErasureDecoder.java | 51 
 .../rawcoder/AbstractRawErasureEncoder.java | 48 +-
 .../erasurecode/rawcoder/RawErasureCoder.java   |  8 +--
 .../erasurecode/rawcoder/RawErasureDecoder.java | 24 +
 .../io/erasurecode/rawcoder/XORRawDecoder.java  | 24 +++--
 .../io/erasurecode/rawcoder/XORRawEncoder.java  |  6 ++-
 .../hadoop/io/erasurecode/TestCoderBase.java|  4 +-
 .../erasurecode/coder/TestRSErasureCoder.java   |  4 +-
 14 files changed, 146 insertions(+), 130 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3140bc0f/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 9749270..bfbdcb9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -44,3 +44,5 @@
 HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via Kai Zheng)
 
 HADOOP-11841. Remove unused ecschema-def.xml files.  (szetszwo)
+
+HADOOP-11920 Enhance tests for erasure coders. (Kai Zheng)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3140bc0f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index bd2a24b..3f2871b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -143,10 +143,6 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   /** Supported erasure codec classes */
   public static final String IO_ERASURECODE_CODECS_KEY = "io.erasurecode.codecs";
 
-  /** Use XOR raw coder when possible for the RS codec */
-  public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
-  "io.erasurecode.codec.rs.usexor";
-
   /** Raw coder factory for the RS codec */
   public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
   "io.erasurecode.codec.rs.rawcoder";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3140bc0f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
index 01e8f35..34dd90b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -58,8 +58,15 @@ public class ECChunk {
   public static ByteBuffer[] toBuffers(ECChunk[] chunks) {
 ByteBuffer[] buffers = new ByteBuffer[chunks.length];
 
+ECChunk chunk;
 for (int i = 0; i < chunks.length; i++) {
-  buffers[i] = chunks[i].getBuffer();
+  chunk = chunks[i];
+  if (chunk == null) {
+buffers[i] = null;
+continue;
+  }
+
+  buffers[i] = chunk.getBuffer();
 }
 
 return buffers;
@@ -71,12 +78,19 @@ public class ECChunk {
* @param chunks
* @return an array of byte array
*/
-  public static byte[][] toArray(ECChunk[] chunks) {
+  pub
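
The null guards added to these conversion helpers matter because callers conventionally represent erased units as null chunks, and the helpers must preserve those holes rather than dereference them. A minimal standalone sketch of the guarded conversion, where Chunk is a hypothetical stand-in for ECChunk:

import java.nio.ByteBuffer;

public class NullChunkDemo {
  // Hypothetical stand-in for ECChunk; only the buffer accessor matters here.
  static class Chunk {
    final ByteBuffer buffer;
    Chunk(ByteBuffer buffer) { this.buffer = buffer; }
  }

  // Mirrors the guarded conversion in the hunk: a null chunk (an erased
  // unit) maps to a null buffer instead of throwing NullPointerException.
  static ByteBuffer[] toBuffers(Chunk[] chunks) {
    ByteBuffer[] buffers = new ByteBuffer[chunks.length];
    for (int i = 0; i < chunks.length; i++) {
      buffers[i] = (chunks[i] == null) ? null : chunks[i].buffer;
    }
    return buffers;
  }

  public static void main(String[] args) {
    Chunk[] chunks = { new Chunk(ByteBuffer.allocate(4)), null };
    ByteBuffer[] buffers = toBuffers(chunks);
    System.out.println(buffers[0] != null && buffers[1] == null); // true
  }
}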

hadoop git commit: HADOOP-11921 Correct the JIRA entry in CHANGES-HDFS-EC-7285.txt

2015-05-06 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 3140bc0f0 -> 88e6c4229


HADOOP-11921 Correct the JIRA entry in CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88e6c422
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88e6c422
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88e6c422

Branch: refs/heads/HDFS-7285
Commit: 88e6c422904b27a43618a4fca3168224fe46914b
Parents: 3140bc0
Author: Kai Zheng 
Authored: Thu May 7 15:11:57 2015 +0800
Committer: Kai Zheng 
Committed: Thu May 7 15:11:57 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88e6c422/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index bfbdcb9..7a344a8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -45,4 +45,4 @@
 
 HADOOP-11841. Remove unused ecschema-def.xml files.  (szetszwo)
 
-HADOOP-11920 Enhance tests for erasure coders. (Kai Zheng)
+HADOOP-11921 Enhance tests for erasure coders. (Kai Zheng)



hadoop git commit: Revert mistakenly committing of HADOOP-11920 patch

2015-05-06 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 ca32b8f12 -> 16ba1a508


Revert mistakenly committing of HADOOP-11920 patch


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16ba1a50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16ba1a50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16ba1a50

Branch: refs/heads/HDFS-7285
Commit: 16ba1a50801eed6696a046b3f4dab468a43b6c5c
Parents: ca32b8f
Author: Kai Zheng 
Authored: Thu May 7 17:02:14 2015 +0800
Committer: Kai Zheng 
Committed: Thu May 7 17:02:14 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  2 -
 .../hadoop/fs/CommonConfigurationKeys.java  |  4 ++
 .../apache/hadoop/io/erasurecode/ECChunk.java   | 20 ++--
 .../erasurecode/coder/AbstractErasureCoder.java |  6 ++-
 .../io/erasurecode/coder/RSErasureDecoder.java  | 40 ++-
 .../rawcoder/AbstractRawErasureCoder.java   | 35 +-
 .../rawcoder/AbstractRawErasureDecoder.java | 51 
 .../rawcoder/AbstractRawErasureEncoder.java | 48 +-
 .../erasurecode/rawcoder/RawErasureCoder.java   |  8 +--
 .../erasurecode/rawcoder/RawErasureDecoder.java | 24 -
 .../io/erasurecode/rawcoder/XORRawDecoder.java  | 24 ++---
 .../io/erasurecode/rawcoder/XORRawEncoder.java  |  6 +--
 .../hadoop/io/erasurecode/TestCoderBase.java|  4 +-
 .../erasurecode/coder/TestRSErasureCoder.java   |  4 +-
 14 files changed, 130 insertions(+), 146 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ba1a50/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 7a344a8..9749270 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -44,5 +44,3 @@
 HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via Kai Zheng)
 
 HADOOP-11841. Remove unused ecschema-def.xml files.  (szetszwo)
-
-HADOOP-11921 Enhance tests for erasure coders. (Kai Zheng)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ba1a50/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 3f2871b..bd2a24b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -143,6 +143,10 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   /** Supported erasure codec classes */
   public static final String IO_ERASURECODE_CODECS_KEY = "io.erasurecode.codecs";
 
+  /** Use XOR raw coder when possible for the RS codec */
+  public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
+  "io.erasurecode.codec.rs.usexor";
+
   /** Raw coder factory for the RS codec */
   public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
   "io.erasurecode.codec.rs.rawcoder";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ba1a50/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
index 34dd90b..01e8f35 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -58,15 +58,8 @@ public class ECChunk {
   public static ByteBuffer[] toBuffers(ECChunk[] chunks) {
 ByteBuffer[] buffers = new ByteBuffer[chunks.length];
 
-ECChunk chunk;
 for (int i = 0; i < chunks.length; i++) {
-  chunk = chunks[i];
-  if (chunk == null) {
-buffers[i] = null;
-continue;
-  }
-
-  buffers[i] = chunk.getBuffer();
+  buffers[i] = chunks[i].getBuffer();
 }
 
 return buffers;
@@ -78,19 +71,12 @@ public class ECChunk {
* @param chunks
* @return an array of byte array
*/
-  public static byte[][] toArrays(ECChunk[] chunks) {
+  public static byte[][] t

hadoop git commit: HADOOP-11921. Enhance tests for erasure coders. Contributed by Kai Zheng

2015-05-06 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 16ba1a508 -> 0f7eb4636


HADOOP-11921. Enhance tests for erasure coders. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f7eb463
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f7eb463
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f7eb463

Branch: refs/heads/HDFS-7285
Commit: 0f7eb46362752da1972e1b669e67cadef30c5547
Parents: 16ba1a5
Author: Kai Zheng 
Authored: Thu May 7 17:05:04 2015 +0800
Committer: Kai Zheng 
Committed: Thu May 7 17:05:04 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  2 +
 .../hadoop/io/erasurecode/TestCoderBase.java| 50 ++-
 .../erasurecode/coder/TestErasureCoderBase.java | 89 +++-
 .../erasurecode/coder/TestRSErasureCoder.java   | 64 ++
 .../io/erasurecode/coder/TestXORCoder.java  | 24 --
 .../io/erasurecode/rawcoder/TestRSRawCoder.java | 76 +
 .../rawcoder/TestRSRawCoderBase.java| 51 +++
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 45 +-
 .../erasurecode/rawcoder/TestXORRawCoder.java   | 24 --
 9 files changed, 274 insertions(+), 151 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f7eb463/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 9749270..7a344a8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -44,3 +44,5 @@
 HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via Kai Zheng)
 
 HADOOP-11841. Remove unused ecschema-def.xml files.  (szetszwo)
+
+HADOOP-11921 Enhance tests for erasure coders. (Kai Zheng)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f7eb463/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
index 22fd98d..be1924c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
@@ -49,15 +49,15 @@ public abstract class TestCoderBase {
* Prepare before running the case.
* @param numDataUnits
* @param numParityUnits
-   * @param erasedIndexes
+   * @param erasedDataIndexes
*/
   protected void prepare(Configuration conf, int numDataUnits,
- int numParityUnits, int[] erasedIndexes) {
+ int numParityUnits, int[] erasedDataIndexes) {
 this.conf = conf;
 this.numDataUnits = numDataUnits;
 this.numParityUnits = numParityUnits;
-this.erasedDataIndexes = erasedIndexes != null ?
-erasedIndexes : new int[] {0};
+this.erasedDataIndexes = erasedDataIndexes != null ?
+erasedDataIndexes : new int[] {0};
   }
 
   /**
@@ -82,15 +82,19 @@ public abstract class TestCoderBase {
   }
 
   /**
-   * Adjust and return erased indexes based on the array of the input chunks (
-   * parity chunks + data chunks).
-   * @return
+   * Adjust and return erased indexes altogether, including erased data indexes
+   * and parity indexes.
+   * @return erased indexes altogether
*/
   protected int[] getErasedIndexesForDecoding() {
 int[] erasedIndexesForDecoding = new int[erasedDataIndexes.length];
+
+int idx = 0;
+
 for (int i = 0; i < erasedDataIndexes.length; i++) {
-  erasedIndexesForDecoding[i] = erasedDataIndexes[i] + numParityUnits;
+  erasedIndexesForDecoding[idx ++] = erasedDataIndexes[i] + numParityUnits;
 }
+
 return erasedIndexesForDecoding;
   }
 
@@ -116,30 +120,23 @@ public abstract class TestCoderBase {
   }
 
   /**
-   * Have a copy of the data chunks that's to be erased thereafter. The copy
-   * will be used to compare and verify with the to be recovered chunks.
+   * Erase chunks to test recovering them. Clone them before erasure so the
+   * clones can be returned for later verification.
* @param dataChunks
-   * @return
+   * @return clone of erased chunks
*/
-  protected ECChunk[] copyDataChunksToErase(ECChunk[] dataChunks) {
-ECChunk[] copiedChunks = new ECChunk[erasedDataIndexes.length];
-
-int j = 0;
-for (int i = 0; i < erasedDataIndexes.l
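
The index shift in getErasedIndexesForDecoding() above reflects the coder's input layout of [parity units | data units]: an erased data index i becomes i + numParityUnits when handed to the decoder. A small worked check with illustrative values:

public class IndexDemo {
  public static void main(String[] args) {
    int numParityUnits = 3;
    int[] erasedDataIndexes = {0, 2};
    int[] forDecoding = new int[erasedDataIndexes.length];
    for (int i = 0; i < erasedDataIndexes.length; i++) {
      // Shift past the parity units that precede the data units.
      forDecoding[i] = erasedDataIndexes[i] + numParityUnits;
    }
    System.out.println(java.util.Arrays.toString(forDecoding)); // [3, 5]
  }
}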

hadoop git commit: HADOOP-11920. Refactor some codes for erasure coders. Contributed by Kai Zheng

2015-05-06 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 0f7eb4636 -> 2a89e1d33


HADOOP-11920. Refactor some codes for erasure coders. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a89e1d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a89e1d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a89e1d3

Branch: refs/heads/HDFS-7285
Commit: 2a89e1d3322804b30877e06c8833151ea99f05b3
Parents: 0f7eb46
Author: Kai Zheng 
Authored: Thu May 7 21:02:50 2015 +0800
Committer: Kai Zheng 
Committed: Thu May 7 21:02:50 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  2 +
 .../hadoop/fs/CommonConfigurationKeys.java  |  4 --
 .../apache/hadoop/io/erasurecode/ECChunk.java   |  2 +-
 .../erasurecode/coder/AbstractErasureCoder.java |  6 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  | 40 +
 .../rawcoder/AbstractRawErasureCoder.java   | 62 +++-
 .../rawcoder/AbstractRawErasureDecoder.java | 54 ++---
 .../rawcoder/AbstractRawErasureEncoder.java | 52 
 .../erasurecode/rawcoder/RawErasureCoder.java   |  8 +--
 .../erasurecode/rawcoder/RawErasureDecoder.java | 24 +---
 .../io/erasurecode/rawcoder/XORRawDecoder.java  | 24 ++--
 .../io/erasurecode/rawcoder/XORRawEncoder.java  |  6 +-
 .../hadoop/io/erasurecode/TestCoderBase.java|  4 +-
 .../erasurecode/coder/TestRSErasureCoder.java   |  6 +-
 14 files changed, 155 insertions(+), 139 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a89e1d3/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 7a344a8..06e7acf 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -46,3 +46,5 @@
 HADOOP-11841. Remove unused ecschema-def.xml files.  (szetszwo)
 
 HADOOP-11921 Enhance tests for erasure coders. (Kai Zheng)
+
+HADOOP-11920 Refactor some codes for erasure coders. (Kai Zheng)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a89e1d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index bd2a24b..3f2871b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -143,10 +143,6 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   /** Supported erasure codec classes */
   public static final String IO_ERASURECODE_CODECS_KEY = "io.erasurecode.codecs";
 
-  /** Use XOR raw coder when possible for the RS codec */
-  public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
-  "io.erasurecode.codec.rs.usexor";
-
   /** Raw coder factory for the RS codec */
   public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
   "io.erasurecode.codec.rs.rawcoder";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a89e1d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
index 01e8f35..436e13e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -71,7 +71,7 @@ public class ECChunk {
* @param chunks
* @return an array of byte array
*/
-  public static byte[][] toArray(ECChunk[] chunks) {
+  public static byte[][] toArrays(ECChunk[] chunks) {
 byte[][] bytesArr = new byte[chunks.length][];
 
 ByteBuffer buffer;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a89e1d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/Abs

hadoop git commit: HADOOP-11566 Add tests and fix for erasure coders to recover erased parity units. Contributed by Kai Zheng

2015-05-13 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 2b11befc4 -> 15b4f151e


HADOOP-11566 Add tests and fix for erasure coders to recover erased parity 
units. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15b4f151
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15b4f151
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15b4f151

Branch: refs/heads/HDFS-7285
Commit: 15b4f151eca8b143ee623682c64596c809d10021
Parents: 2b11bef
Author: Kai Zheng 
Authored: Wed May 13 23:00:00 2015 +0800
Committer: Kai Zheng 
Committed: Wed May 13 23:00:00 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 ++
 .../apache/hadoop/io/erasurecode/ECChunk.java   | 17 ++-
 .../coder/AbstractErasureDecoder.java   | 13 --
 .../hadoop/io/erasurecode/TestCoderBase.java| 37 +++
 .../erasurecode/coder/TestErasureCoderBase.java | 37 +++
 .../erasurecode/coder/TestRSErasureCoder.java   | 48 +++-
 .../io/erasurecode/coder/TestXORCoder.java  |  6 +--
 .../io/erasurecode/rawcoder/TestRSRawCoder.java | 37 +--
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  2 +-
 .../erasurecode/rawcoder/TestXORRawCoder.java   | 11 -
 10 files changed, 134 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15b4f151/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 06e7acf..379d92f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -48,3 +48,6 @@
 HADOOP-11921 Enhance tests for erasure coders. (Kai Zheng)
 
 HADOOP-11920 Refactor some codes for erasure coders. (Kai Zheng)
+
+HADOOP-11566 Add tests and fix for erasure coders to recover erased parity units.
+( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15b4f151/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
index 436e13e..69a8343 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -58,8 +58,14 @@ public class ECChunk {
   public static ByteBuffer[] toBuffers(ECChunk[] chunks) {
 ByteBuffer[] buffers = new ByteBuffer[chunks.length];
 
+ECChunk chunk;
 for (int i = 0; i < chunks.length; i++) {
-  buffers[i] = chunks[i].getBuffer();
+  chunk = chunks[i];
+  if (chunk == null) {
+buffers[i] = null;
+  } else {
+buffers[i] = chunk.getBuffer();
+  }
 }
 
 return buffers;
@@ -75,8 +81,15 @@ public class ECChunk {
 byte[][] bytesArr = new byte[chunks.length][];
 
 ByteBuffer buffer;
+ECChunk chunk;
 for (int i = 0; i < chunks.length; i++) {
-  buffer = chunks[i].getBuffer();
+  chunk = chunks[i];
+  if (chunk == null) {
+bytesArr[i] = null;
+continue;
+  }
+
+  buffer = chunk.getBuffer();
   if (buffer.hasArray()) {
 bytesArr[i] = buffer.array();
   } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15b4f151/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
index cd31294..6437236 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
@@ -60,16 +60,21 @@ public abstract class AbstractErasureDecoder extends AbstractErasureCoder {
   }
 
   /**
-   * Which blocks were erased ? We only care data blocks here. Sub-classes can
-   * override this behavior.
+   * Which blocks were erased?
* @param blockGroup
* @return output blocks to recover
*/
   protected ECBlock[] getOutputBlock

hadoop git commit: HADOOP-11938 Enhance ByteBuffer version encode/decode API of raw erasure coder. Contributed by Kai Zheng

2015-05-15 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 a35936d69 -> 0180ef08a


HADOOP-11938 Enhance ByteBuffer version encode/decode API of raw erasure coder. 
Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0180ef08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0180ef08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0180ef08

Branch: refs/heads/HDFS-7285
Commit: 0180ef08af9455da0c6ef2f1931cdb6b96b56796
Parents: a35936d
Author: Kai Zheng 
Authored: Sat May 16 16:40:48 2015 +0800
Committer: Kai Zheng 
Committed: Sat May 16 16:40:48 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   3 +
 .../apache/hadoop/io/erasurecode/ECChunk.java   |  35 ++---
 .../rawcoder/AbstractRawErasureCoder.java   |  77 +--
 .../rawcoder/AbstractRawErasureDecoder.java |  69 --
 .../rawcoder/AbstractRawErasureEncoder.java |  66 --
 .../io/erasurecode/rawcoder/RSRawDecoder.java   |  22 ++--
 .../io/erasurecode/rawcoder/RSRawEncoder.java   |  41 +++---
 .../io/erasurecode/rawcoder/XORRawDecoder.java  |  30 +++--
 .../io/erasurecode/rawcoder/XORRawEncoder.java  |  40 +++---
 .../erasurecode/rawcoder/util/GaloisField.java  | 112 
 .../hadoop/io/erasurecode/TestCoderBase.java| 131 +++
 .../erasurecode/coder/TestErasureCoderBase.java |  21 ++-
 .../io/erasurecode/rawcoder/TestRSRawCoder.java |  12 +-
 .../rawcoder/TestRSRawCoderBase.java|  12 +-
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  57 +++-
 .../erasurecode/rawcoder/TestXORRawCoder.java   |  19 +++
 16 files changed, 535 insertions(+), 212 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0180ef08/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 379d92f..90731b6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -51,3 +51,6 @@
 
 HADOOP-11566 Add tests and fix for erasure coders to recover erased parity units.
 ( Kai Zheng )
+
+HADOOP-11938 Enhance ByteBuffer version encode/decode API of raw erasure coder.
+( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0180ef08/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
index 69a8343..310c738 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -72,34 +72,15 @@ public class ECChunk {
   }
 
   /**
-   * Convert an array of this chunks to an array of byte array.
-   * Note the chunk buffers are not affected.
-   * @param chunks
-   * @return an array of byte array
+   * Convert to a bytes array, just for test usage.
+   * @return bytes array
*/
-  public static byte[][] toArrays(ECChunk[] chunks) {
-byte[][] bytesArr = new byte[chunks.length][];
-
-ByteBuffer buffer;
-ECChunk chunk;
-for (int i = 0; i < chunks.length; i++) {
-  chunk = chunks[i];
-  if (chunk == null) {
-bytesArr[i] = null;
-continue;
-  }
-
-  buffer = chunk.getBuffer();
-  if (buffer.hasArray()) {
-bytesArr[i] = buffer.array();
-  } else {
-bytesArr[i] = new byte[buffer.remaining()];
-// Avoid affecting the original one
-buffer.mark();
-buffer.get(bytesArr[i]);
-buffer.reset();
-  }
-}
+  public byte[] toBytesArray() {
+byte[] bytesArr = new byte[chunkBuffer.remaining()];
+// Avoid affecting the original one
+chunkBuffer.mark();
+chunkBuffer.get(bytesArr);
+chunkBuffer.reset();
 
 return bytesArr;
   }
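
The mark/get/reset sequence in toBytesArray() copies the buffer's remaining bytes while leaving its position untouched, which matters when the same buffer is consumed afterwards. A standalone demo of that java.nio idiom:

import java.nio.ByteBuffer;

public class MarkResetDemo {
  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap(new byte[]{10, 20, 30});
    buf.get();                           // position now 1, two bytes remain
    byte[] copy = new byte[buf.remaining()];
    buf.mark();                          // remember position 1
    buf.get(copy);                       // reading advances position to 3
    buf.reset();                         // restore position 1
    System.out.println(buf.remaining()); // 2: the original state is preserved
  }
}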

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0180ef08/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index 2378bb5

hadoop git commit: HDFS-8367 BlockInfoStriped uses EC schema. Contributed by Kai Sasaki

2015-05-18 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 f34667274 -> f9be529f4


HDFS-8367 BlockInfoStriped uses EC schema. Contributed by Kai Sasaki


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9be529f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9be529f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9be529f

Branch: refs/heads/HDFS-7285
Commit: f9be529f429e7ca83afc439888e9e3c97f21787d
Parents: f346672
Author: Kai Zheng 
Authored: Tue May 19 00:10:30 2015 +0800
Committer: Kai Zheng 
Committed: Tue May 19 00:10:30 2015 +0800

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  2 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  6 +--
 .../blockmanagement/BlockInfoStriped.java   | 24 
 .../BlockInfoStripedUnderConstruction.java  | 12 +++---
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  4 +-
 .../hdfs/server/namenode/FSDirectory.java   |  3 ++
 .../hdfs/server/namenode/FSEditLogLoader.java   | 34 +
 .../hdfs/server/namenode/FSImageFormat.java | 10 +++--
 .../server/namenode/FSImageFormatPBINode.java   |  7 +++-
 .../server/namenode/FSImageSerialization.java   | 14 ---
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../blockmanagement/TestBlockInfoStriped.java   |  8 +++-
 .../server/namenode/TestFSEditLogLoader.java|  8 +++-
 .../hdfs/server/namenode/TestFSImage.java   |  6 ++-
 .../server/namenode/TestStripedINodeFile.java   | 39 ++--
 15 files changed, 99 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9be529f/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 1456434..333d85f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -215,3 +215,5 @@
 
 HDFS-8391. NN should consider current EC tasks handling count from DN while assigning new tasks. (umamahesh)
+
+HDFS-8367. BlockInfoStriped uses EC schema. (Kai Sasaki via Kai Zheng)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9be529f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 94b2ff9..a6a356c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -203,6 +203,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
 import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand;
@@ -445,9 +446,8 @@ public class PBHelper {
 return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
   }
 
-  public static BlockInfoStriped convert(StripedBlockProto p) {
-return new BlockInfoStriped(convert(p.getBlock()),
-(short) p.getDataBlockNum(), (short) p.getParityBlockNum());
+  public static BlockInfoStriped convert(StripedBlockProto p, ECSchema schema) 
{
+return new BlockInfoStriped(convert(p.getBlock()), schema);
   }
 
   public static StripedBlockProto convert(BlockInfoStriped blk) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9be529f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index f0e52e3..d7a48a0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -19
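
To make the BlockInfoStriped change concrete: the data/parity counts no longer travel as raw shorts but come from the ECSchema, so protobuf deserialization now needs the schema as well. A sketch of the before/after call shape, taken from the PBHelper hunk above (the surrounding call site is illustrative, not from the patch):

// Before HDFS-8367: counts carried explicitly in the proto.
//   new BlockInfoStriped(convert(p.getBlock()),
//       (short) p.getDataBlockNum(), (short) p.getParityBlockNum());
// After: the schema supplies them, so callers must pass it through:
ECSchema schema = ...; // e.g. resolved for the file; source is illustrative
BlockInfoStriped striped = PBHelper.convert(stripedBlockProto, schema);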

hadoop git commit: HADOOP-12013 Generate fixed data to perform erasure coder test. Contributed by Kai Zheng

2015-05-22 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 24d0fbe60 -> 631322517


HADOOP-12013 Generate fixed data to perform erasure coder test. Contributed by 
Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63132251
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63132251
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63132251

Branch: refs/heads/HDFS-7285
Commit: 6313225177d3895b2bdf96cebea5ef86f2b925b7
Parents: 24d0fbe
Author: Kai Zheng 
Authored: Fri May 22 23:47:52 2015 +0800
Committer: Kai Zheng 
Committed: Fri May 22 23:47:52 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  2 +
 .../hadoop/io/erasurecode/TestCoderBase.java| 87 +++-
 .../io/erasurecode/rawcoder/TestRSRawCoder.java |  8 +-
 .../rawcoder/TestRSRawCoderBase.java| 21 +++--
 4 files changed, 110 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63132251/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index c799b4f..531b8d5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -54,3 +54,5 @@
 
 HADOOP-11938. Enhance ByteBuffer version encode/decode API of raw erasure 
 coder. (Kai Zheng via Zhe Zhang)
+
+HADOOP-12013. Generate fixed data to perform erasure coder test. (Kai 
Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63132251/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
index cc3617c..3686695 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
@@ -52,6 +52,12 @@ public abstract class TestCoderBase {
   // may go to different coding implementations.
   protected boolean usingDirectBuffer = true;
 
+  protected boolean usingFixedData = true;
+  // With this, the generated data is repeatable across multiple calls to
+  // encode(), which helps troubleshooting.
+  private static int FIXED_DATA_GENERATOR = 0;
+  protected byte[][] fixedData;
+
   protected int getChunkSize() {
 return chunkSize;
   }
@@ -63,13 +69,17 @@ public abstract class TestCoderBase {
 
   /**
* Prepare before running the case.
+   * @param conf
* @param numDataUnits
* @param numParityUnits
* @param erasedDataIndexes
+   * @param erasedParityIndexes
+   * @param usingFixedData whether to test with fixed, pre-generated data
+   *   instead of generating random data each time
*/
   protected void prepare(Configuration conf, int numDataUnits,
  int numParityUnits, int[] erasedDataIndexes,
- int[] erasedParityIndexes) {
+ int[] erasedParityIndexes, boolean usingFixedData) {
 this.conf = conf;
 this.numDataUnits = numDataUnits;
 this.numParityUnits = numParityUnits;
@@ -77,6 +87,38 @@ public abstract class TestCoderBase {
 erasedDataIndexes : new int[] {0};
 this.erasedParityIndexes = erasedParityIndexes != null ?
 erasedParityIndexes : new int[] {0};
+this.usingFixedData = usingFixedData;
+if (usingFixedData) {
+  prepareFixedData();
+}
+  }
+
+  /**
+   * Prepare before running the case.
+   * @param conf
+   * @param numDataUnits
+   * @param numParityUnits
+   * @param erasedDataIndexes
+   * @param erasedParityIndexes
+   */
+  protected void prepare(Configuration conf, int numDataUnits,
+ int numParityUnits, int[] erasedDataIndexes,
+ int[] erasedParityIndexes) {
+prepare(conf, numDataUnits, numParityUnits, erasedDataIndexes,
+erasedParityIndexes, false);
+  }
+
+  /**
+   * Prepare before running the case.
+   * @param numDataUnits
+   * @param numParityUnits
+   * @param erasedDataIndexes
+   * @param erasedParityIndexes
+   */
+  protected void prepare(int numDataUnits, int numParityUnits,
+ int[] erasedDataIndexes, int[] erasedParityIndexes) {
+prepare(null, numDataUnits, numParityUnits, erasedDataIndexes,
+erasedParityIndexes, false);
+  }
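
The idea behind usingFixedData, as a standalone sketch: a deterministic generator makes every encode() call and every run see the same bytes, so a failing case reproduces exactly. The generator below is modeled on the FIXED_DATA_GENERATOR counter introduced above, but is an illustrative reconstruction, not the verbatim TestCoderBase code:

public class FixedDataDemo {
  // A static counter yields a repeatable byte sequence instead of random data.
  private static int fixedDataGenerator = 0;

  static byte[] generateFixedData(int len) {
    byte[] buf = new byte[len];
    for (int i = 0; i < len; i++) {
      buf[i] = (byte) fixedDataGenerator++;
      if (fixedDataGenerator == 256) {
        fixedDataGenerator = 0; // wrap around, staying deterministic
      }
    }
    return buf;
  }

  public static void main(String[] args) {
    // Two calls continue the same sequence: [0..7] then [8..15].
    System.out.println(java.util.Arrays.toString(generateFixedData(8)));
    System.out.println(java.util.Arrays.toString(generateFixedData(8)));
  }
}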

hadoop git commit: HDFS-8382 Remove chunkSize and initialize from erasure coder. Contributed by Kai Zheng

2015-05-24 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 61172fba8 -> 6a0afa6b1


HDFS-8382 Remove chunkSize and initialize from erasure coder. Contributed by 
Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a0afa6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a0afa6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a0afa6b

Branch: refs/heads/HDFS-7285
Commit: 6a0afa6b1fcdfcdb5e4ddd4f170112619056a385
Parents: 61172fb
Author: Kai Zheng 
Authored: Mon May 25 16:13:29 2015 +0800
Committer: Kai Zheng 
Committed: Mon May 25 16:13:29 2015 +0800

--
 .../erasurecode/codec/AbstractErasureCodec.java | 43 ++--
 .../io/erasurecode/codec/ErasureCodec.java  |  7 
 .../io/erasurecode/codec/RSErasureCodec.java| 13 --
 .../io/erasurecode/codec/XORErasureCodec.java   | 13 +++---
 .../erasurecode/coder/AbstractErasureCoder.java | 39 --
 .../coder/AbstractErasureDecoder.java   |  9 
 .../coder/AbstractErasureEncoder.java   |  9 
 .../io/erasurecode/coder/ErasureCoder.java  | 27 ++--
 .../io/erasurecode/coder/RSErasureDecoder.java  | 16 ++--
 .../io/erasurecode/coder/RSErasureEncoder.java  | 16 ++--
 .../io/erasurecode/coder/XORErasureDecoder.java | 14 +--
 .../io/erasurecode/coder/XORErasureEncoder.java | 16 ++--
 .../rawcoder/AbstractRawErasureCoder.java   | 15 ++-
 .../rawcoder/AbstractRawErasureDecoder.java |  4 ++
 .../rawcoder/AbstractRawErasureEncoder.java |  4 ++
 .../io/erasurecode/rawcoder/RSRawDecoder.java   |  5 +--
 .../io/erasurecode/rawcoder/RSRawEncoder.java   |  6 +--
 .../rawcoder/RSRawErasureCoderFactory.java  |  8 ++--
 .../erasurecode/rawcoder/RawErasureCoder.java   | 14 ---
 .../rawcoder/RawErasureCoderFactory.java|  8 +++-
 .../io/erasurecode/rawcoder/XORRawDecoder.java  |  4 ++
 .../io/erasurecode/rawcoder/XORRawEncoder.java  |  4 ++
 .../rawcoder/XORRawErasureCoderFactory.java |  8 ++--
 .../erasurecode/coder/TestErasureCoderBase.java | 42 +++
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 15 ---
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  2 +
 .../hadoop/hdfs/DFSStripedOutputStream.java |  3 +-
 .../erasurecode/ErasureCodingWorker.java| 14 +++
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  3 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  4 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |  3 +-
 31 files changed, 187 insertions(+), 201 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a0afa6b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
index 9993786..0cacfbc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.io.erasurecode.codec;
 
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.apache.hadoop.io.erasurecode.coder.*;
 import org.apache.hadoop.io.erasurecode.grouper.BlockGrouper;
 
 /**
@@ -28,10 +27,9 @@ import org.apache.hadoop.io.erasurecode.grouper.BlockGrouper;
 public abstract class AbstractErasureCodec extends Configured
 implements ErasureCodec {
 
-  private ECSchema schema;
+  private final ECSchema schema;
 
-  @Override
-  public void setSchema(ECSchema schema) {
+  public AbstractErasureCodec(ECSchema schema) {
 this.schema = schema;
   }
 
@@ -39,7 +37,7 @@ public abstract class AbstractErasureCodec extends Configured
 return schema.getCodecName();
   }
 
-  protected ECSchema getSchema() {
+  public ECSchema getSchema() {
 return schema;
   }
 
@@ -50,39 +48,4 @@ public abstract class AbstractErasureCodec extends Configured
 
 return blockGrouper;
   }
-
-  @Override
-  public ErasureCoder createEncoder() {
-ErasureCoder encoder = doCreateEncoder();
-prepareErasureCoder(encoder);
-return encoder;
-  }
-
-  /**
-   * Create a new encoder instance to be initialized afterwards.
-   * @return encoder
-   */
-  protected abstract ErasureCoder doCreateEncoder();
-
-  @Override
-  public ErasureCoder createDecoder() {
-ErasureCoder decoder = doCreateDecoder();
-prepareErasureCoder(decoder);
-return decoder;
-  }
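
The refactor above replaces the setSchema() mutator with constructor injection, which is what lets the schema field become final. A minimal sketch of the resulting subclass pattern (the constructor shape follows the AbstractErasureCodec hunk; the concrete subclass body is illustrative):

public class RSErasureCodec extends AbstractErasureCodec {
  public RSErasureCodec(ECSchema schema) {
    super(schema); // stored once in the final field shown in the diff
  }
  // encoder/decoder factory methods omitted in this sketch
}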

hadoop git commit: HADOOP-11847 Enhance raw coder allowing to read least required inputs in decoding. Contributed by Kai Zheng

2015-05-25 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 f56e19286 -> 48513749a


HADOOP-11847 Enhance raw coder allowing to read least required inputs in 
decoding. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48513749
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48513749
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48513749

Branch: refs/heads/HDFS-7285
Commit: 48513749a6ee20e7e2b29f78257e6ed05f38859e
Parents: f56e192
Author: Kai Zheng 
Authored: Tue May 26 22:45:19 2015 +0800
Committer: Kai Zheng 
Committed: Tue May 26 22:45:19 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   3 +
 .../rawcoder/AbstractRawErasureCoder.java   |  27 +++-
 .../rawcoder/AbstractRawErasureDecoder.java |  75 +++--
 .../rawcoder/AbstractRawErasureEncoder.java |   8 +-
 .../io/erasurecode/rawcoder/RSRawDecoder.java   | 162 ++-
 .../erasurecode/rawcoder/RawErasureDecoder.java |  20 ++-
 .../io/erasurecode/rawcoder/XORRawDecoder.java  |   2 +-
 .../io/erasurecode/rawcoder/XORRawEncoder.java  |   2 +-
 .../erasurecode/rawcoder/util/GaloisField.java  |  12 +-
 .../hadoop/io/erasurecode/TestCoderBase.java|  39 ++---
 .../erasurecode/coder/TestErasureCoderBase.java |   1 -
 .../io/erasurecode/rawcoder/TestRSRawCoder.java | 101 ++--
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  54 +++
 .../erasurecode/rawcoder/TestXORRawCoder.java   |  45 ++
 14 files changed, 397 insertions(+), 154 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48513749/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index c9b80d3..0c24473 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -59,3 +59,6 @@
 
 HADOOP-12029. Remove chunkSize from ECSchema as its not required for coders
 (vinayakumarb)
+
+HADOOP-11847. Enhance raw coder allowing to read least required inputs in 
decoding.
+(Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48513749/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index 06ae660..e6a1542 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
@@ -60,12 +60,13 @@ public abstract class AbstractRawErasureCoder
   }
 
   /**
-   * Ensure output buffer filled with ZERO bytes fully in chunkSize.
-   * @param buffer a buffer ready to write chunk size bytes
+   * Ensure a buffer is filled with ZERO bytes from the current
+   * readable/writable position onward.
+   * @param buffer a buffer ready to read / write a certain number of bytes
* @return the buffer itself, with ZERO bytes written, the position and limit
* are not changed after the call
*/
-  protected ByteBuffer resetOutputBuffer(ByteBuffer buffer) {
+  protected ByteBuffer resetBuffer(ByteBuffer buffer) {
 int pos = buffer.position();
 for (int i = pos; i < buffer.limit(); ++i) {
   buffer.put((byte) 0);
@@ -77,7 +78,7 @@ public abstract class AbstractRawErasureCoder
 
   /**
* Ensure the buffer (either input or output) ready to read or write with 
ZERO
-   * bytes fully in chunkSize.
+   * bytes fully in the specified length, len.
* @param buffer bytes array buffer
* @return the buffer itself
*/
@@ -92,11 +93,16 @@ public abstract class AbstractRawErasureCoder
   /**
* Check and ensure the buffers are of the length specified by dataLen.
* @param buffers
+   * @param allowNull
* @param dataLen
*/
-  protected void ensureLength(ByteBuffer[] buffers, int dataLen) {
+  protected void ensureLength(ByteBuffer[] buffers,
+  boolean allowNull, int dataLen) {
 for (int i = 0; i < buffers.length; ++i) {
-  if (buffers[i].remaining() != dataLen) {
+  if (buffers[i] == null && !allowNull) {
+throw new HadoopIllegalArgumentException(
+"Invalid buffer found, not allowing null");
+  
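
The ensureLength() hunk above is cut off mid-method. Based on the null check it introduces and the original length check it extends, a plausible completion reads as follows (a reconstruction, not the verbatim patch):

protected void ensureLength(ByteBuffer[] buffers,
    boolean allowNull, int dataLen) {
  for (int i = 0; i < buffers.length; ++i) {
    if (buffers[i] == null && !allowNull) {
      throw new HadoopIllegalArgumentException(
          "Invalid buffer found, not allowing null");
    } else if (buffers[i] != null && buffers[i].remaining() != dataLen) {
      // Non-null buffers must still expose exactly dataLen readable bytes.
      throw new HadoopIllegalArgumentException(
          "Invalid buffer, not of length " + dataLen);
    }
  }
}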

hadoop git commit: HADOOP-12011 Allow to dump verbose information to ease debugging in raw erasure coders. Contributed by Kai Zheng

2015-06-01 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 014bd32c5 -> 0799e1e4b


HADOOP-12011 Allow to dump verbose information to ease debugging in raw erasure 
coders. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0799e1e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0799e1e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0799e1e4

Branch: refs/heads/HDFS-7285
Commit: 0799e1e4b6a343d3960619b844e15037a460a3ef
Parents: 014bd32
Author: Kai Zheng 
Authored: Tue Jun 2 22:05:16 2015 +0800
Committer: Kai Zheng 
Committed: Tue Jun 2 22:05:16 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +
 .../io/erasurecode/rawcoder/util/DumpUtil.java  | 85 
 .../hadoop/io/erasurecode/TestCoderBase.java| 42 ++
 .../io/erasurecode/rawcoder/TestRSRawCoder.java |  1 +
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  6 ++
 5 files changed, 137 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0799e1e4/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 0c24473..3559436 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -61,4 +61,7 @@
 (vinayakumarb)
 
 HADOOP-11847. Enhance raw coder allowing to read least required inputs in 
decoding.
+(Kai Zheng)
+
+HADOOP-12011. Allow to dump verbose information to ease debugging in raw 
erasure coders
 (Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0799e1e4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/DumpUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/DumpUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/DumpUtil.java
new file mode 100644
index 000..c8f133f
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/DumpUtil.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder.util;
+
+import org.apache.hadoop.io.erasurecode.ECChunk;
+
+/**
+ * A dump utility class for debugging data erasure coding/decoding issues.
+ * Its use in production code is not recommended.
+ */
+public final class DumpUtil {
+  private static final String HEX_CHARS_STR = "0123456789ABCDEF";
+  private static final char[] HEX_CHARS = HEX_CHARS_STR.toCharArray();
+
+  private DumpUtil() {
+// Not to be instantiated
+  }
+
+  /**
+   * Convert bytes into format like 0x02 02 00 80.
+   */
+  public static String bytesToHex(byte[] bytes, int limit) {
+if (limit > bytes.length) {
+  limit = bytes.length;
+}
+int len = limit * 2;
+len += limit; // a ' ' separator appended after each byte
+len += 2; // for '0x' prefix
+char[] hexChars = new char[len];
+hexChars[0] = '0';
+hexChars[1] = 'x';
+for (int j = 0; j < limit; j++) {
+  int v = bytes[j] & 0xFF;
+  hexChars[j * 3 + 2] = HEX_CHARS[v >>> 4];
+  hexChars[j * 3 + 3] = HEX_CHARS[v & 0x0F];
+  hexChars[j * 3 + 4] = ' ';
+}
+
+return new String(hexChars);
+  }
+
+  /**
+   * Print data in hex format in an array of chunks.
+   * @param header
+   * @param chunks
+   */
+  public static void dumpChunks(String header, ECChunk[] chunks) {
+System.out.println();
+System.out.println(header);
+for (int i = 0; i < chunks.length; i++) {
+  dumpChunk(chunks[i]);
+}
+System.out.println();
+  }
+
+  /**
+   * Print data in hex format in a chunk.
+   * @param 
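
A quick usage sketch for the hex helper above; the expected output follows directly from the bytesToHex() implementation shown:

byte[] data = new byte[]{0x02, 0x02, 0x00, (byte) 0x80, 0x7f};
// Dump at most the first 4 bytes; prints "0x02 02 00 80 " (trailing space).
System.out.println(DumpUtil.bytesToHex(data, 4));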

hadoop git commit: HADOOP-12065 Using more meaningful keys in EC schema. Contributed by Kai Zheng

2015-06-07 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 2eee19cd1 -> c41b02cc0


HADOOP-12065 Using more meaningful keys in EC schema. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c41b02cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c41b02cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c41b02cc

Branch: refs/heads/HDFS-7285
Commit: c41b02cc0058239657263d45d969e71b14e6a589
Parents: 2eee19c
Author: Kai Zheng 
Authored: Mon Jun 8 20:24:17 2015 +0800
Committer: Kai Zheng 
Committed: Mon Jun 8 20:24:17 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 4 +++-
 .../main/java/org/apache/hadoop/io/erasurecode/ECSchema.java | 4 ++--
 .../org/apache/hadoop/io/erasurecode/TestSchemaLoader.java   | 8 
 3 files changed, 9 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c41b02cc/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 3559436..505eabd 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -64,4 +64,6 @@
 (Kai Zheng)
 
 HADOOP-12011. Allow to dump verbose information to ease debugging in raw 
erasure coders
-(Kai Zheng)
\ No newline at end of file
+(Kai Zheng)
+
+HADOOP-12065. Using more meaningful keys in EC schema. (Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c41b02cc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index fdc569e..1e07d3d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -25,8 +25,8 @@ import java.util.Map;
  * Erasure coding schema to housekeeper relevant information.
  */
 public final class ECSchema {
-  public static final String NUM_DATA_UNITS_KEY = "k";
-  public static final String NUM_PARITY_UNITS_KEY = "m";
+  public static final String NUM_DATA_UNITS_KEY = "numDataUnits";
+  public static final String NUM_PARITY_UNITS_KEY = "numParityUnits";
   public static final String CODEC_NAME_KEY = "codec";
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c41b02cc/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java
index 939fa9b..50d2091 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java
@@ -40,13 +40,13 @@ public class TestSchemaLoader {
 out.println("");
 out.println("");
 out.println("  ");
-out.println("6");
-out.println("3");
+out.println("6");
+out.println("3");
 out.println("RS");
 out.println("  ");
 out.println("  ");
-out.println("10");
-out.println("4");
+out.println("10");
+out.println("4");
 out.println("RS");
 out.println("  ");
 out.println("");



hadoop git commit: HDFS-8557 Allow to configure RS and XOR raw coders. Contributed by Kai Zheng

2015-06-09 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 c41b02cc0 -> e299fe86b


HDFS-8557 Allow to configure RS and XOR raw coders. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e299fe86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e299fe86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e299fe86

Branch: refs/heads/HDFS-7285
Commit: e299fe86b889968a0093f9f9b097dd71b4f49e88
Parents: c41b02c
Author: Kai Zheng 
Authored: Wed Jun 10 15:35:26 2015 +0800
Committer: Kai Zheng 
Committed: Wed Jun 10 15:35:26 2015 +0800

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   4 +-
 .../hadoop/fs/CommonConfigurationKeys.java  |   6 +-
 .../apache/hadoop/io/erasurecode/CodecUtil.java | 144 +++
 .../erasurecode/coder/AbstractErasureCoder.java |  67 +
 .../io/erasurecode/coder/RSErasureDecoder.java  |  11 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |   9 +-
 .../io/erasurecode/coder/XORErasureDecoder.java |  10 +-
 .../io/erasurecode/coder/XORErasureEncoder.java |  10 +-
 .../hadoop/hdfs/DFSStripedInputStream.java  |  23 +--
 .../hadoop/hdfs/DFSStripedOutputStream.java |  16 ++-
 .../erasurecode/ErasureCodingWorker.java|   3 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |   7 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  22 +--
 .../TestDFSStripedOutputStreamWithFailure.java  |   2 +-
 14 files changed, 216 insertions(+), 118 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e299fe86/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 505eabd..9ccd3a7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -66,4 +66,6 @@
 HADOOP-12011. Allow to dump verbose information to ease debugging in raw 
erasure coders
 (Kai Zheng)
 
-HADOOP-12065. Using more meaningful keys in EC schema. (Kai Zheng)
\ No newline at end of file
+HADOOP-12065. Using more meaningful keys in EC schema. (Kai Zheng)
+
+HDFS-8557. Allow to configure RS and XOR raw coders (Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e299fe86/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 3f2871b..9588254 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -143,10 +143,14 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   /** Supported erasure codec classes */
   public static final String IO_ERASURECODE_CODECS_KEY = 
"io.erasurecode.codecs";
 
-  /** Raw coder factory for the RS codec */
+  /** Raw coder factory for the RS codec. */
   public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
   "io.erasurecode.codec.rs.rawcoder";
 
+  /** Raw coder factory for the XOR codec. */
+  public static final String IO_ERASURECODE_CODEC_XOR_RAWCODER_KEY =
+  "io.erasurecode.codec.xor.rawcoder";
+
   /**
* Service Authorization
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e299fe86/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
new file mode 100644
index 000..5d22624
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
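
A sketch of wiring the two coder-factory keys in client code; the factory class names come from the files touched in the HDFS-8382 commit earlier in this digest, and how CodecUtil consumes the Configuration is assumed rather than shown here:

Configuration conf = new Configuration();
// Choose the raw coder implementations behind the RS and XOR codecs.
conf.set(CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
    "org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory");
conf.set(CommonConfigurationKeys.IO_ERASURECODE_CODEC_XOR_RAWCODER_KEY,
    "org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory");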

hadoop git commit: HDFS-8905. Refactor DFSInputStream#ReaderStrategy. Contributed by Kai Zheng and Sammi Chen

2016-08-24 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-alpha1 e07068472 -> 675dfbeed


HDFS-8905. Refactor DFSInputStream#ReaderStrategy. Contributed by Kai Zheng and 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/675dfbee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/675dfbee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/675dfbee

Branch: refs/heads/branch-3.0.0-alpha1
Commit: 675dfbeed4cd258e39e419933a0d97db3a5f79ca
Parents: e070684
Author: Kai Zheng 
Authored: Wed Aug 24 21:55:18 2016 +0800
Committer: Kai Zheng 
Committed: Wed Aug 24 21:55:18 2016 +0800

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 235 ++-
 .../hadoop/hdfs/DFSStripedInputStream.java  |  23 +-
 .../org/apache/hadoop/hdfs/ReadStatistics.java  | 106 +
 .../org/apache/hadoop/hdfs/ReaderStrategy.java  | 215 +
 .../hadoop/hdfs/client/HdfsDataInputStream.java |   3 +-
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  |  16 ++
 .../hadoop/hdfs/TestExternalBlockReader.java|   4 +-
 7 files changed, 374 insertions(+), 228 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/675dfbee/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 6132f83..7a10ba4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -71,6 +71,7 @@ import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
+import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -145,94 +146,6 @@ public class DFSInputStream extends FSInputStream
 return extendedReadBuffers;
   }
 
-  public static class ReadStatistics {
-public ReadStatistics() {
-  clear();
-}
-
-public ReadStatistics(ReadStatistics rhs) {
-  this.totalBytesRead = rhs.getTotalBytesRead();
-  this.totalLocalBytesRead = rhs.getTotalLocalBytesRead();
-  this.totalShortCircuitBytesRead = rhs.getTotalShortCircuitBytesRead();
-  this.totalZeroCopyBytesRead = rhs.getTotalZeroCopyBytesRead();
-}
-
-/**
- * @return The total bytes read.  This will always be at least as
- * high as the other numbers, since it includes all of them.
- */
-public long getTotalBytesRead() {
-  return totalBytesRead;
-}
-
-/**
- * @return The total local bytes read.  This will always be at least
- * as high as totalShortCircuitBytesRead, since all short-circuit
- * reads are also local.
- */
-public long getTotalLocalBytesRead() {
-  return totalLocalBytesRead;
-}
-
-/**
- * @return The total short-circuit local bytes read.
- */
-public long getTotalShortCircuitBytesRead() {
-  return totalShortCircuitBytesRead;
-}
-
-/**
- * @return The total number of zero-copy bytes read.
- */
-public long getTotalZeroCopyBytesRead() {
-  return totalZeroCopyBytesRead;
-}
-
-/**
- * @return The total number of bytes read which were not local.
- */
-public long getRemoteBytesRead() {
-  return totalBytesRead - totalLocalBytesRead;
-}
-
-void addRemoteBytes(long amt) {
-  this.totalBytesRead += amt;
-}
-
-void addLocalBytes(long amt) {
-  this.totalBytesRead += amt;
-  this.totalLocalBytesRead += amt;
-}
-
-void addShortCircuitBytes(long amt) {
-  this.totalBytesRead += amt;
-  this.totalLocalBytesRead += amt;
-  this.totalShortCircuitBytesRead += amt;
-}
-
-void addZeroCopyBytes(long amt) {
-  this.totalBytesRead += amt;
-  this.totalLocalBytesRead += amt;
-  this.totalShortCircuitBytesRead += amt;
-  this.totalZeroCopyBytesRead += amt;
-}
-
-void clear() {
-  this.totalBytesRead = 0;
-  this.totalLocalBytesRead = 0;
-  this.totalShortCircuitBytesRead = 0;
-  this.totalZeroCopyBytesRead = 0;
-}
-
-private long totalBytesRead;
-
-private long totalLocalBytesRead;
-
-private long totalShortCircuitBytesRead;
-
-private long totalZeroCopyBytesRead;
-  }
-
   /**
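
The counters in the extracted ReadStatistics nest strictly: every zero-copy byte is also counted as short-circuit, local, and total, which is exactly what the add*Bytes methods above enforce. A worked example of the invariant (arithmetic only, following those methods):

// After addZeroCopyBytes(100) followed by addRemoteBytes(50):
//   totalBytesRead             = 150
//   totalLocalBytesRead        = 100
//   totalShortCircuitBytesRead = 100
//   totalZeroCopyBytesRead     = 100
// Hence getRemoteBytesRead() = 150 - 100 = 50.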

hadoop git commit: HDFS-8905. Refactor DFSInputStream#ReaderStrategy. Contributed by Kai Zheng and Sammi Chen

2016-08-24 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/trunk ec252ce0f -> 793447f79


HDFS-8905. Refactor DFSInputStream#ReaderStrategy. Contributed by Kai Zheng and 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/793447f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/793447f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/793447f7

Branch: refs/heads/trunk
Commit: 793447f79924c97c2b562d5e41fa85adf19673fe
Parents: ec252ce
Author: Kai Zheng 
Authored: Wed Aug 24 21:57:23 2016 +0800
Committer: Kai Zheng 
Committed: Wed Aug 24 21:57:23 2016 +0800

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 235 ++-
 .../hadoop/hdfs/DFSStripedInputStream.java  |  23 +-
 .../org/apache/hadoop/hdfs/ReadStatistics.java  | 106 +
 .../org/apache/hadoop/hdfs/ReaderStrategy.java  | 215 +
 .../hadoop/hdfs/client/HdfsDataInputStream.java |   3 +-
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  |  16 ++
 .../hadoop/hdfs/TestExternalBlockReader.java|   4 +-
 7 files changed, 374 insertions(+), 228 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/793447f7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 6132f83..7a10ba4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -71,6 +71,7 @@ import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
+import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -145,94 +146,6 @@ public class DFSInputStream extends FSInputStream
 return extendedReadBuffers;
   }
 
-  public static class ReadStatistics {
-public ReadStatistics() {
-  clear();
-}
-
-public ReadStatistics(ReadStatistics rhs) {
-  this.totalBytesRead = rhs.getTotalBytesRead();
-  this.totalLocalBytesRead = rhs.getTotalLocalBytesRead();
-  this.totalShortCircuitBytesRead = rhs.getTotalShortCircuitBytesRead();
-  this.totalZeroCopyBytesRead = rhs.getTotalZeroCopyBytesRead();
-}
-
-/**
- * @return The total bytes read.  This will always be at least as
- * high as the other numbers, since it includes all of them.
- */
-public long getTotalBytesRead() {
-  return totalBytesRead;
-}
-
-/**
- * @return The total local bytes read.  This will always be at least
- * as high as totalShortCircuitBytesRead, since all short-circuit
- * reads are also local.
- */
-public long getTotalLocalBytesRead() {
-  return totalLocalBytesRead;
-}
-
-/**
- * @return The total short-circuit local bytes read.
- */
-public long getTotalShortCircuitBytesRead() {
-  return totalShortCircuitBytesRead;
-}
-
-/**
- * @return The total number of zero-copy bytes read.
- */
-public long getTotalZeroCopyBytesRead() {
-  return totalZeroCopyBytesRead;
-}
-
-/**
- * @return The total number of bytes read which were not local.
- */
-public long getRemoteBytesRead() {
-  return totalBytesRead - totalLocalBytesRead;
-}
-
-void addRemoteBytes(long amt) {
-  this.totalBytesRead += amt;
-}
-
-void addLocalBytes(long amt) {
-  this.totalBytesRead += amt;
-  this.totalLocalBytesRead += amt;
-}
-
-void addShortCircuitBytes(long amt) {
-  this.totalBytesRead += amt;
-  this.totalLocalBytesRead += amt;
-  this.totalShortCircuitBytesRead += amt;
-}
-
-void addZeroCopyBytes(long amt) {
-  this.totalBytesRead += amt;
-  this.totalLocalBytesRead += amt;
-  this.totalShortCircuitBytesRead += amt;
-  this.totalZeroCopyBytesRead += amt;
-}
-
-void clear() {
-  this.totalBytesRead = 0;
-  this.totalLocalBytesRead = 0;
-  this.totalShortCircuitBytesRead = 0;
-  this.totalZeroCopyBytesRead = 0;
-}
-
-private long totalBytesRead;
-
-private long totalLocalBytesRead;
-
-private long totalShortCircuitBytesRead;
-
-private long totalZeroCopyBytesRead;
-  }
-
   /**
* This variable tracks t

hadoop git commit: MAPREDUCE-6578. Add support for HDFS heterogeneous storage testing to TestDFSIO. Contributed by Wei Zhou and Sammi Chen

2016-08-24 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/trunk 793447f79 -> 0ce1ab95c


MAPREDUCE-6578. Add support for HDFS heterogeneous storage testing to 
TestDFSIO. Contributed by Wei Zhou and Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ce1ab95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ce1ab95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ce1ab95

Branch: refs/heads/trunk
Commit: 0ce1ab95cc1178f9ea763fd1f5a65a890b23b0de
Parents: 793447f
Author: Kai Zheng 
Authored: Wed Aug 24 22:17:05 2016 +0800
Committer: Kai Zheng 
Committed: Wed Aug 24 22:17:05 2016 +0800

--
 .../java/org/apache/hadoop/fs/TestDFSIO.java| 53 +---
 1 file changed, 47 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ce1ab95/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
index e7aa66b..05d4d77 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
@@ -29,6 +29,7 @@ import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.PrintStream;
 import java.text.DecimalFormat;
+import java.util.Collection;
 import java.util.Date;
 import java.util.Random;
 import java.util.StringTokenizer;
@@ -36,7 +37,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
@@ -102,7 +105,8 @@ public class TestDFSIO implements Tool {
 " [-compression codecClassName]" +
 " [-nrFiles N]" +
 " [-size Size[B|KB|MB|GB|TB]]" +
-" [-resFile resultFileName] [-bufferSize Bytes]";
+" [-resFile resultFileName] [-bufferSize Bytes]" +
+" [-storagePolicy storagePolicyName]";
 
   private Configuration config;
 
@@ -305,7 +309,7 @@ public class TestDFSIO implements Tool {
 writer = null;
   }
 }
-LOG.info("created control files for: "+nrFiles+" files");
+LOG.info("created control files for: " + nrFiles + " files");
   }
 
   private static String getFileName(int fIdx) {
@@ -326,6 +330,7 @@ public class TestDFSIO implements Tool {
*/
   private abstract static class IOStatMapper extends IOMapperBase {
 protected CompressionCodec compressionCodec;
+protected String blockStoragePolicy;
 
 IOStatMapper() {
 }
@@ -350,6 +355,8 @@ public class TestDFSIO implements Tool {
 compressionCodec = (CompressionCodec)
 ReflectionUtils.newInstance(codec, getConf());
   }
+
+  blockStoragePolicy = getConf().get("test.io.block.storage.policy", null);
 }
 
 @Override // IOMapperBase
@@ -389,8 +396,11 @@ public class TestDFSIO implements Tool {
 @Override // IOMapperBase
 public Closeable getIOStream(String name) throws IOException {
   // create file
-  OutputStream out =
-  fs.create(new Path(getDataDir(getConf()), name), true, bufferSize);
+  Path filePath = new Path(getDataDir(getConf()), name);
+  OutputStream out = fs.create(filePath, true, bufferSize);
+  if (blockStoragePolicy != null) {
+fs.setStoragePolicy(filePath, blockStoragePolicy);
+  }
   if(compressionCodec != null)
 out = compressionCodec.createOutputStream(out);
   LOG.info("out = " + out.getClass().getName());
@@ -713,8 +723,9 @@ public class TestDFSIO implements Tool {
   System.err.print(StringUtils.stringifyException(e));
   res = -2;
 }
-if(res == -1)
-  System.err.print(USAGE);
+if (res == -1) {
+  System.err.println(USAGE);
+}
 System.exit(res);
   }
 
@@ -727,6 +738,7 @@ public class TestDFSIO implements Tool {
 long skipSize = 0;
 String resFileName = DEFAULT_RES_FILE_NAME;
 String compressionClass = null;
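
A sketch of how the new knob travels: the -storagePolicy CLI value lands in the job configuration under the key read in IOStatMapper#configure above, and each write mapper applies it right after creating its file. The policy name below is illustrative:

Configuration conf = new Configuration();
// Key per the IOStatMapper hunk; "ALL_SSD" is just an example policy name.
conf.set("test.io.block.storage.policy", "ALL_SSD");
// ... then, inside getIOStream() per the hunk above:
//   fs.setStoragePolicy(filePath, blockStoragePolicy);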

hadoop git commit: MAPREDUCE-6578. Add support for HDFS heterogeneous storage testing to TestDFSIO. Contributed by Wei Zhou and Sammi Chen

2016-08-24 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-alpha1 675dfbeed -> 39ca25a20


MAPREDUCE-6578. Add support for HDFS heterogeneous storage testing to 
TestDFSIO. Contributed by Wei Zhou and Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39ca25a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39ca25a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39ca25a2

Branch: refs/heads/branch-3.0.0-alpha1
Commit: 39ca25a20343653428b321d3c6245a4c86ad66a7
Parents: 675dfbe
Author: Kai Zheng 
Authored: Wed Aug 24 22:23:41 2016 +0800
Committer: Kai Zheng 
Committed: Wed Aug 24 22:23:41 2016 +0800

--
 .../java/org/apache/hadoop/fs/TestDFSIO.java| 53 +---
 1 file changed, 47 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ca25a2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
index b1595e8..c609f3d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
@@ -29,6 +29,7 @@ import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.PrintStream;
 import java.text.DecimalFormat;
+import java.util.Collection;
 import java.util.Date;
 import java.util.Random;
 import java.util.StringTokenizer;
@@ -36,7 +37,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
@@ -102,7 +105,8 @@ public class TestDFSIO implements Tool {
 " [-compression codecClassName]" +
 " [-nrFiles N]" +
 " [-size Size[B|KB|MB|GB|TB]]" +
-" [-resFile resultFileName] [-bufferSize Bytes]";
+" [-resFile resultFileName] [-bufferSize Bytes]" +
+" [-storagePolicy storagePolicyName]";
 
   private Configuration config;
 
@@ -319,7 +323,7 @@ public class TestDFSIO implements Tool {
 writer = null;
   }
 }
-LOG.info("created control files for: "+nrFiles+" files");
+LOG.info("created control files for: " + nrFiles + " files");
   }
 
   private static String getFileName(int fIdx) {
@@ -340,6 +344,7 @@ public class TestDFSIO implements Tool {
*/
   private abstract static class IOStatMapper extends IOMapperBase {
 protected CompressionCodec compressionCodec;
+protected String blockStoragePolicy;
 
 IOStatMapper() {
 }
@@ -364,6 +369,8 @@ public class TestDFSIO implements Tool {
 compressionCodec = (CompressionCodec)
 ReflectionUtils.newInstance(codec, getConf());
   }
+
+  blockStoragePolicy = getConf().get("test.io.block.storage.policy", null);
 }
 
 @Override // IOMapperBase
@@ -403,8 +410,11 @@ public class TestDFSIO implements Tool {
 @Override // IOMapperBase
 public Closeable getIOStream(String name) throws IOException {
   // create file
-  OutputStream out =
-  fs.create(new Path(getDataDir(getConf()), name), true, bufferSize);
+  Path filePath = new Path(getDataDir(getConf()), name);
+  OutputStream out = fs.create(filePath, true, bufferSize);
+  if (blockStoragePolicy != null) {
+fs.setStoragePolicy(filePath, blockStoragePolicy);
+  }
   if(compressionCodec != null)
 out = compressionCodec.createOutputStream(out);
   LOG.info("out = " + out.getClass().getName());
@@ -713,8 +723,9 @@ public class TestDFSIO implements Tool {
   System.err.print(StringUtils.stringifyException(e));
   res = -2;
 }
-if(res == -1)
-  System.err.print(USAGE);
+if (res == -1) {
+  System.err.println(USAGE);
+}
 System.exit(res);
   }
 
@@ -727,6 +738,7 @@ public class TestDFSIO implements Tool {
 long skipSize = 0;
 String resFileName = DEFAULT_RES_FILE_NAME;
String compressionClass = null;

hadoop git commit: HDFS-10795. Fix an error in ReaderStrategy#ByteBufferStrategy. Contributed by Sammi Chen

2016-08-25 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/trunk 81485dbfc -> f4a21d3ab


HDFS-10795. Fix an error in ReaderStrategy#ByteBufferStrategy. Contributed by 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4a21d3a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4a21d3a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4a21d3a

Branch: refs/heads/trunk
Commit: f4a21d3abaa7c5a9f0a0d8417e81f7eaf3d1b29a
Parents: 81485db
Author: Kai Zheng 
Authored: Sat Aug 27 10:54:25 2016 +0800
Committer: Kai Zheng 
Committed: Sat Aug 27 10:54:25 2016 +0800

--
 .../src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4a21d3a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
index d75a8ef..c984c3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
@@ -181,7 +181,7 @@ class ByteBufferStrategy implements ReaderStrategy {
int length) throws IOException {
 ByteBuffer tmpBuf = readBuf.duplicate();
 tmpBuf.limit(tmpBuf.position() + length);
-int nRead = blockReader.read(readBuf.slice());
+int nRead = blockReader.read(tmpBuf);
 // Only when data are read, update the position
 if (nRead > 0) {
   readBuf.position(readBuf.position() + nRead);
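
Why the one-line fix matters: readBuf.slice() spans from the position all the way to the original limit, ignoring the length cap, whereas tmpBuf is a duplicate whose limit was explicitly trimmed to position + length. A self-contained sketch of the duplicate-and-cap pattern (plain java.nio; the data source is a stand-in for blockReader.read):

import java.nio.ByteBuffer;

public class BoundedReadDemo {
  public static void main(String[] args) {
    ByteBuffer readBuf = ByteBuffer.allocate(1024);
    int length = 128; // at most this many bytes may be read in this call

    ByteBuffer tmpBuf = readBuf.duplicate();  // shares content, not position/limit
    tmpBuf.limit(tmpBuf.position() + length); // cap the read at 'length' bytes

    int nRead = fillSomeBytes(tmpBuf);        // stand-in for blockReader.read(tmpBuf)
    if (nRead > 0) {
      // The duplicate advanced, not readBuf itself, so sync the position.
      readBuf.position(readBuf.position() + nRead);
    }
    System.out.println("read " + nRead + ", position now " + readBuf.position());
  }

  // Hypothetical data source, present only to make the demo runnable.
  static int fillSomeBytes(ByteBuffer buf) {
    int n = Math.min(buf.remaining(), 64);
    for (int i = 0; i < n; i++) {
      buf.put((byte) i);
    }
    return n;
  }
}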





hadoop git commit: HDFS-10795. Fix an error in ReaderStrategy#ByteBufferStrategy. Contributed by Sammi Chen

2016-08-25 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-alpha1 4e9129b7e -> 743f418ae


HDFS-10795. Fix an error in ReaderStrategy#ByteBufferStrategy. Contributed by 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/743f418a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/743f418a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/743f418a

Branch: refs/heads/branch-3.0.0-alpha1
Commit: 743f418ae05bd5b7583702773f1706ea7219c91a
Parents: 4e9129b
Author: Kai Zheng 
Authored: Sat Aug 27 10:56:12 2016 +0800
Committer: Kai Zheng 
Committed: Sat Aug 27 10:56:12 2016 +0800

--
 .../src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/743f418a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
index d75a8ef..c984c3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
@@ -181,7 +181,7 @@ class ByteBufferStrategy implements ReaderStrategy {
int length) throws IOException {
 ByteBuffer tmpBuf = readBuf.duplicate();
 tmpBuf.limit(tmpBuf.position() + length);
-int nRead = blockReader.read(readBuf.slice());
+int nRead = blockReader.read(tmpBuf);
 // Only when data are read, update the position
 if (nRead > 0) {
   readBuf.position(readBuf.position() + nRead);





[05/50] [abbrv] hadoop git commit: HDFS-10798. Make the threshold of reporting FSNamesystem lock contention configurable. Contributed by Erik Krogen.

2016-09-06 Thread drankye
HDFS-10798. Make the threshold of reporting FSNamesystem lock contention 
configurable. Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/407b519f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/407b519f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/407b519f

Branch: refs/heads/HADOOP-12756
Commit: 407b519fb14f79f19ebc4fbdf08204336a7acf77
Parents: 8b7adf4
Author: Zhe Zhang 
Authored: Fri Aug 26 14:19:55 2016 -0700
Committer: Zhe Zhang 
Committed: Fri Aug 26 14:19:55 2016 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java   |  5 +
 .../hadoop/hdfs/server/namenode/FSNamesystem.java| 10 --
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  |  9 +
 .../hdfs/server/namenode/TestFSNamesystem.java   | 15 +--
 4 files changed, 31 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/407b519f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3385751..b4cce4a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -407,6 +407,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final long
   DFS_NAMENODE_MAX_LOCK_HOLD_TO_RELEASE_LEASE_MS_DEFAULT = 25;
 
+  // Threshold for how long a write lock must be held for the event to be 
logged
+  public static final String  
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY =
+  "dfs.namenode.write-lock-reporting-threshold-ms";
+  public static final long
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 1000L;
+
   public static final String  DFS_UPGRADE_DOMAIN_FACTOR = 
"dfs.namenode.upgrade.domain.factor";
   public static final int DFS_UPGRADE_DOMAIN_FACTOR_DEFAULT = 
DFS_REPLICATION_DEFAULT;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/407b519f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 29f09b9..05fd709 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -71,6 +71,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT;
@@ -819,6 +821,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   DFS_NAMENODE_MAX_LOCK_HOLD_TO_RELEASE_LEASE_MS_KEY,
   DFS_NAMENODE_MAX_LOCK_HOLD_TO_RELEASE_LEASE_MS_DEFAULT);
 
+      this.writeLockReportingThreshold = conf.getLong(
+          DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
+          DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+
   // For testing purposes, allow the DT secret manager to be started regardless
   // of whether security is enabled.
   alwaysUseDelegationTokensForTests = conf.getBoolean(
@@ -1498,7 +1504,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   /** Threshold (ms) for long holding write lock report. */
-  static final short WRITELOCK_REPORTING_THRESHOLD = 1000;
+  private long writeLockReportingThreshold;
   /** Last time stamp for write lock. Keep the longest one for multi-entrance.*/
   private long writeLockHeldTimeStamp;
@@ -

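The idea behind the new key is easy to see in isolation: time the outermost write-lock hold and log when it crosses the configured threshold. Below is a minimal, self-contained sketch of that mechanism; the class and field names are invented for the example and this is not the actual FSNamesystem code.

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    /** Illustrative only: report when the outermost write-lock hold is long. */
    class InstrumentedWriteLock {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final long reportingThresholdMs; // e.g. the 1000 ms default above
      private long writeLockAcquiredAtMs;

      InstrumentedWriteLock(long reportingThresholdMs) {
        this.reportingThresholdMs = reportingThresholdMs;
      }

      void writeLock() {
        lock.writeLock().lock();
        if (lock.getWriteHoldCount() == 1) { // first (outermost) acquisition
          writeLockAcquiredAtMs = System.currentTimeMillis();
        }
      }

      void writeUnlock() {
        boolean releasing = lock.getWriteHoldCount() == 1; // fully releasing
        long heldMs = releasing
            ? System.currentTimeMillis() - writeLockAcquiredAtMs : 0;
        lock.writeLock().unlock();
        if (releasing && heldMs >= reportingThresholdMs) {
          System.out.println("Write lock held for " + heldMs + " ms");
        }
      }
    }

Tracking only the outermost acquisition mirrors the "keep the longest one for multi-entrance" comment above: re-entrant acquisitions do not reset the timestamp.
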
[07/50] [abbrv] hadoop git commit: HDFS-10584. Allow long-running Mover tool to login with keytab. Contributed by Rakesh R.

2016-09-06 Thread drankye
HDFS-10584. Allow long-running Mover tool to login with keytab. Contributed by 
Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e806db71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e806db71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e806db71

Branch: refs/heads/HADOOP-12756
Commit: e806db719053a5b2a7b14f47e6f2962e70008d25
Parents: a445b82
Author: Zhe Zhang 
Authored: Fri Aug 26 16:43:25 2016 -0700
Committer: Zhe Zhang 
Committed: Fri Aug 26 16:43:45 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  23 +-
 .../src/main/resources/hdfs-default.xml |  40 
 .../hadoop/hdfs/server/mover/TestMover.java | 212 +++
 4 files changed, 245 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e806db71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b4cce4a..2eff3b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -487,6 +487,15 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_MOVER_MOVERTHREADS_DEFAULT = 1000;
   public static final String  DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY = "dfs.mover.retry.max.attempts";
   public static final int DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
+  public static final String  DFS_MOVER_KEYTAB_ENABLED_KEY =
+      "dfs.mover.keytab.enabled";
+  public static final boolean DFS_MOVER_KEYTAB_ENABLED_DEFAULT = false;
+  public static final String  DFS_MOVER_ADDRESS_KEY = "dfs.mover.address";
+  public static final String  DFS_MOVER_ADDRESS_DEFAULT= "0.0.0.0:0";
+  public static final String  DFS_MOVER_KEYTAB_FILE_KEY =
+      "dfs.mover.keytab.file";
+  public static final String  DFS_MOVER_KERBEROS_PRINCIPAL_KEY =
+      "dfs.mover.kerberos.principal";
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e806db71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index cd37b15b..fdb6cfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -47,7 +47,10 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
@@ -57,6 +60,7 @@ import java.io.BufferedReader;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.text.DateFormat;
 import java.util.*;
@@ -579,6 +583,22 @@ public class Mover {
 }
   }
 
+  private static void checkKeytabAndInit(Configuration conf)
+  throws IOException {
+if (conf.getBoolean(DFSConfigKeys.DFS_MOVER_KEYTAB_ENABLED_KEY,
+DFSConfigKeys.DFS_MOVER_KEYTAB_ENABLED_DEFAULT)) {
+  LOG.info("Keytab is configured, will login using keytab.");
+  UserGroupInformation.setConfiguration(conf);
+  String addr = conf.get(DFSConfigKeys.DFS_MOVER_ADDRESS_KEY,
+  DFSConfigKeys.DFS_MOVER_ADDRESS_DEFAULT);
+  InetSocketAddress socAddr = NetUtils.createSocketAddr(addr, 0,
+  DFSConfigKeys.DFS_MOVER_ADDRESS_KEY);
+  SecurityUtil.login(conf, DFSConfigKeys.DFS_MOVER_KEYTAB_FILE_KEY,
+  DFSConfigKeys.DFS_MOVER_KERBEROS_PRINCIPAL_KEY,
+  socAddr.getHostName());
+}
+  }
+
   static int run(Map> namenodes, Configuration conf)
   t

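For reference, enabling this login path programmatically only requires setting the new keys before invoking the Mover; the keytab path and principal below are placeholders, not values from this commit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    class MoverKeytabSetup {
      static Configuration secureMoverConf() {
        Configuration conf = new Configuration();
        // Opt in to keytab-based login for the long-running Mover.
        conf.setBoolean(DFSConfigKeys.DFS_MOVER_KEYTAB_ENABLED_KEY, true);
        // Placeholder keytab path and principal -- site-specific values.
        conf.set(DFSConfigKeys.DFS_MOVER_KEYTAB_FILE_KEY,
            "/etc/security/keytabs/hdfs.keytab");
        conf.set(DFSConfigKeys.DFS_MOVER_KERBEROS_PRINCIPAL_KEY,
            "hdfs/_HOST@EXAMPLE.COM");
        return conf;
      }
    }

The same keys can of course be set in hdfs-site.xml instead; checkKeytabAndInit() above only consults the Configuration, so either route works.
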
[17/50] [abbrv] hadoop git commit: HADOOP-13559. Remove close() within try-with-resources in ChecksumFileSystem/ChecksumFs classes. (Contributed by Aaron Fabbri)

2016-09-06 Thread drankye
HADOOP-13559. Remove close() within try-with-resources in 
ChecksumFileSystem/ChecksumFs classes. (Contributed by Aaron Fabbri)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fcb04c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fcb04c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fcb04c1

Branch: refs/heads/HADOOP-12756
Commit: 6fcb04c1780ac3dca5b986f1bcd558fffccb3eb9
Parents: 69f7277
Author: Mingliang Liu 
Authored: Mon Aug 29 13:04:28 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 29 13:04:28 2016 -0700

--
 .../src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java  | 1 -
 .../src/main/java/org/apache/hadoop/fs/ChecksumFs.java  | 1 -
 2 files changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fcb04c1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index e4c0b33..e0ce327 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -197,7 +197,6 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
new ChecksumFSInputChecker(fs, file)) {
 checker.seek(position);
 nread = checker.read(b, off, len);
-checker.close();
   }
   return nread;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fcb04c1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 6e98db5..3972033 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -179,7 +179,6 @@ public abstract class ChecksumFs extends FilterFs {
new ChecksumFSInputChecker(fs, file)) {
 checker.seek(position);
 nread = checker.read(b, off, len);
-checker.close();
   }
   return nread;
 }

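The removed close() calls were redundant because try-with-resources already closes the resource when the block exits, so the explicit call closed the checker a second time. A standalone demonstration of the behavior:

    class TryWithResourcesDemo {
      static class Resource implements AutoCloseable {
        @Override
        public void close() {
          System.out.println("close() called");
        }
      }

      public static void main(String[] args) {
        try (Resource r = new Resource()) {
          // Work with r here. An explicit r.close() at this point would
          // run close() a second time when the block exits -- exactly the
          // redundancy removed from ChecksumFileSystem/ChecksumFs above.
        } // the compiler inserts r.close() here
        // Output: "close() called" is printed exactly once.
      }
    }
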




[11/50] [abbrv] hadoop git commit: HDFS-10652. Add a unit test for HDFS-4660. Contributed by Vinayakumar B., Wei-Chiu Chuang, Yongjun Zhang.

2016-09-06 Thread drankye
HDFS-10652. Add a unit test for HDFS-4660. Contributed by Vinayakumar B., 
Wei-Chiu Chuang, Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2581715
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2581715
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2581715

Branch: refs/heads/HADOOP-12756
Commit: c25817159af17753b398956cfe6ff14984801b01
Parents: 19c743c
Author: Yongjun Zhang 
Authored: Sat Aug 27 22:46:53 2016 -0700
Committer: Yongjun Zhang 
Committed: Sat Aug 27 22:51:31 2016 -0700

--
 .../hdfs/server/datanode/BlockReceiver.java |   1 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |   3 +-
 .../server/datanode/DataNodeFaultInjector.java  |   3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   2 +-
 .../TestClientProtocolForPipelineRecovery.java  | 138 ++-
 5 files changed, 144 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2581715/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index b6f0b01..522d577 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -1306,6 +1306,7 @@ class BlockReceiver implements Closeable {
   long ackRecvNanoTime = 0;
   try {
 if (type != PacketResponderType.LAST_IN_PIPELINE && !mirrorError) {
+      DataNodeFaultInjector.get().failPipeline(replicaInfo, mirrorAddr);
   // read an ack from downstream datanode
   ack.readFields(downstreamIn);
   ackRecvNanoTime = System.nanoTime();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2581715/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index ad3c172..08a1fc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2403,7 +2403,8 @@ public class DataNode extends ReconfigurableBase
 blockSender.sendBlock(out, unbufOut, null);
 
 // no response necessary
-LOG.info(getClass().getSimpleName() + ": Transmitted " + b
+LOG.info(getClass().getSimpleName() + ", at "
++ DataNode.this.getDisplayName() + ": Transmitted " + b
 + " (numBytes=" + b.getNumBytes() + ") to " + curTarget);
 
 // read ack

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2581715/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 4ecbdc0..931c124 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -55,4 +55,7 @@ public class DataNodeFaultInjector {
   public void noRegistration() throws IOException { }
 
   public void failMirrorConnection() throws IOException { }
+
+  public void failPipeline(ReplicaInPipelineInterface replicaInfo,
+  String mirrorAddr) throws IOException { }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2581715/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index c4d924e..129024b 100644
--- a/hadoop-hdfs-project

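The failPipeline() hook added above follows the standard fault-injection pattern in the datanode: production code calls the no-op default through DataNodeFaultInjector.get(), and a test swaps in a throwing instance to force a failure at that exact point. A self-contained sketch of the pattern, using invented, simplified class names rather than the actual Hadoop types:

    import java.io.IOException;

    class FaultInjector {
      private static FaultInjector instance = new FaultInjector();
      static FaultInjector get() { return instance; }
      static void set(FaultInjector injector) { instance = injector; }
      // Default is a no-op, so production code pays essentially no cost.
      void failPipeline(String mirrorAddr) throws IOException { }
    }

    class PipelineUnderTest {
      void ackFromDownstream(String mirrorAddr) throws IOException {
        FaultInjector.get().failPipeline(mirrorAddr); // injection point
        // ... read the real ack here ...
      }
    }

    class InjectionDemo {
      public static void main(String[] args) {
        FaultInjector.set(new FaultInjector() {
          @Override
          void failPipeline(String mirrorAddr) throws IOException {
            throw new IOException("injected pipeline failure to " + mirrorAddr);
          }
        });
        try {
          new PipelineUnderTest().ackFromDownstream("dn2:9866");
        } catch (IOException e) {
          System.out.println("test observed: " + e.getMessage());
        }
      }
    }
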
[08/50] [abbrv] hadoop git commit: YARN-5327. API changes required to support recurring reservations in the YARN ReservationSystem. (Sangeetha Abdu Jyothi via Subru).

2016-09-06 Thread drankye
YARN-5327. API changes required to support recurring reservations in the YARN 
ReservationSystem. (Sangeetha Abdu Jyothi via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b930dc3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b930dc3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b930dc3e

Branch: refs/heads/HADOOP-12756
Commit: b930dc3ec06afa479a249490976e3e127d201706
Parents: e806db7
Author: Subru Krishnan 
Authored: Fri Aug 26 16:58:47 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Aug 26 16:58:47 2016 -0700

--
 .../yarn/api/records/ReservationDefinition.java | 49 +++-
 .../src/main/proto/yarn_protos.proto|  1 +
 .../impl/pb/ReservationDefinitionPBImpl.java| 21 ++-
 .../reservation/ReservationInputValidator.java  | 14 +
 .../TestReservationInputValidator.java  | 61 
 5 files changed, 143 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b930dc3e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
index 10f592a..8ef881b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
@@ -37,13 +37,24 @@ public abstract class ReservationDefinition {
   @Public
   @Unstable
   public static ReservationDefinition newInstance(long arrival, long deadline,
-  ReservationRequests reservationRequests, String name) {
+  ReservationRequests reservationRequests, String name,
+  String recurrenceExpression) {
 ReservationDefinition rDefinition =
 Records.newRecord(ReservationDefinition.class);
 rDefinition.setArrival(arrival);
 rDefinition.setDeadline(deadline);
 rDefinition.setReservationRequests(reservationRequests);
 rDefinition.setReservationName(name);
+rDefinition.setRecurrenceExpression(recurrenceExpression);
+return rDefinition;
+  }
+
+  @Public
+  @Unstable
+  public static ReservationDefinition newInstance(long arrival, long deadline,
+  ReservationRequests reservationRequests, String name) {
+ReservationDefinition rDefinition =
+newInstance(arrival, deadline, reservationRequests, name, "0");
 return rDefinition;
   }
 
@@ -134,4 +145,40 @@ public abstract class ReservationDefinition {
   @Evolving
   public abstract void setReservationName(String name);
 
+  /**
+   * Get the recurrence of this reservation representing the time period of
+   * the periodic job. Currently, only long values are supported. Later,
+   * support for regular expressions denoting arbitrary recurrence patterns
+   * (e.g., every Tuesday and Thursday) will be added.
+   * Recurrence is represented in milliseconds for periodic jobs.
+   * Recurrence is 0 for non-periodic jobs. Periodic jobs are valid until they
+   * are explicitly cancelled and have higher priority than non-periodic jobs
+   * (during initial placement and replanning). Periodic job allocations are
+   * consistent across runs (flexibility in allocation is leveraged only during
+   * initial placement, allocations remain consistent thereafter).
+   *
+   * @return recurrence of this reservation
+   */
+  @Public
+  @Evolving
+  public abstract String getRecurrenceExpression();
+
+  /**
+   * Set the recurrence of this reservation representing the time period of
+   * the periodic job. Currently, only long values are supported. Later,
+   * support for regular expressions denoting arbitrary recurrence patterns
+   * (e.g., every Tuesday and Thursday) will be added.
+   * Recurrence is represented in milliseconds for periodic jobs.
+   * Recurrence is 0 for non-periodic jobs. Periodic jobs are valid until they
+   * are explicitly cancelled and have higher priority than non-periodic jobs
+   * (during initial placement and replanning). Periodic job allocations are
+   * consistent across runs (flexibility in allocation is leveraged only during
+   * initial placement, allocations remain consistent thereafter).
+   *
+   * @param recurrenceExpression recurrence interval of this reservation
+   */
+  @Public
+  @Evolving
+  public abstract void setRecurrenceExpression(String recurrenceExpression);
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b93

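In practice the new overload lets a client mark a reservation periodic by passing the period in milliseconds as a string. A sketch using the API added above; the ReservationRequests argument is assumed to be built elsewhere, and the reservation name is a placeholder:

    import org.apache.hadoop.yarn.api.records.ReservationDefinition;
    import org.apache.hadoop.yarn.api.records.ReservationRequests;

    class RecurringReservationExample {
      // 'requests' is assumed to be constructed elsewhere, e.g. via
      // ReservationRequests.newInstance(...).
      static ReservationDefinition nightly(ReservationRequests requests) {
        long arrival = System.currentTimeMillis();
        long deadline = arrival + 2 * 60 * 60 * 1000L;            // 2-hour window
        String everyDayMs = String.valueOf(24L * 60 * 60 * 1000); // 86400000 ms
        // A recurrence of "0" (what the old overload now delegates with)
        // means non-periodic.
        return ReservationDefinition.newInstance(
            arrival, deadline, requests, "nightly-etl", everyDayMs);
      }
    }
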
[25/50] [abbrv] hadoop git commit: HDFS-10760. DataXceiver#run() should not log InvalidToken exception as an error. Contributed by Pan Yuxuan.

2016-09-06 Thread drankye
HDFS-10760. DataXceiver#run() should not log InvalidToken exception as an 
error. Contributed by Pan Yuxuan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4ee6915
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4ee6915
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4ee6915

Branch: refs/heads/HADOOP-12756
Commit: c4ee6915a14e00342755d7cdcbf2d61518f306aa
Parents: af50860
Author: Wei-Chiu Chuang 
Authored: Tue Aug 30 10:43:20 2016 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 30 10:43:20 2016 -0700

--
 .../org/apache/hadoop/hdfs/server/datanode/DataXceiver.java| 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4ee6915/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index c2cf76e..fee16b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -312,6 +312,12 @@ class DataXceiver extends Receiver implements Runnable {
 } else {
   LOG.info(s1 + "; " + t);  
 }
+  } else if (t instanceof InvalidToken) {
+// The InvalidToken exception has already been logged in
+// checkAccess() method and this is not a server error.
+if (LOG.isTraceEnabled()) {
+  LOG.trace(s, t);
+}
   } else {
 LOG.error(s, t);
   }





[06/50] [abbrv] hadoop git commit: HDFS-10793. Fix HdfsAuditLogger binary incompatibility introduced by HDFS-9184. Contributed by Manoj Govindassamy.

2016-09-06 Thread drankye
HDFS-10793. Fix HdfsAuditLogger binary incompatibility introduced by HDFS-9184. 
Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a445b82b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a445b82b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a445b82b

Branch: refs/heads/HADOOP-12756
Commit: a445b82baaa58dcaaa3831e724a9915e9dde57c5
Parents: 407b519
Author: Andrew Wang 
Authored: Fri Aug 26 15:39:18 2016 -0700
Committer: Andrew Wang 
Committed: Fri Aug 26 15:39:18 2016 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  | 10 +
 .../hdfs/server/namenode/HdfsAuditLogger.java   | 23 
 2 files changed, 29 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a445b82b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 05fd709..52fbaa7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7010,6 +7010,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 logAuditMessage(sb.toString());
   }
 }
+
+@Override
+public void logAuditEvent(boolean succeeded, String userName,
+InetAddress addr, String cmd, String src, String dst,
+FileStatus status, UserGroupInformation ugi,
+DelegationTokenSecretManager dtSecretManager) {
+  this.logAuditEvent(succeeded, userName, addr, cmd, src, dst, status,
+  null /*CallerContext*/, ugi, dtSecretManager);
+}
+
 public void logAuditMessage(String message) {
   auditLog.info(message);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a445b82b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java
index 3e95ce1..894c4df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java
@@ -43,9 +43,9 @@ public abstract class HdfsAuditLogger implements AuditLogger {
 
   /**
    * Same as
-   * {@link #logAuditEvent(boolean, String, InetAddress, String, String, String, FileStatus)}
-   * with additional parameters related to logging delegation token tracking
-   * IDs.
+   * {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+   * FileStatus)} with additional parameters related to logging delegation token
+   * tracking IDs.
    * 
    * @param succeeded Whether authorization succeeded.
    * @param userName Name of the user executing the request.
@@ -55,13 +55,28 @@ public abstract class HdfsAuditLogger implements AuditLogger {
    * @param dst Path of affected destination file (if any).
    * @param stat File information for operations that change the file's metadata
    *  (permissions, owner, times, etc).
+   * @param callerContext Context information of the caller
    * @param ugi UserGroupInformation of the current user, or null if not logging
    *  token tracking information
    * @param dtSecretManager The token secret manager, or null if not logging
    *  token tracking information
    */
-  public abstract void logAuditEvent(boolean succeeded, String userName,
+  public void logAuditEvent(boolean succeeded, String userName,
       InetAddress addr, String cmd, String src, String dst,
       FileStatus stat, CallerContext callerContext, UserGroupInformation ugi,
+      DelegationTokenSecretManager dtSecretManager) {
+    logAuditEvent(succeeded, userName, addr, cmd, src, dst, stat,
+        ugi, dtSecretManager);
+  }
+
+  /**
+   * Same as
+   * {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+   * String, FileStatus, CallerContext, UserGroupInformation,
+   * DelegationTokenSecretManager)} without {@link CallerContext} information.
+   */
+  public abstract void logAuditEvent(boolean succeeded, String userName,
+  InetAddress addr

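To see why this restores binary compatibility, consider an audit logger plugin compiled against the pre-HDFS-9184 signature. With the default method above forwarding the CallerContext overload to the older one, a subclass like the following sketch keeps working unmodified; it is illustrative and assumes only the AuditLogger/HdfsAuditLogger contracts visible in this diff:

    import java.net.InetAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
    import org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger;
    import org.apache.hadoop.security.UserGroupInformation;

    /**
     * Sketch: implements only the older, CallerContext-free overload. The new
     * default implementation of the CallerContext variant forwards here, so
     * this class keeps compiling and linking against the new jar.
     */
    public class LegacyAuditLogger extends HdfsAuditLogger {
      @Override
      public void initialize(Configuration conf) {
        // no configuration needed for this sketch
      }

      @Override
      public void logAuditEvent(boolean succeeded, String userName,
          InetAddress addr, String cmd, String src, String dst,
          FileStatus stat, UserGroupInformation ugi,
          DelegationTokenSecretManager dtSecretManager) {
        System.out.println(cmd + " " + src + " by " + userName
            + (succeeded ? " allowed" : " denied"));
      }
    }
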
[15/50] [abbrv] hadoop git commit: HDFS-10807. Doc about upgrading to a version of HDFS with snapshots may be confusing. (Contributed by Mingliang Liu)

2016-09-06 Thread drankye
HDFS-10807. Doc about upgrading to a version of HDFS with snapshots may be 
confusing. (Contributed by Mingliang Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6742fb6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6742fb6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6742fb6e

Branch: refs/heads/HADOOP-12756
Commit: 6742fb6e68d349055f985eb640d845e689d75384
Parents: e1ad598
Author: Mingliang Liu 
Authored: Mon Aug 29 10:15:34 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 29 10:15:34 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6742fb6e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md
index 94a37cd..d856e8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md
@@ -113,7 +113,7 @@ Upgrading to a version of HDFS with snapshots
 
 The HDFS snapshot feature introduces a new reserved path name used to
 interact with snapshots: `.snapshot`. When upgrading from an
-older version of HDFS, existing paths named `.snapshot` need
+older version of HDFS which does not support snapshots, existing paths named `.snapshot` need
 to first be renamed or deleted to avoid conflicting with the reserved path.
 See the upgrade section in
 [the HDFS user guide](HdfsUserGuide.html#Upgrade_and_Rollback)





[10/50] [abbrv] hadoop git commit: HDFS-10795. Fix an error in ReaderStrategy#ByteBufferStrategy. Contributed by Sammi Chen

2016-09-06 Thread drankye
HDFS-10795. Fix an error in ReaderStrategy#ByteBufferStrategy. Contributed by 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4a21d3a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4a21d3a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4a21d3a

Branch: refs/heads/HADOOP-12756
Commit: f4a21d3abaa7c5a9f0a0d8417e81f7eaf3d1b29a
Parents: 81485db
Author: Kai Zheng 
Authored: Sat Aug 27 10:54:25 2016 +0800
Committer: Kai Zheng 
Committed: Sat Aug 27 10:54:25 2016 +0800

--
 .../src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4a21d3a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
index d75a8ef..c984c3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReaderStrategy.java
@@ -181,7 +181,7 @@ class ByteBufferStrategy implements ReaderStrategy {
int length) throws IOException {
 ByteBuffer tmpBuf = readBuf.duplicate();
 tmpBuf.limit(tmpBuf.position() + length);
-int nRead = blockReader.read(readBuf.slice());
+int nRead = blockReader.read(tmpBuf);
 // Only when data are read, update the position
 if (nRead > 0) {
   readBuf.position(readBuf.position() + nRead);

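The one-character fix matters because duplicate() and slice() differ: a duplicate shares the bytes but keeps an independent position/limit pair, so capping tmpBuf's limit bounds the read to 'length' bytes while leaving readBuf's cursor untouched for the caller to advance by nRead. A standalone java.nio illustration:

    import java.nio.ByteBuffer;

    class DuplicateVsSliceDemo {
      public static void main(String[] args) {
        ByteBuffer readBuf = ByteBuffer.allocate(16);
        int length = 4;

        // duplicate() shares content but has its own position/limit, so the
        // limit below caps the "read" at 'length' bytes without disturbing
        // readBuf.
        ByteBuffer tmpBuf = readBuf.duplicate();
        tmpBuf.limit(tmpBuf.position() + length);
        tmpBuf.put(new byte[] {1, 2, 3, 4}); // stand-in for blockReader.read(tmpBuf)

        System.out.println(tmpBuf.remaining()); // 0 -- capped at 'length'
        System.out.println(readBuf.position()); // 0 -- caller advances it by nRead

        // readBuf.slice(), by contrast, would expose all 16 remaining bytes
        // (its limit ignores 'length'), which is the bug fixed here.
      }
    }
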




[09/50] [abbrv] hadoop git commit: YARN-4889. Changes in AMRMClient for identifying resource-requests explicitly. (Arun Suresh via wangda)

2016-09-06 Thread drankye
YARN-4889. Changes in AMRMClient for identifying resource-requests explicitly. 
(Arun Suresh via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19c743c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19c743c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19c743c1

Branch: refs/heads/HADOOP-12756
Commit: 19c743c1bbcaf3df8f1d63e557143c960a538c42
Parents: b930dc3
Author: Wangda Tan 
Authored: Fri Aug 26 16:48:00 2016 -0700
Committer: Wangda Tan 
Committed: Fri Aug 26 17:14:12 2016 -0700

--
 .../yarn/api/records/ResourceRequest.java   |  19 +-
 .../hadoop/yarn/client/api/AMRMClient.java  | 132 +++-
 .../yarn/client/api/async/AMRMClientAsync.java  |  18 ++
 .../yarn/client/api/impl/AMRMClientImpl.java| 163 ++-
 .../client/api/impl/RemoteRequestsTable.java|  11 +-
 .../yarn/client/api/impl/TestAMRMClient.java| 200 ---
 .../impl/TestAMRMClientContainerRequest.java|   7 +-
 .../api/impl/TestDistributedScheduling.java |  24 ++-
 .../yarn/client/api/impl/TestNMClient.java  |   6 +-
 9 files changed, 468 insertions(+), 112 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19c743c1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
index 07f132c..2d6f0f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
@@ -112,6 +112,10 @@ public abstract class ResourceRequest implements Comparable {
   // Compare priority, host and capability
   int ret = r1.getPriority().compareTo(r2.getPriority());
   if (ret == 0) {
+ret = Long.compare(
+r1.getAllocationRequestId(), r2.getAllocationRequestId());
+  }
+  if (ret == 0) {
 String h1 = r1.getResourceName();
 String h2 = r2.getResourceName();
 ret = h1.compareTo(h2);
@@ -381,6 +385,7 @@ public abstract class ResourceRequest implements Comparable {
 result = prime * result + ((hostName == null) ? 0 : hostName.hashCode());
 result = prime * result + getNumContainers();
 result = prime * result + ((priority == null) ? 0 : priority.hashCode());
+    result = prime * result + Long.valueOf(getAllocationRequestId()).hashCode();
 return result;
   }
 
@@ -422,6 +427,11 @@ public abstract class ResourceRequest implements Comparable {
 .equals(other.getExecutionTypeRequest().getExecutionType())) {
   return false;
 }
+
+if (getAllocationRequestId() != other.getAllocationRequestId()) {
+  return false;
+}
+
 if (getNodeLabelExpression() == null) {
   if (other.getNodeLabelExpression() != null) {
 return false;
@@ -452,7 +462,14 @@ public abstract class ResourceRequest implements Comparable {
   int capabilityComparison =
   this.getCapability().compareTo(other.getCapability());
   if (capabilityComparison == 0) {
-return this.getNumContainers() - other.getNumContainers();
+int numContainerComparison =
+this.getNumContainers() - other.getNumContainers();
+if (numContainerComparison == 0) {
+  return Long.compare(getAllocationRequestId(),
+  other.getAllocationRequestId());
+} else {
+  return numContainerComparison;
+}
   } else {
 return capabilityComparison;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19c743c1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index 79d587a..2990c05 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -110,6 +110,7 @@ public abstract class AMRMClient extends
 final L

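The thrust of the ResourceRequest change above is that allocationRequestId now participates in ordering and equality, tie-breaking after priority and before resource name and container count. A compact, standalone comparator sketch of the same ordering, with condensed field names invented for illustration:

    import java.util.Comparator;

    class Ask {
      int priority;
      long allocationRequestId;
      String resourceName;

      Ask(int p, long id, String name) {
        priority = p; allocationRequestId = id; resourceName = name;
      }
    }

    class AskOrdering {
      // priority first, then allocationRequestId, then resource name --
      // mirroring the comparator chain added to ResourceRequest.
      static final Comparator<Ask> ORDER =
          Comparator.<Ask>comparingInt(a -> a.priority)
              .thenComparingLong(a -> a.allocationRequestId)
              .thenComparing(a -> a.resourceName);

      public static void main(String[] args) {
        Ask a = new Ask(1, 5L, "*");
        Ask b = new Ask(1, 7L, "*");
        System.out.println(ORDER.compare(a, b) < 0); // true: id 5 sorts first
      }
    }
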
[12/50] [abbrv] hadoop git commit: HADOOP-13552. RetryInvocationHandler logs all remote exceptions. Contributed by Jason Lowe

2016-09-06 Thread drankye
HADOOP-13552. RetryInvocationHandler logs all remote exceptions. Contributed by 
Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92d8f371
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92d8f371
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92d8f371

Branch: refs/heads/HADOOP-12756
Commit: 92d8f371553b88e5b3a9d3354e93f75d60d81368
Parents: c258171
Author: Jason Lowe 
Authored: Mon Aug 29 15:55:38 2016 +
Committer: Jason Lowe 
Committed: Mon Aug 29 15:55:38 2016 +

--
 .../org/apache/hadoop/io/retry/RetryInvocationHandler.java   | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92d8f371/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 7bd3a15..c657d20 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -351,9 +351,11 @@ public class RetryInvocationHandler implements RpcInvocationHandler {
 if (retryInfo.isFail()) {
   // fail.
   if (retryInfo.action.reason != null) {
-LOG.warn("Exception while invoking call #" + callId + " "
-+ proxyDescriptor.getProxyInfo().getString(method.getName())
-+ ". Not retrying because " + retryInfo.action.reason, e);
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Exception while invoking call #" + callId + " "
+  + proxyDescriptor.getProxyInfo().getString(method.getName())
+  + ". Not retrying because " + retryInfo.action.reason, e);
+}
   }
   throw e;
 }





[18/50] [abbrv] hadoop git commit: HDFS-10625. VolumeScanner to report why a block is found bad. Contributed by Rushabh S Shah and Yiqun Lin.

2016-09-06 Thread drankye
HDFS-10625. VolumeScanner to report why a block is found bad. Contributed by 
Rushabh S Shah and Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d1609dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d1609dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d1609dd

Branch: refs/heads/HADOOP-12756
Commit: 5d1609ddf275e4907bd224bf618e2aad4b262888
Parents: 6fcb04c
Author: Yongjun Zhang 
Authored: Mon Aug 29 13:58:05 2016 -0700
Committer: Yongjun Zhang 
Committed: Mon Aug 29 13:59:54 2016 -0700

--
 .../apache/hadoop/hdfs/server/datanode/BlockSender.java | 12 +---
 .../hadoop/hdfs/server/datanode/VolumeScanner.java  |  5 +++--
 2 files changed, 12 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d1609dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 7c3d778..9d9502b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -156,6 +156,9 @@ class BlockSender implements java.io.Closeable {
   /** The reference to the volume where the block is located */
   private FsVolumeReference volumeRef;
 
+  /** The replica of the block that is being read. */
+  private final Replica replica;
+
   // Cache-management related fields
   private final long readaheadLength;
 
@@ -238,7 +241,6 @@ class BlockSender implements java.io.Closeable {
 "If verifying checksum, currently must also send it.");
   }
   
-  final Replica replica;
   final long replicaVisibleLength;
   try(AutoCloseableLock lock = datanode.data.acquireDatasetLock()) {
 replica = getReplica(block, datanode);
@@ -688,8 +690,12 @@ class BlockSender implements java.io.Closeable {
   checksum.update(buf, dOff, dLen);
   if (!checksum.compare(buf, cOff)) {
 long failedPos = offset + datalen - dLeft;
-throw new ChecksumException("Checksum failed at " + failedPos,
-failedPos);
+StringBuilder replicaInfoString = new StringBuilder();
+if (replica != null) {
+  replicaInfoString.append(" for replica: " + replica.toString());
+}
+throw new ChecksumException("Checksum failed at " + failedPos
++ replicaInfoString, failedPos);
   }
   dLeft -= dLen;
   dOff += dLen;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d1609dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 7a9ecf2..3416b53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -281,12 +281,13 @@ public class VolumeScanner extends Thread {
 volume.getBasePath(), block);
 return;
   }
-  LOG.warn("Reporting bad {} on {}", block, volume.getBasePath());
+  LOG.warn("Reporting bad " + block + " with volume "
+  + volume.getBasePath(), e);
   try {
 scanner.datanode.reportBadBlocks(block, volume);
   } catch (IOException ie) {
 // This is bad, but not bad enough to shut down the scanner.
-LOG.warn("Cannot report bad " + block.getBlockId(), e);
+LOG.warn("Cannot report bad block " + block, ie);
   }
 }
   }





[02/50] [abbrv] hadoop git commit: YARN-5557. Add localize API to the ContainerManagementProtocol. Contributed by Jian He.

2016-09-06 Thread drankye
YARN-5557. Add localize API to the ContainerManagementProtocol. Contributed by 
Jian He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ef632f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ef632f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ef632f3

Branch: refs/heads/HADOOP-12756
Commit: 9ef632f3b0b0e0808116cd1c7482a205b7ebef7d
Parents: 13fb1b5
Author: Junping Du 
Authored: Fri Aug 26 09:03:44 2016 -0700
Committer: Junping Du 
Committed: Fri Aug 26 09:04:44 2016 -0700

--
 .../v2/app/launcher/TestContainerLauncher.java  |   8 +
 .../app/launcher/TestContainerLauncherImpl.java |   8 +
 .../yarn/api/ContainerManagementProtocol.java   |  16 ++
 .../ResourceLocalizationRequest.java|  84 
 .../ResourceLocalizationResponse.java   |  40 
 .../proto/containermanagement_protocol.proto|   1 +
 .../src/main/proto/yarn_service_protos.proto|   7 +
 ...ContainerManagementProtocolPBClientImpl.java |  38 +++-
 ...ontainerManagementProtocolPBServiceImpl.java |  20 ++
 .../pb/ResourceLocalizationRequestPBImpl.java   | 216 +++
 .../pb/ResourceLocalizationResponsePBImpl.java  |  69 ++
 .../hadoop/yarn/TestContainerLaunchRPC.java |   8 +
 .../yarn/TestContainerResourceIncreaseRPC.java  |   7 +
 .../hadoop/yarn/api/TestPBImplRecords.java  |  15 ++
 .../java/org/apache/hadoop/yarn/TestRPC.java|   8 +
 .../containermanager/ContainerManagerImpl.java  |  10 +
 .../nodemanager/DummyContainerManager.java  |   8 +
 .../server/resourcemanager/NodeManager.java |   8 +
 .../resourcemanager/TestAMAuthorization.java|   8 +
 .../TestApplicationMasterLauncher.java  |   8 +
 20 files changed, 577 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ef632f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
index f1c5b77..ba404a5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
@@ -32,6 +32,8 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationResponse;
 import org.junit.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -468,5 +470,11 @@ public class TestContainerLauncher {
 SignalContainerRequest request) throws YarnException, IOException {
   return null;
 }
+
+@Override
+public ResourceLocalizationResponse localize(
+    ResourceLocalizationRequest request) throws YarnException, IOException {
+  return null;
+}
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ef632f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
index d04f08c..be1cad9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
@@ -50,6 +50,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequ
 import org.apache.hadoop.yarn.api.protocolrecor

[32/50] [abbrv] hadoop git commit: HDFS-10813. DiskBalancer: Add the getNodeList method in Command. Contributed by Yiqun Lin.

2016-09-06 Thread drankye
HDFS-10813. DiskBalancer: Add the getNodeList method in Command. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20ae1fa2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20ae1fa2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20ae1fa2

Branch: refs/heads/HADOOP-12756
Commit: 20ae1fa259b36a7bc11b0f8de1ebf753c858f93c
Parents: d6d9cff
Author: Anu Engineer 
Authored: Tue Aug 30 18:42:55 2016 -0700
Committer: Anu Engineer 
Committed: Tue Aug 30 18:42:55 2016 -0700

--
 .../server/diskbalancer/command/Command.java| 44 +++-
 .../command/TestDiskBalancerCommand.java| 22 ++
 2 files changed, 65 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20ae1fa2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index a1c15ae..5acd0ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -18,7 +18,10 @@
 
 package org.apache.hadoop.hdfs.server.diskbalancer.command;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
 import org.apache.commons.lang.StringUtils;
@@ -221,7 +224,7 @@ public abstract class Command extends Configured {
* @return Set of node names
* @throws IOException
*/
-  private Set getNodeList(String listArg) throws IOException {
+  protected Set getNodeList(String listArg) throws IOException {
 URL listURL;
 String nodeData;
 Set resultSet = new TreeSet<>();
@@ -243,6 +246,37 @@ public abstract class Command extends Configured {
   }
 
   /**
+   * Returns a DiskBalancer Node list from the Cluster or null if not found.
+   *
+   * @param listArg String File URL or a comma separated list of node names.
+   * @return List of DiskBalancer Node
+   * @throws IOException
+   */
+  protected List getNodes(String listArg)
+  throws IOException {
+Set nodeNames = null;
+List nodeList = Lists.newArrayList();
+
+if ((listArg == null) || listArg.isEmpty()) {
+  return nodeList;
+}
+nodeNames = getNodeList(listArg);
+
+DiskBalancerDataNode node = null;
+if (!nodeNames.isEmpty()) {
+  for (String name : nodeNames) {
+node = getNode(name);
+
+if (node != null) {
+  nodeList.add(node);
+}
+  }
+}
+
+return nodeList;
+  }
+
+  /**
* Verifies if the command line options are sane.
*
* @param commandName - Name of the command
@@ -471,4 +505,12 @@ public abstract class Command extends Configured {
   public int getTopNodes() {
 return topNodes;
   }
+
+  /**
+   * Set DiskBalancer cluster
+   */
+  @VisibleForTesting
+  public void setCluster(DiskBalancerCluster newCluster) {
+this.cluster = newCluster;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20ae1fa2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 0d24f28..7d659af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -435,4 +436,25 @@ public class TestDiskBalancerCommand {
   miniDFS

[33/50] [abbrv] hadoop git commit: HDFS-10729. Improve log message for edit loading failures caused by FS limit checks. Contributed by Wei-Chiu Chuang.

2016-09-06 Thread drankye
HDFS-10729. Improve log message for edit loading failures caused by FS limit 
checks. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01721dd8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01721dd8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01721dd8

Branch: refs/heads/HADOOP-12756
Commit: 01721dd88ee532d20eda841254437da4dfd69db5
Parents: 20ae1fa
Author: Kihwal Lee 
Authored: Wed Aug 31 14:02:37 2016 -0500
Committer: Kihwal Lee 
Committed: Wed Aug 31 14:02:37 2016 -0500

--
 .../hadoop/hdfs/server/namenode/FSDirWriteFileOp.java   | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01721dd8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index cb639b1..077f04f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -506,10 +507,13 @@ class FSDirWriteFileOp {
 return newNode;
   }
 } catch (IOException e) {
-  if(NameNode.stateChangeLog.isDebugEnabled()) {
-NameNode.stateChangeLog.debug(
-"DIR* FSDirectory.unprotectedAddFile: exception when add "
-+ existing.getPath() + " to the file system", e);
+  NameNode.stateChangeLog.warn(
+  "DIR* FSDirectory.unprotectedAddFile: exception when add " + existing
+  .getPath() + " to the file system", e);
+  if (e instanceof FSLimitException.MaxDirectoryItemsExceededException) {
+NameNode.stateChangeLog.warn("Please increase "
++ "dfs.namenode.fs-limits.max-directory-items and make it "
++ "consistent across all NameNodes.");
   }
 }
 return null;





[36/50] [abbrv] hadoop git commit: Addendum fix for HDFS-10817 to fix failure of the added testFSReadLockLongHoldingReport in branch-2.

2016-09-06 Thread drankye
Addendum fix for HDFS-10817 to fix failure of the added 
testFSReadLockLongHoldingReport in branch-2.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c600360
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c600360
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c600360

Branch: refs/heads/HADOOP-12756
Commit: 6c600360ca469d5fe0f017d681585db06c80c9cc
Parents: 6f4b0d3
Author: Zhe Zhang 
Authored: Wed Aug 31 23:43:59 2016 -0700
Committer: Zhe Zhang 
Committed: Wed Aug 31 23:43:59 2016 -0700

--
 .../hdfs/server/namenode/TestFSNamesystem.java  | 50 +++-
 1 file changed, 6 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c600360/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index ccc8a6e..b85b6aba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -59,6 +59,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeoutException;
+import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 public class TestFSNamesystem {
@@ -431,54 +432,15 @@ public class TestFSNamesystem {
 t2.start();
 t1.join();
 t2.join();
+// Look for the differentiating class names in the stack trace
+String stackTracePatternString =
+String.format("INFO.+%s(.+\n){4}\\Q%%s\\E\\.run", readLockLogStmt);
 Pattern t1Pattern = Pattern.compile(
-String.format("\\Q%s\\E.+%s", t1.getName(), readLockLogStmt));
+String.format(stackTracePatternString, t1.getClass().getName()));
 assertTrue(t1Pattern.matcher(logs.getOutput()).find());
 Pattern t2Pattern = Pattern.compile(
-String.format("\\Q%s\\E.+%s", t2.getName(), readLockLogStmt));
+String.format(stackTracePatternString, t2.getClass().getName()));
 assertFalse(t2Pattern.matcher(logs.getOutput()).find());
-
-// Spin up a bunch of threads all grabbing the lock at once; assign some
-// to go over threshold and some under. Check that they all log correctly.
-logs.clearOutput();
-final int threadCount = 50;
-List threads = new ArrayList<>(threadCount);
-for (int i = 0; i < threadCount; i++) {
-  threads.add(new Thread() {
-@Override
-public void run() {
-  try {
-long sleepTime;
-if (this.getName().hashCode() % 2 == 0) {
-  sleepTime = readLockReportingThreshold + 10;
-} else {
-  sleepTime = readLockReportingThreshold / 2;
-}
-fsn.readLock();
-Thread.sleep(sleepTime);
-fsn.readUnlock();
-  } catch (InterruptedException e) {
-fail("Interrupted during testing");
-  }
-}
-  });
-}
-for (Thread t : threads) {
-  t.start();
-}
-for (Thread t : threads) {
-  t.join();
-}
-for (Thread t : threads) {
-  Pattern p = Pattern.compile(
-  String.format("\\Q%s\\E.+%s", t.getName(), readLockLogStmt));
-  boolean foundLog = p.matcher(logs.getOutput()).find();
-  if (t.getName().hashCode() % 2 == 0) {
-assertTrue(foundLog);
-  } else {
-assertFalse(foundLog);
-  }
-}
   }
 
   @Test





[27/50] [abbrv] hadoop git commit: HDFS-9392. Admins support for maintenance state. Contributed by Ming Ma.

2016-09-06 Thread drankye
HDFS-9392. Admins support for maintenance state. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dcbdbdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dcbdbdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dcbdbdb

Branch: refs/heads/HADOOP-12756
Commit: 9dcbdbdb5a34d85910707f81ebc1bb1f81c99978
Parents: c4ee691
Author: Ming Ma 
Authored: Tue Aug 30 14:00:13 2016 -0700
Committer: Ming Ma 
Committed: Tue Aug 30 14:00:13 2016 -0700

--
 .../hdfs/protocol/DatanodeAdminProperties.java  |  19 +
 .../hadoop/hdfs/protocol/DatanodeInfo.java  |  27 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 +-
 .../CombinedHostFileManager.java|  23 +
 .../server/blockmanagement/DatanodeManager.java |  33 +-
 .../server/blockmanagement/DatanodeStats.java   |  10 +-
 .../blockmanagement/DecommissionManager.java| 101 +++-
 .../blockmanagement/HeartbeatManager.java   |  27 +
 .../blockmanagement/HostConfigManager.java  |   7 +
 .../server/blockmanagement/HostFileManager.java |   6 +
 .../hdfs/server/namenode/FSNamesystem.java  |  29 +
 .../namenode/metrics/FSNamesystemMBean.java |  15 +
 .../apache/hadoop/hdfs/AdminStatesBaseTest.java | 375 
 .../apache/hadoop/hdfs/TestDecommission.java| 592 ++-
 .../hadoop/hdfs/TestMaintenanceState.java   | 310 ++
 .../namenode/TestDecommissioningStatus.java |   2 +-
 .../hadoop/hdfs/util/HostsFileWriter.java   |  55 +-
 .../hdfs/util/TestCombinedHostsFileReader.java  |   2 +-
 .../src/test/resources/dfs.hosts.json   |   2 +
 19 files changed, 1165 insertions(+), 472 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeAdminProperties.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeAdminProperties.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeAdminProperties.java
index 9f7b983..2abed81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeAdminProperties.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeAdminProperties.java
@@ -33,6 +33,7 @@ public class DatanodeAdminProperties {
   private int port;
   private String upgradeDomain;
   private AdminStates adminState = AdminStates.NORMAL;
+  private long maintenanceExpireTimeInMS = Long.MAX_VALUE;
 
   /**
* Return the host name of the datanode.
@@ -97,4 +98,22 @@ public class DatanodeAdminProperties {
   public void setAdminState(final AdminStates adminState) {
 this.adminState = adminState;
   }
+
+  /**
+   * Get the maintenance expiration time in milliseconds.
+   * @return the maintenance expiration time in milliseconds.
+   */
+  public long getMaintenanceExpireTimeInMS() {
+return this.maintenanceExpireTimeInMS;
+  }
+
+  /**
+   * Set the maintenance expiration time in milliseconds.
+   * @param maintenanceExpireTimeInMS
+   *the maintenance expiration time in milliseconds.
+   */
+  public void setMaintenanceExpireTimeInMS(
+  final long maintenanceExpireTimeInMS) {
+this.maintenanceExpireTimeInMS = maintenanceExpireTimeInMS;
+  }
 }
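
A minimal usage sketch for the new field, assuming illustrative host/port values and a four-hour maintenance window (CombinedHostFileManager, listed in the change summary above, is what actually reads these per-datanode properties from the JSON hosts file):

    // Hedged sketch: request maintenance on one datanode until an expiry
    // time; Long.MAX_VALUE (the default above) effectively means "no expiry".
    DatanodeAdminProperties dn = new DatanodeAdminProperties();
    dn.setHostName("dn1.example.com");               // illustrative host
    dn.setPort(50010);                               // illustrative port
    dn.setAdminState(AdminStates.IN_MAINTENANCE);
    dn.setMaintenanceExpireTimeInMS(
        System.currentTimeMillis() + TimeUnit.HOURS.toMillis(4));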

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index e04abdd..cd32a53 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -83,6 +83,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }
 
   protected AdminStates adminState;
+  private long maintenanceExpireTimeInMS;
 
   public DatanodeInfo(DatanodeInfo from) {
 super(from);
@@ -499,17 +500,28 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
   }
 
   /**
-   * Put a node to maintenance mode.
+   * Start the maintenance operation.
*/
   public void startMaintenance() {
-adminState = AdminStates.ENTERING_MAINTENANCE;
+this.adminState = AdminStates.ENTERING_MAINTENANCE;
   }
 
   /**
-   * Put a node to maintenance mode.
+   * Put a

[35/50] [abbrv] hadoop git commit: HDFS-10817. Add Logging for Long-held NN Read Locks. Contributed by Erik Krogen.

2016-09-06 Thread drankye
HDFS-10817. Add Logging for Long-held NN Read Locks. Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f4b0d33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f4b0d33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f4b0d33

Branch: refs/heads/HADOOP-12756
Commit: 6f4b0d33ca339e3724623a1d23c101f8cfd3cdd5
Parents: 85bab5f
Author: Zhe Zhang 
Authored: Wed Aug 31 15:40:01 2016 -0700
Committer: Zhe Zhang 
Committed: Wed Aug 31 15:40:09 2016 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  32 +
 .../src/main/resources/hdfs-default.xml |   9 ++
 .../hdfs/server/namenode/TestFSNamesystem.java  | 144 ++-
 4 files changed, 188 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f4b0d33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 2eff3b0..f2ab321 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -407,10 +407,14 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final long
   DFS_NAMENODE_MAX_LOCK_HOLD_TO_RELEASE_LEASE_MS_DEFAULT = 25;
 
-  // Threshold for how long a write lock must be held for the event to be 
logged
+  // Threshold for how long namenode locks must be held for the
+  // event to be logged
   public static final String  
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY =
   "dfs.namenode.write-lock-reporting-threshold-ms";
   public static final long
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 1000L;
+  public static final String  
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY =
+  "dfs.namenode.read-lock-reporting-threshold-ms";
+  public static final long
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
 
   public static final String  DFS_UPGRADE_DOMAIN_FACTOR = 
"dfs.namenode.upgrade.domain.factor";
   public static final int DFS_UPGRADE_DOMAIN_FACTOR_DEFAULT = 
DFS_REPLICATION_DEFAULT;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f4b0d33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f4b742e..3b14eec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -73,6 +73,8 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_I
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT;
@@ -824,6 +826,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   this.writeLockReportingThreshold = conf.getLong(
   DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
   DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+  this.readLockReportingThreshold = conf.getLong(
+  DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
+  DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
 
   // For testing purposes, allow the DT secret manager to be started 
regardless
   // of whether security is enabled.
@@ -1507,14 +1512,41 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   private lo
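
The hunk above is cut off where the new threshold is wired into the read-lock path; as a rough sketch of the reporting pattern (field and lock names are illustrative, and the committed FSNamesystem code differs in detail):

    // Record when this thread took the read lock; on the outermost release,
    // log a stack trace if the lock was held past the configured threshold.
    private final ThreadLocal<Long> readLockHeldTimeStamp = new ThreadLocal<>();

    public void readLock() {
      coarseLock.readLock().lock();
      if (coarseLock.getReadHoldCount() == 1) {    // outermost acquire only
        readLockHeldTimeStamp.set(Time.monotonicNow());
      }
    }

    public void readUnlock() {
      final boolean outermost = coarseLock.getReadHoldCount() == 1;
      final long start = outermost ? readLockHeldTimeStamp.get() : 0L;
      coarseLock.readLock().unlock();
      if (outermost) {
        readLockHeldTimeStamp.remove();
        final long heldMs = Time.monotonicNow() - start;
        if (heldMs >= readLockReportingThreshold) {
          LOG.info("FSNamesystem read lock held for " + heldMs + " ms via\n"
              + StringUtils.getStackTrace(Thread.currentThread()));
        }
      }
    }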

[31/50] [abbrv] hadoop git commit: YARN-5221. Expose UpdateResourceRequest API to allow AM to request for change in container properties. (asuresh)

2016-09-06 Thread drankye
YARN-5221. Expose UpdateResourceRequest API to allow AM to request for change 
in container properties. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6d9cff2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6d9cff2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6d9cff2

Branch: refs/heads/HADOOP-12756
Commit: d6d9cff21b7b6141ed88359652cf22e8973c0661
Parents: 9dcbdbd
Author: Arun Suresh 
Authored: Sat Aug 27 15:22:43 2016 -0700
Committer: Arun Suresh 
Committed: Tue Aug 30 15:52:29 2016 -0700

--
 .../app/local/TestLocalContainerAllocator.java  |   4 +-
 .../v2/app/rm/TestRMContainerAllocator.java |  10 +-
 .../sls/scheduler/ResourceSchedulerWrapper.java |   6 +-
 .../sls/scheduler/SLSCapacityScheduler.java |   6 +-
 .../api/protocolrecords/AllocateRequest.java|  64 +---
 .../api/protocolrecords/AllocateResponse.java   |  76 +++--
 .../hadoop/yarn/api/records/Container.java  |  24 +-
 .../records/ContainerResourceChangeRequest.java | 117 ---
 .../yarn/api/records/ContainerUpdateType.java   |  45 +++
 .../yarn/api/records/UpdateContainerError.java  | 119 +++
 .../api/records/UpdateContainerRequest.java | 218 
 .../yarn/api/records/UpdatedContainer.java  | 118 +++
 .../src/main/proto/yarn_protos.proto|   6 +-
 .../src/main/proto/yarn_service_protos.proto|  31 +-
 .../distributedshell/ApplicationMaster.java |   4 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |   9 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   8 +-
 .../yarn/client/api/impl/AMRMClientImpl.java|  84 ++---
 .../api/async/impl/TestAMRMClientAsync.java |  55 +--
 .../yarn/client/api/impl/TestAMRMClient.java|  42 +--
 .../api/impl/TestAMRMClientOnRMRestart.java |  14 +-
 .../impl/pb/AllocateRequestPBImpl.java  | 151 +++--
 .../impl/pb/AllocateResponsePBImpl.java | 192 ---
 .../api/records/impl/pb/ContainerPBImpl.java|  13 +
 .../ContainerResourceChangeRequestPBImpl.java   | 141 
 .../yarn/api/records/impl/pb/ProtoUtils.java|  69 +++-
 .../impl/pb/UpdateContainerErrorPBImpl.java | 125 +++
 .../impl/pb/UpdateContainerRequestPBImpl.java   | 187 ++
 .../records/impl/pb/UpdatedContainerPBImpl.java | 117 +++
 .../yarn/security/ContainerTokenIdentifier.java |  29 +-
 .../src/main/proto/yarn_security_token.proto|   1 +
 .../hadoop/yarn/api/TestPBImplRecords.java  |  17 +-
 .../yarn/security/TestYARNTokenIdentifier.java  |   4 +-
 .../api/protocolrecords/NMContainerStatus.java  |  15 +-
 .../impl/pb/NMContainerStatusPBImpl.java|  13 +
 .../OpportunisticContainerAllocator.java|   2 +-
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  14 +-
 .../yarn_server_common_service_protos.proto |   1 +
 .../protocolrecords/TestProtocolRecords.java|   4 +-
 .../TestRegisterNodeManagerRequest.java |   2 +-
 .../containermanager/ContainerManagerImpl.java  |  16 +-
 .../container/ContainerImpl.java|   7 +-
 .../queuing/QueuingContainerManagerImpl.java|   3 +-
 .../recovery/NMLeveldbStateStoreService.java|  41 ++-
 .../recovery/NMNullStateStoreService.java   |   4 +-
 .../recovery/NMStateStoreService.java   |  13 +-
 .../nodemanager/TestNodeManagerResync.java  |   2 +-
 .../nodemanager/TestNodeStatusUpdater.java  |  24 +-
 .../amrmproxy/MockResourceManagerFacade.java|   4 +-
 .../BaseContainerManagerTest.java   |   2 +-
 .../recovery/NMMemoryStateStoreService.java |   7 +-
 .../TestNMLeveldbStateStoreService.java |   7 +-
 .../nodemanager/webapp/MockContainer.java   |   2 +-
 .../nodemanager/webapp/TestNMWebServer.java |   6 +-
 .../ApplicationMasterService.java   |  54 ++-
 .../server/resourcemanager/RMServerUtils.java   | 338 ++-
 .../scheduler/AbstractYarnScheduler.java|  13 +-
 .../scheduler/SchedContainerChangeRequest.java  |   2 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  12 +-
 .../scheduler/YarnScheduler.java|   6 +-
 .../scheduler/capacity/CapacityScheduler.java   |   8 +-
 .../scheduler/fair/FairScheduler.java   |   6 +-
 .../scheduler/fifo/FifoScheduler.java   |   6 +-
 .../security/RMContainerTokenSecretManager.java |  64 ++--
 .../yarn/server/resourcemanager/MockAM.java |   7 +-
 .../resourcemanager/TestApplicationCleanup.java |   9 +-
 .../TestApplicationMasterService.java   |  86 +++--
 .../server/resourcemanager/TestRMRestart.java   |   2 +-
 .../TestResourceTrackerService.java |   8 +-
 .../capacity/TestCapacityScheduler.java |  42 ++-
 .../capacity/TestContainerAllocation.java   |  13 +-
 .../capacity/TestContainerResizing.java | 134 +---
 .../capacity/TestIncreaseAllocationExpir
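
As a hedged sketch of the consolidated AM-side API this change introduces (container identifiers and sizes are placeholders, and the factory signatures should be checked against the patch itself):

    // Ask the RM to grow a running container through the single update-request
    // list that replaces the separate increase/decrease request lists.
    UpdateContainerRequest update = UpdateContainerRequest.newInstance(
        container.getVersion(),             // container version, as tracked by the RM
        container.getId(),
        ContainerUpdateType.INCREASE_RESOURCE,
        Resource.newInstance(4096, 4),      // target capability (placeholder)
        null);                              // no execution-type change
    allocateRequest.setUpdateRequests(Collections.singletonList(update));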

[28/50] [abbrv] hadoop git commit: YARN-5221. Expose UpdateResourceRequest API to allow AM to request for change in container properties. (asuresh)

2016-09-06 Thread drankye
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index b271b37..38ef59b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -2092,7 +2092,7 @@ public class TestRMRestart extends 
ParameterizedSchedulerTestBase {
   String nodeLabelExpression) {
 ContainerId containerId = ContainerId.newContainerId(appAttemptId, id);
 NMContainerStatus containerReport =
-NMContainerStatus.newInstance(containerId, containerState,
+NMContainerStatus.newInstance(containerId, 0, containerState,
 Resource.newInstance(1024, 1), "recover container", 0,
 Priority.newInstance(0), 0, nodeLabelExpression);
 return containerReport;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index aa5b336..c2a20a1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -1127,7 +1127,7 @@ public class TestResourceTrackerService extends 
NodeLabelTestBase {
 NMContainerStatus report =
 NMContainerStatus.newInstance(
   ContainerId.newContainerId(
-ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1),
+ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1), 0,
   ContainerState.COMPLETE, Resource.newInstance(1024, 1),
   "Dummy Completed", 0, Priority.newInstance(10), 1234);
 rm.getResourceTrackerService().handleNMContainerStatus(report, null);
@@ -1138,7 +1138,7 @@ public class TestResourceTrackerService extends 
NodeLabelTestBase {
 (RMAppAttemptImpl) app.getCurrentAppAttempt();
 currentAttempt.setMasterContainer(null);
 report = NMContainerStatus.newInstance(
-  ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0),
+  ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0), 0,
   ContainerState.COMPLETE, Resource.newInstance(1024, 1),
   "Dummy Completed", 0, Priority.newInstance(10), 1234);
 rm.getResourceTrackerService().handleNMContainerStatus(report, null);
@@ -1150,7 +1150,7 @@ public class TestResourceTrackerService extends 
NodeLabelTestBase {
 // Case 2.1: AppAttemptId is null
 report = NMContainerStatus.newInstance(
   ContainerId.newContainerId(
-ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1),
+ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1), 0,
   ContainerState.COMPLETE, Resource.newInstance(1024, 1),
   "Dummy Completed", 0, Priority.newInstance(10), 1234);
 try {
@@ -1165,7 +1165,7 @@ public class TestResourceTrackerService extends 
NodeLabelTestBase {
 (RMAppAttemptImpl) app.getCurrentAppAttempt();
 currentAttempt.setMasterContainer(null);
 report = NMContainerStatus.newInstance(
-  ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0),
+  ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0), 0,
   ContainerState.COMPLETE, Resource.newInstance(1024, 1),
   "Dummy Completed", 0, Priority.newInstance(10), 1234);
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanag

[16/50] [abbrv] hadoop git commit: MAPREDUCE-6768. TestRecovery.testSpeculative failed with NPE. Contributed by Haibo Chen

2016-09-06 Thread drankye
MAPREDUCE-6768. TestRecovery.testSpeculative failed with NPE. Contributed by 
Haibo Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69f72776
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69f72776
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69f72776

Branch: refs/heads/HADOOP-12756
Commit: 69f7277625b86a30a5964285d05dac4ba982e795
Parents: 6742fb6
Author: Jason Lowe 
Authored: Mon Aug 29 19:54:34 2016 +
Committer: Jason Lowe 
Committed: Mon Aug 29 19:56:09 2016 +

--
 .../org/apache/hadoop/mapreduce/v2/app/TestRecovery.java | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69f72776/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
index 4c6ee72..3ede2e9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
@@ -25,6 +25,7 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
+import com.google.common.base.Supplier;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
@@ -96,6 +97,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.junit.Test;
@@ -1196,6 +1198,12 @@ public class TestRecovery {
 TaskAttempt task1Attempt2 = t1it.next();
 TaskAttempt task2Attempt = 
mapTask2.getAttempts().values().iterator().next();
 
+// wait for the second task attempt to be assigned.
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override public Boolean get() {
+return task1Attempt2.getAssignedContainerID() != null;
+  }
+}, 10, 1);
 ContainerId t1a2contId = task1Attempt2.getAssignedContainerID();
 
 LOG.info(t1a2contId.toString());
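
For readers unfamiliar with the helper: GenericTestUtils.waitFor(check, checkEveryMillis, waitForMillis) polls the supplier until it returns true and throws a TimeoutException if it never does, which turns the former race (reading getAssignedContainerID() before the attempt was scheduled) into a bounded wait. A sketch with illustrative timings:

    // Poll every 10 ms, give up after 10 s; both values are placeholders.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return task1Attempt2.getAssignedContainerID() != null;
      }
    }, 10, 10000);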





[29/50] [abbrv] hadoop git commit: YARN-5221. Expose UpdateResourceRequest API to allow AM to request for change in container properties. (asuresh)

2016-09-06 Thread drankye
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
index 112095e..4bcdf5c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
@@ -70,7 +70,7 @@ public class NMNullStateStoreService extends 
NMStateStoreService {
   }
 
   @Override
-  public void storeContainer(ContainerId containerId,
+  public void storeContainer(ContainerId containerId, int version,
   StartContainerRequest startRequest) throws IOException {
   }
 
@@ -90,7 +90,7 @@ public class NMNullStateStoreService extends 
NMStateStoreService {
 
   @Override
   public void storeContainerResourceChanged(ContainerId containerId,
-  Resource capability) throws IOException {
+  int version, Resource capability) throws IOException {
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
index 57f35a4..9f9ee75 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
@@ -77,6 +77,7 @@ public abstract class NMStateStoreService extends 
AbstractService {
 private int remainingRetryAttempts = ContainerRetryContext.RETRY_INVALID;
 private String workDir;
 private String logDir;
+int version;
 
 public RecoveredContainerStatus getStatus() {
   return status;
@@ -94,6 +95,10 @@ public abstract class NMStateStoreService extends 
AbstractService {
   return diagnostics;
 }
 
+public int getVersion() {
+  return version;
+}
+
 public StartContainerRequest getStartRequest() {
   return startRequest;
 }
@@ -130,6 +135,7 @@ public abstract class NMStateStoreService extends 
AbstractService {
 public String toString() {
   return new StringBuffer("Status: ").append(getStatus())
   .append(", Exit code: ").append(exitCode)
+  .append(", Version: ").append(version)
   .append(", Killed: ").append(getKilled())
   .append(", Diagnostics: ").append(getDiagnostics())
   .append(", Capability: ").append(getCapability())
@@ -306,11 +312,13 @@ public abstract class NMStateStoreService extends 
AbstractService {
   /**
* Record a container start request
* @param containerId the container ID
   * @param containerVersion the container version
* @param startRequest the container start request
* @throws IOException
*/
   public abstract void storeContainer(ContainerId containerId,
-  StartContainerRequest startRequest) throws IOException;
+  int containerVersion, StartContainerRequest startRequest)
+  throws IOException;
 
   /**
* Record that a container has been queued at the NM
@@ -331,11 +339,12 @@ public abstract class NMStateStoreService extends 
AbstractService {
   /**
* Record that a container resource has been changed
* @param containerId the container ID
+   * @param containerVersion the container version
* @param capability the container resource capability
* @throws IOException
*/
   public abstract void storeContainerResourceChanged(ContainerId containerId,
-  Resource capability) throws IOException;
+  int containerVersion, Resource capability) throws IOException;
 
   /**
* Record that a container has completed

http://git-wip-us.apac

[19/50] [abbrv] hadoop git commit: YARN-5550. TestYarnCLI#testGetContainers should format according to CONTAINER_PATTERN. Contributed by Jonathan Hung.

2016-09-06 Thread drankye
YARN-5550. TestYarnCLI#testGetContainers should format according to 
CONTAINER_PATTERN. Contributed by Jonathan Hung.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed6ff5cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed6ff5cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed6ff5cd

Branch: refs/heads/HADOOP-12756
Commit: ed6ff5cd789cab621fbfb88c8e8de4f215cd2961
Parents: 5d1609d
Author: Zhe Zhang 
Authored: Mon Aug 29 14:46:00 2016 -0700
Committer: Zhe Zhang 
Committed: Mon Aug 29 14:46:00 2016 -0700

--
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  9 +++--
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 39 ++--
 2 files changed, 17 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed6ff5cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index f0b1c47..21df6a2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -67,15 +67,18 @@ public class ApplicationCLI extends YarnCLI {
   private static final String APPLICATION_ATTEMPTS_PATTERN =
 "%30s\t%20s\t%35s\t%35s"
   + System.getProperty("line.separator");
-  private static final String CONTAINER_PATTERN = 
-"%30s\t%20s\t%20s\t%20s\t%20s\t%20s\t%35s"
-  + System.getProperty("line.separator");
 
   private static final String APP_TYPE_CMD = "appTypes";
   private static final String APP_STATE_CMD = "appStates";
   private static final String APP_TAG_CMD = "appTags";
   private static final String ALLSTATES_OPTION = "ALL";
   private static final String QUEUE_CMD = "queue";
+
+  @VisibleForTesting
+  protected static final String CONTAINER_PATTERN =
+"%30s\t%20s\t%20s\t%20s\t%20s\t%20s\t%35s"
+  + System.getProperty("line.separator");
+
   public static final String APPLICATION = "application";
   public static final String APPLICATION_ATTEMPT = "applicationattempt";
   public static final String CONTAINER = "container";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed6ff5cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index 1cd513b..3fdea40 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -313,34 +313,17 @@ public class TestYarnCLI {
 new OutputStreamWriter(baos, "UTF-8");
 PrintWriter pw = new PrintWriter(stream);
 pw.println("Total number of containers :3");
-pw.print("  Container-Id");
-pw.print("\t  Start Time");
-pw.print("\t Finish Time");
-pw.print("\t   State");
-pw.print("\tHost");
-pw.print("\t   Node Http Address");
-pw.println("\tLOG-URL");
-pw.print(" container_1234_0005_01_01");
-pw.print("\t"+dateFormat.format(new Date(time1)));
-pw.print("\t"+dateFormat.format(new Date(time2)));
-pw.print("\tCOMPLETE");
-pw.print("\t   host:1234");
-pw.print("\thttp://host:2345";);
-pw.println("\t logURL");
-pw.print(" container_1234_0005_01_02");
-pw.print("\t"+dateFormat.format(new Date(time1)));
-pw.print("\t"+dateFormat.format(new Date(time2)));
-pw.print("\tCOMPLETE");
-pw.print("\t   host:1234");
-pw.print("\thttp://host:2345";);
-pw.println("\t logURL");
-pw.print(" container_1234_0005_01_03");
-pw.print("\t"+dateFormat.format(new Date(time1)));
-pw.print("\t N/A");
-pw.print("\t RUNNING");
-pw.print("\t   host:1234");
-pw.print("\thttp://host:2345";);
-pw.println("\t

[45/50] [abbrv] hadoop git commit: HDFS-10822. Log DataNodes in the write pipeline. John Zhuge via Lei Xu

2016-09-06 Thread drankye
HDFS-10822. Log DataNodes in the write pipeline. John Zhuge via Lei Xu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a8c5064
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a8c5064
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a8c5064

Branch: refs/heads/HADOOP-12756
Commit: 5a8c5064d1a1d596b1f5c385299a86ec6ab9ad1e
Parents: 23abb09
Author: Lei Xu 
Authored: Fri Sep 2 11:01:13 2016 -0700
Committer: Lei Xu 
Committed: Fri Sep 2 11:01:13 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a8c5064/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 062a416..5166c8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -508,6 +508,12 @@ class DataStreamer extends Daemon {
   private void initDataStreaming() {
 this.setName("DataStreamer for file " + src +
 " block " + block);
+if (LOG.isDebugEnabled()) {
+  LOG.debug("nodes {} storageTypes {} storageIDs {}",
+  Arrays.toString(nodes),
+  Arrays.toString(storageTypes),
+  Arrays.toString(storageIDs));
+}
 response = new ResponseProcessor(nodes);
 response.start();
 stage = BlockConstructionStage.DATA_STREAMING;
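
One note on the guard: SLF4J-style "{}" placeholders defer only the string concatenation, while the Arrays.toString(...) arguments are still evaluated eagerly at the call site, so the isDebugEnabled() check is what actually skips the three array conversions on every pipeline setup when debug logging is off:

    // Without the surrounding guard, each initDataStreaming() call would pay
    // for three Arrays.toString(...) conversions even at INFO level.
    LOG.debug("nodes {} storageTypes {} storageIDs {}",
        Arrays.toString(nodes),           // evaluated regardless of log level
        Arrays.toString(storageTypes),
        Arrays.toString(storageIDs));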





[21/50] [abbrv] hadoop git commit: YARN-5567. Fix script exit code checking in NodeHealthScriptRunner#reportHealthStatus. (Yufei Gu via rchiang)

2016-09-06 Thread drankye
YARN-5567. Fix script exit code checking in 
NodeHealthScriptRunner#reportHealthStatus. (Yufei Gu via rchiang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05ede003
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05ede003
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05ede003

Branch: refs/heads/HADOOP-12756
Commit: 05ede003868871addc30162e9707c3dc14ed6b7a
Parents: 8b57be1
Author: Ray Chiang 
Authored: Mon Aug 29 15:55:33 2016 -0700
Committer: Ray Chiang 
Committed: Mon Aug 29 15:55:33 2016 -0700

--
 .../java/org/apache/hadoop/util/NodeHealthScriptRunner.java   | 3 ++-
 .../org/apache/hadoop/util/TestNodeHealthScriptRunner.java| 7 +++
 2 files changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05ede003/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
index fc392c4..c3bef37 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
@@ -106,6 +106,7 @@ public class NodeHealthScriptRunner extends AbstractService 
{
 shexec.execute();
   } catch (ExitCodeException e) {
 // ignore the exit code of the script
+exceptionStackTrace = StringUtils.stringifyException(e);
 status = HealthCheckerExitStatus.FAILED_WITH_EXIT_CODE;
 // On Windows, we will not hit the Stream closed IOException
 // thrown by stdout buffered reader for timeout event.
@@ -162,7 +163,7 @@ public class NodeHealthScriptRunner extends AbstractService 
{
 setHealthStatus(false, exceptionStackTrace);
 break;
   case FAILED_WITH_EXIT_CODE:
-setHealthStatus(true, "", now);
+setHealthStatus(false, exceptionStackTrace);
 break;
   case FAILED:
 setHealthStatus(false, shexec.getOutput());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05ede003/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java
index 8fc64d1..db61f5a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java
@@ -91,6 +91,7 @@ public class TestNodeHealthScriptRunner {
   public void testNodeHealthScript() throws Exception {
 String errorScript = "echo ERROR\n echo \"Tracker not healthy\"";
 String normalScript = "echo \"I am all fine\"";
+String failWithExitCodeScript = "echo \"Not healthy\"; exit -1";
 String timeOutScript =
   Shell.WINDOWS ? "@echo off\nping -n 4 127.0.0.1 >nul\necho \"I am fine\""
   : "sleep 4\necho \"I am fine\"";
@@ -124,6 +125,12 @@ public class TestNodeHealthScriptRunner {
 nodeHealthScriptRunner.isHealthy());
 Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport());
 
+// Script which fails with exit code.
+writeNodeHealthScriptFile(failWithExitCodeScript, true);
+timerTask.run();
+Assert.assertFalse("Node health status reported healthy",
+nodeHealthScriptRunner.isHealthy());
+
 // Timeout script.
 writeNodeHealthScriptFile(timeOutScript, true);
 timerTask.run();





[30/50] [abbrv] hadoop git commit: YARN-5221. Expose UpdateResourceRequest API to allow AM to request for change in container properties. (asuresh)

2016-09-06 Thread drankye
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
index d6db32c..0f0f571 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
@@ -27,17 +27,17 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
-import 
org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceChangeRequestPBImpl;
 import 
org.apache.hadoop.yarn.api.records.impl.pb.ResourceBlacklistRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.UpdateContainerRequestPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
-import 
org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceChangeRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceBlacklistRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
+import 
org.apache.hadoop.yarn.proto.YarnServiceProtos.UpdateContainerRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
 import 
org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProtoOrBuilder;
 
@@ -52,8 +52,7 @@ public class AllocateRequestPBImpl extends AllocateRequest {
 
  private List<ResourceRequest> ask = null;
  private List<ContainerId> release = null;
-  private List<ContainerResourceChangeRequest> increaseRequests = null;
-  private List<ContainerResourceChangeRequest> decreaseRequests = null;
+  private List<UpdateContainerRequest> updateRequests = null;
   private ResourceBlacklistRequest blacklistRequest = null;
   
   public AllocateRequestPBImpl() {
@@ -99,11 +98,8 @@ public class AllocateRequestPBImpl extends AllocateRequest {
 if (this.release != null) {
   addReleasesToProto();
 }
-if (this.increaseRequests != null) {
-  addIncreaseRequestsToProto();
-}
-if (this.decreaseRequests != null) {
-  addDecreaseRequestsToProto();
+if (this.updateRequests != null) {
+  addUpdateRequestsToProto();
 }
 if (this.blacklistRequest != null) {
   builder.setBlacklistRequest(convertToProtoFormat(this.blacklistRequest));
@@ -166,37 +162,19 @@ public class AllocateRequestPBImpl extends 
AllocateRequest {
   }
   
   @Override
-  public List<ContainerResourceChangeRequest> getIncreaseRequests() {
-initIncreaseRequests();
-return this.increaseRequests;
+  public List<UpdateContainerRequest> getUpdateRequests() {
+initUpdateRequests();
+return this.updateRequests;
   }
 
   @Override
-  public void setIncreaseRequests(
-  List<ContainerResourceChangeRequest> increaseRequests) {
-if (increaseRequests == null) {
+  public void setUpdateRequests(List<UpdateContainerRequest> updateRequests) {
+if (updateRequests == null) {
   return;
 }
-initIncreaseRequests();
-this.increaseRequests.clear();
-this.increaseRequests.addAll(increaseRequests);
-  }
-
-  @Override
-  public List<ContainerResourceChangeRequest> getDecreaseRequests() {
-initDecreaseRequests();
-return this.decreaseRequests;
-  }
-
-  @Override
-  public void setDecreaseRequests(
-  List<ContainerResourceChangeRequest> decreaseRequests) {
-if (decreaseRequests == null) {
-  return;
-}
-initDecreaseRequests();
-this.decreaseRequests.clear();
-this.decreaseRequests.addAll(decreaseRequests);
+initUpdateRequests();
+this.updateRequests.clear();
+this.updateRequests.addAll(updateRequests);
   }
 
   @Override
@@ -239,7 +217,8 @@ public class AllocateRequestPBImpl extends AllocateRequest {
 builder.clearAsk();
 if (ask == null)
   return;
-Iterable<ResourceRequestProto> iterable = new Iterable<ResourceRequestProto>() {
+Iterable<ResourceRequestProto> iterable =
+new Iterable<ResourceRequestProto>() {
  @Override
  public Iterator<ResourceRequestProto> iterator() {
return new Iterator<ResourceRequestProto>() {
@@ -268,84 +247,34 @@ public class AllocateRequestPBImpl extends 
AllocateRequest {
 builder.addAllAsk(iterable);
   }
   
-  private void initIncreaseRequests() {
-if (this.increaseRequests

[49/50] [abbrv] hadoop git commit: HDFS-9781. FsDatasetImpl#getBlockReports can occasionally throw NullPointerException. Contributed by Manoj Govindassamy.

2016-09-06 Thread drankye
HDFS-9781. FsDatasetImpl#getBlockReports can occasionally throw 
NullPointerException. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07650bc3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07650bc3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07650bc3

Branch: refs/heads/HADOOP-12756
Commit: 07650bc37a3c78ecc6566d813778d0954d0b06b0
Parents: f6ea9be
Author: Xiao Chen 
Authored: Fri Sep 2 15:26:20 2016 -0700
Committer: Xiao Chen 
Committed: Fri Sep 2 15:33:11 2016 -0700

--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 21 +++--
 .../fsdataset/impl/TestFsDatasetImpl.java   | 49 
 2 files changed, 56 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07650bc3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 129024b..e0d2baf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1827,13 +1827,24 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 Map<String, BlockListAsLongs.Builder> builders =
 new HashMap<String, BlockListAsLongs.Builder>();
 
-List<FsVolumeImpl> curVolumes = volumes.getVolumes();
-for (FsVolumeSpi v : curVolumes) {
-  builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength));
-}
-
+List<FsVolumeImpl> curVolumes = null;
 try (AutoCloseableLock lock = datasetLock.acquire()) {
+  curVolumes = volumes.getVolumes();
+  for (FsVolumeSpi v : curVolumes) {
+builders.put(v.getStorageID(), 
BlockListAsLongs.builder(maxDataLength));
+  }
+
+  Set<String> missingVolumesReported = new HashSet<>();
   for (ReplicaInfo b : volumeMap.replicas(bpid)) {
+String volStorageID = b.getVolume().getStorageID();
+if (!builders.containsKey(volStorageID)) {
+  if (!missingVolumesReported.contains(volStorageID)) {
+LOG.warn("Storage volume: " + volStorageID + " missing for the"
++ " replica block: " + b + ". Probably being removed!");
+missingVolumesReported.add(volStorageID);
+  }
+  continue;
+}
 switch(b.getState()) {
   case FINALIZED:
   case RBW:
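
The hunk is cut off, but the failure mode it closes is visible above: volume iteration used to happen before the dataset lock was taken, so a volume removed concurrently had no builder registered under its storage ID and the per-replica bookkeeping dereferenced null. Roughly (the exact line differs in the committed file):

    // Pre-patch shape of the per-replica accounting inside the switch:
    // builders.get(...) returned null for a replica whose volume had just
    // been removed, and the chained add(...) threw the NullPointerException.
    builders.get(b.getVolume().getStorageID()).add(b);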

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07650bc3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index e73a612..b946803 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -590,8 +590,15 @@ public class TestFsDatasetImpl {
 final ExtendedBlock eb = new ExtendedBlock(BLOCK_POOL_IDS[0], 0);
 final CountDownLatch startFinalizeLatch = new CountDownLatch(1);
 final CountDownLatch brReceivedLatch = new CountDownLatch(1);
+final CountDownLatch volRemovedLatch = new CountDownLatch(1);
 class BlockReportThread extends Thread {
   public void run() {
+// Let's wait for the volume removal process to start
+try {
+  volRemovedLatch.await();
+} catch (Exception e) {
+  LOG.info("Unexpected exception when waiting for vol removal:", e);
+}
 LOG.info("Getting block report");
 dataset.getBlockReports(eb.getBlockPoolId());
 LOG.info("Successfully received block report");
@@ -599,18 +606,27 @@ public class TestFsDatasetImpl {
   }
 }
 
-final BlockReportThread brt = new BlockReportThread();
 class ResponderThread extends Thread {
   public void run() {
 try (ReplicaHandler replica = dataset
 .createRbw(StorageType.DEFAULT, eb, false)) {
-  LOG.info("createRbw finished");
+  LOG.info("CreateRbw finished");
   startFinalizeLatch.countDown();
 
-  // Slow down while we're holding the ref

[38/50] [abbrv] hadoop git commit: HADOOP-13375. o.a.h.security.TestGroupsCaching.testBackgroundRefreshCounters seems flaky. (Contributed by Weiwei Yang)

2016-09-06 Thread drankye
HADOOP-13375. o.a.h.security.TestGroupsCaching.testBackgroundRefreshCounters 
seems flaky. (Contributed by Weiwei Yang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcd21d08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcd21d08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcd21d08

Branch: refs/heads/HADOOP-12756
Commit: dcd21d083ab2a66fc3ca3bfda03887461698b7b1
Parents: 08f55cc
Author: Mingliang Liu 
Authored: Thu Sep 1 11:03:06 2016 -0700
Committer: Mingliang Liu 
Committed: Thu Sep 1 11:03:06 2016 -0700

--
 .../java/org/apache/hadoop/security/Groups.java | 33 +++
 .../hadoop/security/TestGroupsCaching.java  | 91 +---
 2 files changed, 95 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd21d08/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index 4891907..32660c2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -43,6 +43,8 @@ import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
@@ -355,23 +357,24 @@ public class Groups {
   executorService.submit(new Callable<List<String>>() {
 @Override
 public List<String> call() throws Exception {
-  boolean success = false;
-  try {
-backgroundRefreshQueued.decrementAndGet();
-backgroundRefreshRunning.incrementAndGet();
-List<String> results = load(key);
-success = true;
-return results;
-  } finally {
-backgroundRefreshRunning.decrementAndGet();
-if (success) {
-  backgroundRefreshSuccess.incrementAndGet();
-} else {
-  backgroundRefreshException.incrementAndGet();
-}
-  }
+  backgroundRefreshQueued.decrementAndGet();
+  backgroundRefreshRunning.incrementAndGet();
+  List<String> results = load(key);
+  return results;
 }
   });
+  Futures.addCallback(listenableFuture, new FutureCallback<List<String>>() {
+@Override
+public void onSuccess(List<String> result) {
+  backgroundRefreshSuccess.incrementAndGet();
+  backgroundRefreshRunning.decrementAndGet();
+}
+@Override
+public void onFailure(Throwable t) {
+  backgroundRefreshException.incrementAndGet();
+  backgroundRefreshRunning.decrementAndGet();
+}
+  });
   return listenableFuture;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd21d08/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index 5a5596e..58c2d1a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -25,11 +25,16 @@ import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.FakeTimer;
 import org.junit.Before;
 import org.junit.Test;
+
+import com.google.common.base.Supplier;
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
@@ -67,6 +72,7 @@ public class TestGroupsCaching {
 private static int requestCount = 0;
 private static long getGroupsDelayMs = 0;
 private static boolean throwException;
+private static volat
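
The test-side hunk above is truncated, but its new imports (GenericTestUtils, Supplier) indicate the deflaking strategy: wait for the background-refresh bookkeeping to settle instead of asserting counter values immediately. A hedged sketch (the accessor name is an illustrative stand-in for the getter Groups exposes):

    // Wait until no background refresh is still running before asserting
    // on the success/exception counters.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return groups.getBackgroundRefreshRunning() == 0;
      }
    }, 100, 10000);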

[44/50] [abbrv] hadoop git commit: HADOOP-13547. Optimize IPC client protobuf decoding. Contributed by Daryn Sharp.

2016-09-06 Thread drankye
HADOOP-13547. Optimize IPC client protobuf decoding. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23abb09c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23abb09c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23abb09c

Branch: refs/heads/HADOOP-12756
Commit: 23abb09c1f979d8c18ece81e32630a35ed569399
Parents: 05f5c0f
Author: Kihwal Lee 
Authored: Fri Sep 2 11:03:18 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Sep 2 11:03:18 2016 -0500

--
 .../main/java/org/apache/hadoop/ipc/Client.java |  77 +++---
 .../apache/hadoop/ipc/ProtobufRpcEngine.java| 241 +++
 .../org/apache/hadoop/ipc/ResponseBuffer.java   |  12 +-
 .../java/org/apache/hadoop/ipc/RpcWritable.java |  20 +-
 .../apache/hadoop/security/SaslRpcClient.java   |  45 ++--
 5 files changed, 102 insertions(+), 293 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23abb09c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 183bad4..567b932 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ipc;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.CodedOutputStream;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,13 +30,11 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
-import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcRequestMessageWrapper;
 import org.apache.hadoop.ipc.RPC.RpcKind;
 import org.apache.hadoop.ipc.Server.AuthProtocol;
 import 
org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
@@ -54,7 +51,6 @@ import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ProtoUtil;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.AsyncGet;
@@ -65,6 +61,7 @@ import javax.net.SocketFactory;
 import javax.security.sasl.Sasl;
 import java.io.*;
 import java.net.*;
+import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.*;
 import java.util.Map.Entry;
@@ -429,7 +426,7 @@ public class Client implements AutoCloseable {
 private final boolean doPing; //do we need to send ping message
 private final int pingInterval; // how often sends ping to the server
 private final int soTimeout; // used by ipc ping and rpc timeout
-private ByteArrayOutputStream pingRequest; // ping message
+private ResponseBuffer pingRequest; // ping message
 
 // currently active calls
 private Hashtable<Integer, Call> calls = new Hashtable<Integer, Call>();
@@ -459,7 +456,7 @@ public class Client implements AutoCloseable {
   this.doPing = remoteId.getDoPing();
   if (doPing) {
 // construct a RPC header with the callId as the ping callId
-pingRequest = new ByteArrayOutputStream();
+pingRequest = new ResponseBuffer();
 RpcRequestHeaderProto pingHeader = ProtoUtil
 .makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER,
 OperationProto.RPC_FINAL_PACKET, PING_CALL_ID,
@@ -979,12 +976,10 @@ public class Client implements AutoCloseable {
   .makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER,
   OperationProto.RPC_FINAL_PACKET, CONNECTION_CONTEXT_CALL_ID,
   RpcConstants.INVALID_RETRY_COUNT, clientId);
-  RpcRequestMessageWrapper request =
-  new RpcRequestMessageWrapper(connectionContextHeader, message);
-  
-  // Write out the packet length
-  out.writeInt(request.getLength());
-

[04/50] [abbrv] hadoop git commit: HDFS-10768. Optimize mkdir ops. Contributed by Daryn Sharp.

2016-09-06 Thread drankye
HDFS-10768. Optimize mkdir ops. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b7adf4d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b7adf4d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b7adf4d

Branch: refs/heads/HADOOP-12756
Commit: 8b7adf4ddf420a93c586c4b2eac27dd0f649682e
Parents: cde3a00
Author: Kihwal Lee 
Authored: Fri Aug 26 15:39:21 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Aug 26 15:39:56 2016 -0500

--
 .../hdfs/server/namenode/FSDirMkdirOp.java  | 91 +---
 .../hdfs/server/namenode/FSDirSymlinkOp.java| 15 ++--
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 16 ++--
 .../hdfs/server/namenode/INodesInPath.java  | 35 ++--
 .../server/namenode/TestSnapshotPathINodes.java |  5 ++
 5 files changed, 68 insertions(+), 94 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b7adf4d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 8aac1f8..bf5ff00 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -32,10 +32,7 @@ import 
org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import java.io.IOException;
-import java.util.AbstractMap;
 import java.util.List;
-import java.util.Map;
-
 import static org.apache.hadoop.util.Time.now;
 
 class FSDirMkdirOp {
@@ -63,7 +60,6 @@ class FSDirMkdirOp {
 throw new FileAlreadyExistsException("Path is not a directory: " + 
src);
   }
 
-  INodesInPath existing = lastINode != null ? iip : 
iip.getExistingINodes();
   if (lastINode == null) {
 if (fsd.isPermissionEnabled()) {
   fsd.checkAncestorAccess(pc, iip, FsAction.WRITE);
@@ -78,26 +74,20 @@ class FSDirMkdirOp {
 // create multiple inodes.
 fsn.checkFsObjectLimit();
 
-List<String> nonExisting = iip.getPath(existing.length(),
-iip.length() - existing.length());
-int length = nonExisting.size();
-if (length > 1) {
-  List<String> ancestors = nonExisting.subList(0, length - 1);
-  // Ensure that the user can traverse the path by adding implicit
-  // u+wx permission to all ancestor directories
-  existing = createChildrenDirectories(fsd, existing, ancestors,
-  addImplicitUwx(permissions, permissions));
-  if (existing == null) {
-throw new IOException("Failed to create directory: " + src);
-  }
+// Ensure that the user can traverse the path by adding implicit
+// u+wx permission to all ancestor directories.
+INodesInPath existing =
+createParentDirectories(fsd, iip, permissions, false);
+if (existing != null) {
+  existing = createSingleDirectory(
+  fsd, existing, iip.getLastLocalName(), permissions);
 }
-
-if ((existing = createChildrenDirectories(fsd, existing,
-nonExisting.subList(length - 1, length), permissions)) == null) {
+if (existing == null) {
   throw new IOException("Failed to create directory: " + src);
 }
+iip = existing;
   }
-  return fsd.getAuditFileInfo(existing);
+  return fsd.getAuditFileInfo(iip);
 } finally {
   fsd.writeUnlock();
 }
@@ -112,35 +102,18 @@ class FSDirMkdirOp {
* For example, path="/foo/bar/spam", "/foo" is an existing directory,
* "/foo/bar" is not existing yet, the function will create directory bar.
*
-   * @return a tuple which contains both the new INodesInPath (with all the
-   * existing and newly created directories) and the last component in the
-   * relative path. Or return null if there are errors.
+   * @return an INodesInPath with all the existing and newly created
+   * ancestor directories.
+   * Or return null if there are errors.
*/
-  static Map.Entry createAncestorDirectories(
+  static INodesInPath createAncestorDirectories(
   FSDirectory fsd, INodesInPath iip, PermissionStatus permission)
   throws IOException {
-final String last = DFSUtil.bytes2String(iip.getLastLocalName());
-INodesInPath existing = iip.getExistingINodes();
-List children = iip.getPath(existing.length(),
-iip.length() - exis
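
To make the implicit u+wx idea above concrete, here is a minimal sketch (inferred from the comments in the diff; the real addImplicitUwx helper takes PermissionStatus arguments and is not shown in this excerpt):

    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class ImplicitUwxSketch {
      // Ancestors created on the way to the target get the requested
      // permission plus u+wx, so the creator can traverse into and
      // populate the directories it just made.
      static FsPermission addImplicitUwx(FsPermission perm) {
        return new FsPermission(
            perm.getUserAction().or(FsAction.WRITE_EXECUTE),
            perm.getGroupAction(),
            perm.getOtherAction());
      }

      public static void main(String[] args) {
        // rw-r--r-- becomes rwxr--r-- for intermediate directories.
        System.out.println(addImplicitUwx(new FsPermission((short) 0644)));
      }
    }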

[39/50] [abbrv] hadoop git commit: FsPermission string constructor does not recognize sticky bit. Contributed by Atul Sikaria

2016-09-06 Thread drankye
FsPermission string constructor does not recognize sticky bit. Contributed by 
Atul Sikaria


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3069df75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3069df75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3069df75

Branch: refs/heads/HADOOP-12756
Commit: 3069df75513180a5b56130a7e05c6e339ab0a66a
Parents: dcd21d0
Author: Chris Douglas 
Authored: Thu Sep 1 11:32:26 2016 -0700
Committer: Chris Douglas 
Committed: Thu Sep 1 11:32:26 2016 -0700

--
 .../hadoop/fs/permission/FsPermission.java  |2 +-
 .../apache/hadoop/fs/permission/RawParser.java  |   44 +
 .../hadoop/fs/permission/TestFsPermission.java  | 1061 +-
 3 files changed, 585 insertions(+), 522 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3069df75/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index b535fd6..78255bbd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -103,7 +103,7 @@ public class FsPermission implements Writable {
* @throws IllegalArgumentException if mode is invalid
*/
   public FsPermission(String mode) {
-this(new UmaskParser(mode).getUMask());
+this(new RawParser(mode).getPermission());
   }
 
   /** Return user {@link FsAction}. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3069df75/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/RawParser.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/RawParser.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/RawParser.java
new file mode 100644
index 000..3bbe9cb
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/RawParser.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.permission;
+
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+class RawParser extends PermissionParser {
+  private static Pattern rawOctalPattern =
+  Pattern.compile("^\\s*([01]?)([0-7]{3})\\s*$");
+  private static Pattern rawNormalPattern =
+  Pattern.compile("\\G\\s*([ugoa]*)([+=-]+)([rwxt]*)([,\\s]*)\\s*");
+
+  private short permission;
+
+  public RawParser(String modeStr) throws IllegalArgumentException {
+super(modeStr, rawNormalPattern, rawOctalPattern);
+permission = (short)combineModes(0, false);
+  }
+
+  public short getPermission() {
+return permission;
+  }
+
+}

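A minimal usage sketch of what the String constructor accepts after this change (the mode strings are illustrative):

    import org.apache.hadoop.fs.permission.FsPermission;

    public class StickyBitParseDemo {
      public static void main(String[] args) {
        // Plain three-digit octal, as before.
        FsPermission plain = new FsPermission("777");
        // Leading sticky-bit digit, now recognized by rawOctalPattern.
        FsPermission sticky = new FsPermission("1777");
        System.out.println(plain);   // rwxrwxrwx
        System.out.println(sticky);  // rwxrwxrwt
      }
    }

Note the design choice visible in the FsPermission hunk above: the String constructor now parses its argument as an absolute permission via RawParser, whereas UmaskParser interpreted it as a umask specification.
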
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3069df75/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
index 04dbe01..6368a57 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
@@ -56,6 +56,8 @@ public class TestFsPermission extends

[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HADOOP-12756

2016-09-06 Thread drankye
Merge branch 'trunk' into HADOOP-12756


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b68b11b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b68b11b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b68b11b

Branch: refs/heads/HADOOP-12756
Commit: 8b68b11bd7a44116ae325b8b9cb57299578523e1
Parents: b2d0db8 07650bc
Author: Kai Zheng 
Authored: Wed Sep 7 16:43:44 2016 +0800
Committer: Kai Zheng 
Committed: Wed Sep 7 16:43:44 2016 +0800

--
 .gitignore  | 5 +
 .../dev-support/jdiff-workaround.patch  |98 +
 .../jdiff/Apache_Hadoop_Common_2.7.2.xml| 46648 +
 hadoop-common-project/hadoop-common/pom.xml | 7 +
 .../src/main/bin/hadoop-functions.sh| 4 +-
 .../org/apache/hadoop/conf/ConfigRedactor.java  |84 +
 .../org/apache/hadoop/conf/Configuration.java   | 5 +-
 .../apache/hadoop/conf/ReconfigurableBase.java  |13 +-
 .../apache/hadoop/crypto/key/KeyProvider.java   | 4 +
 .../apache/hadoop/fs/ChecksumFileSystem.java| 7 +-
 .../java/org/apache/hadoop/fs/ChecksumFs.java   | 4 +-
 .../fs/CommonConfigurationKeysPublic.java   |40 +
 .../org/apache/hadoop/fs/ContentSummary.java|   127 +-
 .../java/org/apache/hadoop/fs/FileContext.java  | 4 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |54 +-
 .../java/org/apache/hadoop/fs/FileUtil.java |73 +-
 .../main/java/org/apache/hadoop/fs/Options.java | 3 +-
 .../java/org/apache/hadoop/fs/TrashPolicy.java  |26 -
 .../apache/hadoop/fs/TrashPolicyDefault.java|21 +-
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java | 3 +-
 .../hadoop/fs/permission/FsPermission.java  | 2 +-
 .../apache/hadoop/fs/permission/RawParser.java  |44 +
 .../apache/hadoop/fs/sftp/SFTPFileSystem.java   | 4 +-
 .../java/org/apache/hadoop/fs/shell/Count.java  |20 +-
 .../org/apache/hadoop/fs/shell/FsUsage.java |31 +-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java | 6 +-
 .../org/apache/hadoop/fs/viewfs/ViewFs.java |12 +-
 .../org/apache/hadoop/http/HttpServer2.java |76 +-
 .../apache/hadoop/io/ElasticByteBufferPool.java | 1 +
 .../org/apache/hadoop/io/SecureIOUtils.java | 3 +-
 .../rawcoder/NativeRSRawDecoder.java| 5 +
 .../rawcoder/NativeRSRawEncoder.java| 5 +
 .../rawcoder/RSRawDecoderLegacy.java|56 +-
 .../hadoop/io/retry/RetryInvocationHandler.java | 8 +-
 .../apache/hadoop/io/retry/package-info.java|22 +
 .../org/apache/hadoop/io/retry/package.html |48 -
 .../org/apache/hadoop/ipc/CallerContext.java| 6 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |77 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|   267 +-
 .../org/apache/hadoop/ipc/ResponseBuffer.java   |12 +-
 .../java/org/apache/hadoop/ipc/RpcWritable.java |20 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |   336 +-
 .../org/apache/hadoop/ipc/package-info.java | 4 +
 .../java/org/apache/hadoop/ipc/package.html |23 -
 .../org/apache/hadoop/jmx/JMXJsonServlet.java   |11 +-
 .../metrics2/sink/RollingFileSystemSink.java| 6 +-
 .../java/org/apache/hadoop/security/Groups.java |57 +-
 .../hadoop/security/LdapGroupsMapping.java  |12 +-
 .../apache/hadoop/security/SaslRpcClient.java   |64 +-
 .../hadoop/security/UserGroupInformation.java   |28 +-
 .../alias/AbstractJavaKeyStoreProvider.java | 4 +-
 .../security/alias/CredentialProvider.java  | 6 +-
 .../alias/CredentialProviderFactory.java| 6 +-
 .../security/authorize/AccessControlList.java   | 2 +-
 .../security/ssl/ReloadingX509TrustManager.java |14 +-
 .../ssl/SslSelectChannelConnectorSecure.java|58 +
 .../security/ssl/SslSocketConnectorSecure.java  |58 -
 .../ZKDelegationTokenSecretManager.java |44 +
 .../DelegationTokenAuthenticationFilter.java|10 +
 .../hadoop/util/ApplicationClassLoader.java |20 +-
 .../apache/hadoop/util/AutoCloseableLock.java   |   122 +
 .../org/apache/hadoop/util/DataChecksum.java| 4 +-
 .../hadoop/util/GenericOptionsParser.java   |   134 +-
 .../org/apache/hadoop/util/HostsFileReader.java |   111 +-
 .../util/InvalidChecksumSizeException.java  |32 +
 .../hadoop/util/NodeHealthScriptRunner.java | 3 +-
 .../java/org/apache/hadoop/util/RunJar.java | 6 +-
 .../main/java/org/apache/hadoop/util/Shell.java |13 +-
 .../apache/hadoop/io/erasurecode/jni_common.c   |15 +-
 .../src/main/resources/core-default.xml |   127 +-
 .../src/site/markdown/Benchmarking.md   |   106 +
 .../src/site/markdown/CredentialProviderAPI.md  | 4 +-
 .../src/site/markdown/FileSystemShell.md|11 +-
 .../src/site/markdown

[34/50] [abbrv] hadoop git commit: HDFS-10784. Implement WebHdfsFileSystem#listStatusIterator.

2016-09-06 Thread drankye
HDFS-10784. Implement WebHdfsFileSystem#listStatusIterator.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85bab5fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85bab5fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85bab5fb

Branch: refs/heads/HADOOP-12756
Commit: 85bab5fb572194fda38854f1f21c670925058009
Parents: 01721dd
Author: Andrew Wang 
Authored: Wed Aug 31 14:29:37 2016 -0700
Committer: Andrew Wang 
Committed: Wed Aug 31 14:29:37 2016 -0700

--
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  21 
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  55 ++
 .../hadoop/hdfs/web/resources/GetOpParam.java   |   3 +-
 .../hdfs/web/resources/StartAfterParam.java |  38 +++
 .../web/resources/NamenodeWebHdfsMethods.java   |  27 +++--
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|  45 ++--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 105 +++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java |  55 --
 .../apache/hadoop/hdfs/web/TestWebHdfsUrl.java  |  25 +
 .../hadoop/hdfs/web/resources/TestParam.java|   7 ++
 10 files changed, 361 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85bab5fb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index baebff2..35f0f9a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.hadoop.fs.ContentSummary;
@@ -33,6 +34,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -141,6 +143,25 @@ class JsonUtilClient {
 storagePolicy, null);
   }
 
+  static DirectoryListing toDirectoryListing(final Map<?, ?> json) {
+if (json == null) {
+  return null;
+}
+final List<?> list = JsonUtilClient.getList(json,
+"partialListing");
+
+HdfsFileStatus[] partialListing = new HdfsFileStatus[list.size()];
+int i = 0;
+for (Object o : list) {
+  final Map m = (Map) o;
+  partialListing[i++] = toFileStatus(m, false);
+}
+int remainingEntries = getInt(json, "remainingEntries", -1);
+Preconditions.checkState(remainingEntries != -1,
+"remainingEntries was not set");
+return new DirectoryListing(partialListing, remainingEntries);
+  }
+
   /** Convert a Json map to an ExtendedBlock object. */
  static ExtendedBlock toExtendedBlock(final Map<?, ?> m) {
 if (m == null) {

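For orientation, a hedged client-side sketch of the iterator this patch backs with batched listings (the URI, port, and path are illustrative):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListStatusIteratorDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode.example.com:50070/"),
            new Configuration());
        // With this patch, WebHdfsFileSystem fetches the listing in pages
        // (driven by the new startAfter parameter) rather than in one
        // potentially huge LISTSTATUS response.
        RemoteIterator<FileStatus> it =
            fs.listStatusIterator(new Path("/user"));
        while (it.hasNext()) {
          System.out.println(it.next().getPath());
        }
      }
    }
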
http://git-wip-us.apache.org/repos/asf/hadoop/blob/85bab5fb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 9e42b24..968eea3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
@@ -79,6 +80,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import

[43/50] [abbrv] hadoop git commit: YARN-5555. Scheduler UI: "% of Queue" is inaccurate if leaf queue is hierarchically nested. Contributed by Eric Payne.

2016-09-06 Thread drankye
YARN-5555. Scheduler UI: "% of Queue" is inaccurate if leaf queue is 
hierarchically nested. Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05f5c0f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05f5c0f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05f5c0f6

Branch: refs/heads/HADOOP-12756
Commit: 05f5c0f631680cffc36a79550c351620615445db
Parents: 0690f09
Author: Varun Vasudev 
Authored: Fri Sep 2 16:02:01 2016 +0530
Committer: Varun Vasudev 
Committed: Fri Sep 2 16:02:01 2016 +0530

--
 .../scheduler/common/fica/FiCaSchedulerApp.java | 27 ++
 .../scheduler/capacity/TestLeafQueue.java   | 87 
 2 files changed, 114 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05f5c0f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 33dee80..9c84a23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -57,6 +58,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AbstractCSQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSAMContainerLaunchDiagnosticsConstants;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSAssignment;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityHeadroomProvider;
@@ -617,4 +619,29 @@ public class FiCaSchedulerApp extends 
SchedulerApplicationAttempt {
   updateAMContainerDiagnostics(AMState.ACTIVATED, 
diagnosticMessageBldr.toString());
 }
   }
+
+  /**
+   * Recalculates the per-app, percent of queue metric, specific to the
+   * Capacity Scheduler.
+   */
+  @Override
+  public synchronized ApplicationResourceUsageReport getResourceUsageReport() {
+ApplicationResourceUsageReport report = super.getResourceUsageReport();
+Resource cluster = rmContext.getScheduler().getClusterResource();
+Resource totalPartitionRes =
+rmContext.getNodeLabelManager()
+  .getResourceByLabel(getAppAMNodePartitionName(), cluster);
+ResourceCalculator calc = rmContext.getScheduler().getResourceCalculator();
+if (!calc.isInvalidDivisor(totalPartitionRes)) {
+  float queueAbsMaxCapPerPartition =
+  ((AbstractCSQueue)getQueue()).getQueueCapacities()
+.getAbsoluteCapacity(getAppAMNodePartitionName());
+  float queueUsagePerc =
+  calc.divide(totalPartitionRes, report.getUsedResources(),
+  Resources.multiply(totalPartitionRes,
+  queueAbsMaxCapPerPartition)) * 100;
+  report.setQueueUsagePercentage(queueUsagePerc);
+}
+return report;
+  }
 }

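To make the new calculation concrete, a worked instance with illustrative numbers (not from the patch): suppose the app's AM node partition holds 100 GB, the leaf queue's absolute capacity for that partition is 0.2, and the app is using 10 GB. Then queueUsagePerc = 10 / (100 * 0.2) * 100 = 50, i.e. the app occupies half of its queue's share. Because getAbsoluteCapacity already folds every ancestor's share into the figure, the depth at which the leaf queue is nested no longer skews the percentage; the isInvalidDivisor guard simply skips the update when the partition has no resources.
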
http://git-wip-us.apache.org/repos/asf/hadoop/blob/05f5c0f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/

[46/50] [abbrv] hadoop git commit: YARN-5549. AMLauncher#createAMContainerLaunchContext() should not log the command to be launched indiscriminately. (Daniel Templeton via rchiang)

2016-09-06 Thread drankye
YARN-5549. AMLauncher#createAMContainerLaunchContext() should not log the 
command to be launched indiscriminately. (Daniel Templeton via rchiang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/378f624a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/378f624a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/378f624a

Branch: refs/heads/HADOOP-12756
Commit: 378f624a392550770d1db33cb4cee3ef7d5facd4
Parents: 5a8c506
Author: Ray Chiang 
Authored: Fri Sep 2 11:07:39 2016 -0700
Committer: Ray Chiang 
Committed: Fri Sep 2 11:14:35 2016 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 12 +
 .../src/main/resources/yarn-default.xml | 13 ++
 .../resourcemanager/amlauncher/AMLauncher.java  | 26 
 3 files changed, 46 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/378f624a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 46e3323..86e8a95 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -534,6 +534,18 @@ public class YarnConfiguration extends Configuration {
   public static final int
   DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE = 10;
 
+  /**
+   * The {@code AMLauncher.createAMContainerLaunchContext()} method will log 
the
+   * command being executed to the RM log if this property is true. Commands
+   * may contain sensitive information, such as application or service
+   * passwords, making logging the commands a security risk. In cases where
+   * the cluster may be running applications with such commands, this property
+   * should be set to false. Commands are only logged at the debug level.
+   */
+  public static final String RM_AMLAUNCHER_LOG_COMMAND =
+  RM_PREFIX + "amlauncher.log.command";
+  public static final boolean DEFAULT_RM_AMLAUNCHER_LOG_COMMAND = false;
+
   //RM delegation token related keys
   public static final String RM_DELEGATION_KEY_UPDATE_INTERVAL_KEY =
 RM_PREFIX + "delegation.key.update-interval";

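A hedged sketch of how the new flag gates logging (the two constants are real per the hunk above; the gating shown is a simplification of the AMLauncher change, whose diff appears later in this message):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class LogCommandGateSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Defaults to false: launch commands stay out of the RM log
        // unless an operator explicitly opts in.
        boolean logCommand = conf.getBoolean(
            YarnConfiguration.RM_AMLAUNCHER_LOG_COMMAND,
            YarnConfiguration.DEFAULT_RM_AMLAUNCHER_LOG_COMMAND);
        String command = "launch-cmd --password ..."; // illustrative stand-in
        System.out.println(logCommand
            ? "Command to launch container: " + command
            : "Launch command suppressed (may contain secrets)");
      }
    }
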
http://git-wip-us.apache.org/repos/asf/hadoop/blob/378f624a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e956507..423b78b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -299,6 +299,19 @@
   </property>
 
   <property>
+    <description>
+      The resource manager will log all commands being executed to the RM log
+      if this property is true. Commands may contain sensitive information,
+      such as application or service passwords, making logging the commands a
+      security risk. In cases where the cluster may be running applications with
+      such commands this property should be set to false. Commands are only
+      logged at the debug level.
+    </description>
+    <name>yarn.resourcemanager.amlauncher.log.command</name>
+    <value>false</value>
+  </property>
+
+  <property>
     <description>The class to use as the resource scheduler.</description>
     <name>yarn.resourcemanager.scheduler.class</name>
     <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/378f624a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index 4aace2c..181463a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/A

[40/50] [abbrv] hadoop git commit: HADOOP-13465. Design Server.Call to be extensible for unified call queue. Contributed by Daryn Sharp.

2016-09-06 Thread drankye
HADOOP-13465. Design Server.Call to be extensible for unified call queue. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76cd81f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76cd81f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76cd81f4

Branch: refs/heads/HADOOP-12756
Commit: 76cd81f4b656f0d40a4b2122e15f04ea53d8020b
Parents: 3069df7
Author: Kihwal Lee 
Authored: Thu Sep 1 15:44:44 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Sep 1 15:44:44 2016 -0500

--
 .../main/java/org/apache/hadoop/ipc/Server.java | 336 +++
 1 file changed, 191 insertions(+), 145 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76cd81f4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 4c73f6a..80b3303 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -354,10 +354,9 @@ public abstract class Server {
*/
   public static InetAddress getRemoteIp() {
 Call call = CurCall.get();
-return (call != null && call.connection != null) ? call.connection
-.getHostInetAddress() : null;
+return (call != null ) ? call.getHostInetAddress() : null;
   }
-  
+
   /**
* Returns the clientId from the current RPC request
*/
@@ -380,10 +379,9 @@ public abstract class Server {
*/
   public static UserGroupInformation getRemoteUser() {
 Call call = CurCall.get();
-return (call != null && call.connection != null) ? call.connection.user
-: null;
+return (call != null) ? call.getRemoteUser() : null;
   }
- 
+
   /** Return true if the invocation was through an RPC.
*/
   public static boolean isRpcInvocation() {
@@ -483,7 +481,7 @@ public abstract class Server {
 if ((rpcMetrics.getProcessingSampleCount() > minSampleSize) &&
 (processingTime > threeSigma)) {
   if(LOG.isWarnEnabled()) {
-String client = CurCall.get().connection.toString();
+String client = CurCall.get().toString();
 LOG.warn(
 "Slow RPC : " + methodName + " took " + processingTime +
 " milliseconds to process from client " + client);
@@ -657,62 +655,65 @@ public abstract class Server {
 CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT);
   }
 
-  /** A call queued for handling. */
-  public static class Call implements Schedulable {
-private final int callId; // the client's call id
-private final int retryCount;// the retry count of the call
-private final Writable rpcRequest;// Serialized Rpc request from client
-private final Connection connection;  // connection to client
-private long timestamp;   // time received when response is 
null
-  // time served when response is not 
null
-private ByteBuffer rpcResponse;   // the response for this call
+  /** A generic call queued for handling. */
+  public static class Call implements Schedulable,
+  PrivilegedExceptionAction<Void> {
+final int callId;// the client's call id
+final int retryCount;// the retry count of the call
+long timestamp;  // time received when response is null
+ // time served when response is not null
 private AtomicInteger responseWaitCount = new AtomicInteger(1);
-private final RPC.RpcKind rpcKind;
-private final byte[] clientId;
+final RPC.RpcKind rpcKind;
+final byte[] clientId;
 private final TraceScope traceScope; // the HTrace scope on the server side
 private final CallerContext callerContext; // the call context
 private int priorityLevel;
 // the priority level assigned by scheduler, 0 by default
 
-private Call(Call call) {
-  this(call.callId, call.retryCount, call.rpcRequest, call.connection,
-  call.rpcKind, call.clientId, call.traceScope, call.callerContext);
+Call(Call call) {
+  this(call.callId, call.retryCount, call.rpcKind, call.clientId,
+  call.traceScope, call.callerContext);
 }
 
-public Call(int id, int retryCount, Writable param, 
-Connection connection) {
-  this(id, retryCount, param, connection, RPC.RpcKind.RPC_BUILTIN,
-  RpcConstants.DUMMY_CLIENT_ID);
+Call(int id, int retryCount, RPC.RpcKind kind, byte[] clientId) {
+  this(id, retryCount, kind, clientId

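The shape of the refactoring, reduced to a self-contained sketch (the Connection interface here is a stand-in, not the real inner class; the point is that the base Call no longer holds a connection, so a unified call queue can carry entries that never arrived over a socket):

    import java.net.InetAddress;

    public class CallRefactorSketch {
      // Stand-in for the server's per-socket state (assumption).
      interface Connection {
        InetAddress getHostInetAddress();
        String getUser();
      }

      // Generic call: no connection field; accessors return null by
      // default, mirroring the old "call.connection != null" guards.
      static class Call {
        final int callId;
        Call(int callId) { this.callId = callId; }
        InetAddress getHostInetAddress() { return null; }
        String getRemoteUser() { return null; }
      }

      // Connection-bound subclass supplies the real values, which is why
      // Server.getRemoteIp()/getRemoteUser() can now ask the Call directly.
      static class RpcCall extends Call {
        private final Connection connection;
        RpcCall(int callId, Connection connection) {
          super(callId);
          this.connection = connection;
        }
        @Override InetAddress getHostInetAddress() {
          return connection.getHostInetAddress();
        }
        @Override String getRemoteUser() {
          return connection.getUser();
        }
      }
    }
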
[42/50] [abbrv] hadoop git commit: HDFS-10820. Reuse closeResponder to reset the response variable in DataStreamer#run. Contributed by Yiqun Lin.

2016-09-06 Thread drankye
HDFS-10820. Reuse closeResponder to reset the response variable in 
DataStreamer#run. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0690f096
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0690f096
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0690f096

Branch: refs/heads/HADOOP-12756
Commit: 0690f0969efef201b074a8b26400b2e5ba9b97ad
Parents: 74f4bae
Author: Xiao Chen 
Authored: Thu Sep 1 15:34:47 2016 -0700
Committer: Xiao Chen 
Committed: Thu Sep 1 15:35:25 2016 -0700

--
 .../main/java/org/apache/hadoop/hdfs/DataStreamer.java| 10 ++
 1 file changed, 2 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0690f096/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index ee673ed..062a416 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -536,14 +536,8 @@ class DataStreamer extends Daemon {
 TraceScope scope = null;
 while (!streamerClosed && dfsClient.clientRunning) {
   // if the Responder encountered an error, shutdown Responder
-  if (errorState.hasError() && response != null) {
-try {
-  response.close();
-  response.join();
-  response = null;
-} catch (InterruptedException  e) {
-  LOG.warn("Caught exception", e);
-}
+  if (errorState.hasError()) {
+closeResponder();
   }
 
   DFSPacket one;

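For reference, roughly what the reused helper does (a paraphrase of DataStreamer#closeResponder, not part of this diff; the thread type and close mechanics are simplified):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class CloseResponderSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(CloseResponderSketch.class);

      private Thread response; // stand-in for the ResponseProcessor daemon

      void closeResponder() {
        if (response != null) {
          try {
            response.interrupt(); // real code calls ResponseProcessor.close()
            response.join();      // wait for the responder to exit
          } catch (InterruptedException e) {
            LOG.warn("Caught exception", e);
          } finally {
            // Always clear the field so the next loop iteration can start
            // a fresh responder.
            response = null;
          }
        }
      }
    }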




[48/50] [abbrv] hadoop git commit: YARN-5264. Store all queue-specific information in FSQueue. (Yufei Gu via kasha)

2016-09-06 Thread drankye
YARN-5264. Store all queue-specific information in FSQueue. (Yufei Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6ea9be5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6ea9be5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6ea9be5

Branch: refs/heads/HADOOP-12756
Commit: f6ea9be5473ab66798b0536317d2f32c5348eb57
Parents: cbd909c
Author: Karthik Kambatla 
Authored: Fri Sep 2 14:56:29 2016 -0700
Committer: Karthik Kambatla 
Committed: Fri Sep 2 14:56:29 2016 -0700

--
 .../scheduler/fair/AllocationConfiguration.java | 83 
 .../scheduler/fair/FSLeafQueue.java | 17 ++--
 .../scheduler/fair/FSParentQueue.java   |  8 +-
 .../resourcemanager/scheduler/fair/FSQueue.java | 71 ++---
 .../scheduler/fair/FairScheduler.java   |  7 +-
 .../scheduler/fair/MaxRunningAppsEnforcer.java  | 11 +--
 .../scheduler/fair/QueueManager.java| 45 ++-
 .../webapp/dao/FairSchedulerQueueInfo.java  |  2 +-
 .../scheduler/fair/TestAppRunnability.java  |  6 +-
 .../scheduler/fair/TestFSLeafQueue.java | 10 +--
 .../scheduler/fair/TestFairScheduler.java   | 11 ++-
 .../fair/TestMaxRunningAppsEnforcer.java| 20 ++---
 .../scheduler/fair/TestQueueManager.java| 17 ++--
 13 files changed, 160 insertions(+), 148 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ea9be5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index 5cf110f..c771887 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -22,6 +22,8 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -37,6 +39,7 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import com.google.common.annotations.VisibleForTesting;
 
 public class AllocationConfiguration extends ReservationSchedulerConfiguration 
{
+  private static final Log LOG = LogFactory.getLog(FSQueue.class.getName());
   private static final AccessControlList EVERYBODY_ACL = new 
AccessControlList("*");
   private static final AccessControlList NOBODY_ACL = new AccessControlList(" 
");
   private static final ResourceCalculator RESOURCE_CALCULATOR =
@@ -242,26 +245,24 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 return !nonPreemptableQueues.contains(queueName);
   }
 
-  public ResourceWeights getQueueWeight(String queue) {
+  private ResourceWeights getQueueWeight(String queue) {
 ResourceWeights weight = queueWeights.get(queue);
 return (weight == null) ? ResourceWeights.NEUTRAL : weight;
   }
 
-  public void setQueueWeight(String queue, ResourceWeights weight) {
-queueWeights.put(queue, weight);
-  }
-  
   public int getUserMaxApps(String user) {
 Integer maxApps = userMaxApps.get(user);
 return (maxApps == null) ? userMaxAppsDefault : maxApps;
   }
 
-  public int getQueueMaxApps(String queue) {
+  @VisibleForTesting
+  int getQueueMaxApps(String queue) {
 Integer maxApps = queueMaxApps.get(queue);
 return (maxApps == null) ? queueMaxAppsDefault : maxApps;
   }
-  
-  public float getQueueMaxAMShare(String queue) {
+
+  @VisibleForTesting
+  float getQueueMaxAMShare(String queue) {
 Float maxAMShare = queueMaxAMShares.get(queue);
 return (maxAMShare == null) ? queueMaxAMShareDefault : maxAMShare;
   }
@@ -273,29 +274,21 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
* @return the min allocation on this queue or {@link Resources#none}
* if not set
*/
-  public Resource getMinResources(String queue

[37/50] [abbrv] hadoop git commit: YARN-5596. Fix failing unit test in TestDockerContainerRuntime. Contributed by Sidharta Seethana.

2016-09-06 Thread drankye
YARN-5596. Fix failing unit test in TestDockerContainerRuntime. Contributed by 
Sidharta Seethana.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08f55ccb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08f55ccb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08f55ccb

Branch: refs/heads/HADOOP-12756
Commit: 08f55ccbb075b90a6e534f4ac0f9840f2e6890d5
Parents: 6c60036
Author: Varun Vasudev 
Authored: Thu Sep 1 14:08:51 2016 +0530
Committer: Varun Vasudev 
Committed: Thu Sep 1 14:08:51 2016 +0530

--
 .../runtime/DockerLinuxContainerRuntime.java|  6 --
 .../runtime/TestDockerContainerRuntime.java | 20 
 2 files changed, 20 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f55ccb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 33b8add..a3aff2f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -146,6 +146,8 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   public static final String ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS =
   "YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS";
 
+  static final String CGROUPS_ROOT_DIRECTORY = "/sys/fs/cgroup";
+
   private Configuration conf;
   private DockerClient dockerClient;
   private PrivilegedOperationExecutor privilegedOperationExecutor;
@@ -437,7 +439,6 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 LOCALIZED_RESOURCES);
 @SuppressWarnings("unchecked")
 List<String> userLocalDirs = ctx.getExecutionAttribute(USER_LOCAL_DIRS);
-
 Set<String> capabilities = new HashSet<>(Arrays.asList(conf.getStrings(
 YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
 YarnConfiguration.DEFAULT_NM_DOCKER_CONTAINER_CAPABILITIES)));
@@ -449,7 +450,8 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 .setContainerWorkDir(containerWorkDir.toString())
 .setNetworkType(network)
 .setCapabilities(capabilities)
-.addMountLocation("/sys/fs/cgroup", "/sys/fs/cgroup:ro", false);
+.addMountLocation(CGROUPS_ROOT_DIRECTORY,
+CGROUPS_ROOT_DIRECTORY + ":ro", false);
 List<String> allDirs = new ArrayList<>(containerLocalDirs);
 
 allDirs.addAll(filecacheDirs);

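In plain Docker terms (an illustration; the runtime assembles the option through its command builder rather than a shell): the mount added above corresponds to passing -v /sys/fs/cgroup:/sys/fs/cgroup:ro to docker run, exposing the host cgroup hierarchy read-only inside the container. Extracting the path into CGROUPS_ROOT_DIRECTORY lets the test below assert the expected mount string only when that directory actually exists on the build host.
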
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f55ccb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index a05ff46..3253394 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -257,6 +257,18 @@ public class TestDockerContainerRuntime {
 return expectedCapabilitiesString.toString();
   }
 
+  private String getExpectedCGroupsMountString() {
+boolean cGroupsMountExists = new File(
+DockerLinuxContainerRuntime.CGROUPS_ROOT_DIRECTORY).exists();
+
+

[01/50] [abbrv] hadoop git commit: HDFS-8915. TestFSNamesystem.testFSLockGetWaiterCount fails intermittently. Contributed by Masatake Iwasaki.

2016-09-06 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12756 b2d0db8d1 -> 8b68b11bd


HDFS-8915. TestFSNamesystem.testFSLockGetWaiterCount fails intermittently. 
Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13fb1b50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13fb1b50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13fb1b50

Branch: refs/heads/HADOOP-12756
Commit: 13fb1b50e608558b2970184908ee5b9fcd7eb7b6
Parents: 46e02ab
Author: Kihwal Lee 
Authored: Fri Aug 26 09:51:32 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Aug 26 09:53:10 2016 -0500

--
 .../hdfs/server/namenode/TestFSNamesystem.java   | 15 ---
 1 file changed, 12 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13fb1b50/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index b9a2d15..572b40d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -30,6 +30,7 @@ import java.net.InetAddress;
 import java.net.URI;
 import java.util.Collection;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
@@ -56,6 +57,7 @@ import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.TimeoutException;
 
 public class TestFSNamesystem {
 
@@ -271,9 +273,16 @@ public class TestFSNamesystem {
 }
 
 latch.await();
-Thread.sleep(10); // Lets all threads get BLOCKED
-Assert.assertEquals("Expected number of blocked thread not found",
-threadCount, rwLock.getQueueLength());
+try {
+  GenericTestUtils.waitFor(new Supplier<Boolean>() {
+@Override
+public Boolean get() {
+  return (threadCount == rwLock.getQueueLength());
+}
+  }, 10, 1000);
+} catch (TimeoutException e) {
+  fail("Expected number of blocked thread not found");
+}
   }
 
   /**

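As a side note, com.google.common.base.Supplier has a single abstract method, so on Java 8 the polling above collapses to a lambda; a self-contained sketch (the lock and count are illustrative):

    import java.util.concurrent.TimeoutException;
    import java.util.concurrent.locks.ReentrantReadWriteLock;
    import org.apache.hadoop.test.GenericTestUtils;

    public class WaitForSketch {
      public static void main(String[] args) throws Exception {
        final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
        final int threadCount = 0; // nothing is queued in this toy example
        try {
          // Poll every 10 ms for up to 1000 ms; unlike a fixed sleep,
          // this cannot race with slow thread scheduling.
          GenericTestUtils.waitFor(
              () -> threadCount == rwLock.getQueueLength(), 10, 1000);
        } catch (TimeoutException e) {
          System.err.println("condition never became true");
        }
      }
    }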




[47/50] [abbrv] hadoop git commit: HDFS-10833. Fix JSON errors in WebHDFS.md examples.

2016-09-06 Thread drankye
HDFS-10833. Fix JSON errors in WebHDFS.md examples.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbd909ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbd909ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbd909ce

Branch: refs/heads/HADOOP-12756
Commit: cbd909ce2a5ac1da258f756fa1f93e84dd20b926
Parents: 378f624
Author: Andrew Wang 
Authored: Fri Sep 2 14:50:34 2016 -0700
Committer: Andrew Wang 
Committed: Fri Sep 2 14:50:34 2016 -0700

--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md  | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbd909ce/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 30f583e..c62fb2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -1294,7 +1294,7 @@ All operations, except for 
[`OPEN`](#Open_and_Read_a_File), either return a zero
   {
 "entries":
 {
-  "type": "array"
+  "type": "array",
   "items":
   {
 "description": "ACL entry.",
@@ -1318,7 +1318,7 @@ All operations, except for 
[`OPEN`](#Open_and_Read_a_File), either return a zero
   "description": "True if the sticky bit is on.",
   "type"   : "boolean",
   "required"   : true
-},
+}
   }
 }
   }
@@ -1337,7 +1337,7 @@ All operations, except for 
[`OPEN`](#Open_and_Read_a_File), either return a zero
   "type"  : "array",
   "items":
   {
-"type"" "object",
+"type": "object",
 "properties":
 {
   "name":
@@ -1368,7 +1368,7 @@ All operations, except for 
[`OPEN`](#Open_and_Read_a_File), either return a zero
 "XAttrNames":
 {
   "description": "XAttr names.",
-  "type"   : "string"
+  "type"   : "string",
   "required"   : true
 }
   }
@@ -1737,7 +1737,7 @@ See also: [`GETHOMEDIRECTORY`](#Get_Home_Directory), 
[Path](../../api/org/apache
 "javaClassName": //an optional 
property
 {
   "description": "Java class name of the exception",
-  "type"   : "string",
+  "type"   : "string"
 }
   }
 }





[23/50] [abbrv] hadoop git commit: HDFS-10814. Add assertion for getNumEncryptionZones when no EZ is created. Contributed by Vinitha Reddy Gankidi.

2016-09-06 Thread drankye
HDFS-10814. Add assertion for getNumEncryptionZones when no EZ is created. 
Contributed by Vinitha Reddy Gankidi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bd45f54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bd45f54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bd45f54

Branch: refs/heads/HADOOP-12756
Commit: 4bd45f54eedd449a98a90540698c6ceb47454fec
Parents: cd5e10c
Author: Zhe Zhang 
Authored: Mon Aug 29 23:37:26 2016 -0700
Committer: Zhe Zhang 
Committed: Mon Aug 29 23:37:26 2016 -0700

--
 .../test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bd45f54/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index e221fe4..53c12ec 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -326,7 +326,9 @@ public class TestEncryptionZones {
   public void testBasicOperations() throws Exception {
 
 int numZones = 0;
-
+/* Number of EZs should be 0 if no EZ is created */
+assertEquals("Unexpected number of encryption zones!", numZones,
+cluster.getNamesystem().getNumEncryptionZones());
 /* Test failure of create EZ on a directory that doesn't exist. */
 final Path zoneParent = new Path("/zones");
 final Path zone1 = new Path(zoneParent, "zone1");





[41/50] [abbrv] hadoop git commit: YARN-5566. Client-side NM graceful decom is not triggered when jobs finish. (Robert Kanter via kasha)

2016-09-06 Thread drankye
YARN-5566. Client-side NM graceful decom is not triggered when jobs finish. 
(Robert Kanter via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74f4bae4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74f4bae4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74f4bae4

Branch: refs/heads/HADOOP-12756
Commit: 74f4bae45597f4794e99e33309130ddff647b21f
Parents: 76cd81f
Author: Karthik Kambatla 
Authored: Thu Sep 1 14:44:01 2016 -0700
Committer: Karthik Kambatla 
Committed: Thu Sep 1 14:44:01 2016 -0700

--
 .../resourcemanager/rmnode/RMNodeImpl.java  | 13 +++--
 .../resourcemanager/TestRMNodeTransitions.java  | 29 ++--
 .../TestResourceTrackerService.java |  4 ++-
 3 files changed, 34 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74f4bae4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index d1ccecb..375b4cf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -1170,12 +1170,21 @@ public class RMNodeImpl implements RMNode, 
EventHandler<RMNodeEvent> {
   NodeState initialState = rmNode.getState();
   boolean isNodeDecommissioning =
   initialState.equals(NodeState.DECOMMISSIONING);
+  if (isNodeDecommissioning) {
+List<ApplicationId> keepAliveApps = statusEvent.getKeepAliveAppIds();
+if (rmNode.runningApplications.isEmpty() &&
+(keepAliveApps == null || keepAliveApps.isEmpty())) {
+  RMNodeImpl.deactivateNode(rmNode, NodeState.DECOMMISSIONED);
+  return NodeState.DECOMMISSIONED;
+}
+  }
+
   if (!remoteNodeHealthStatus.getIsNodeHealthy()) {
 LOG.info("Node " + rmNode.nodeId +
 " reported UNHEALTHY with details: " +
 remoteNodeHealthStatus.getHealthReport());
 // if a node in decommissioning receives an unhealthy report,
-// it will keep decommissioning.
+// it will stay in decommissioning.
 if (isNodeDecommissioning) {
   return NodeState.DECOMMISSIONING;
 } else {
@@ -1349,7 +1358,7 @@ public class RMNodeImpl implements RMNode, 
EventHandler<RMNodeEvent> {
   + " is the first container get launched for application "
   + containerAppId);
 }
-runningApplications.add(containerAppId);
+handleRunningAppOnNode(this, context, containerAppId, nodeId);
   }
 
   // Process running containers

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74f4bae4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
index e82b93c..6038b31 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.event.InlineDispatcher;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer
 .AllocationExpirationInfo;
 import 
org.apache.hadoop.yarn

[22/50] [abbrv] hadoop git commit: HDFS-4210. Throw helpful exception when DNS entry for JournalNode cannot be resolved. Contributed by Charles Lamb and John Zhuge.

2016-09-06 Thread drankye
HDFS-4210. Throw helpful exception when DNS entry for JournalNode cannot be 
resolved. Contributed by Charles Lamb and John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd5e10cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd5e10cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd5e10cc

Branch: refs/heads/HADOOP-12756
Commit: cd5e10ccca9b919d55ef7a500e61b44bd1a00171
Parents: 05ede00
Author: Xiao Chen 
Authored: Mon Aug 29 17:41:01 2016 -0700
Committer: Xiao Chen 
Committed: Mon Aug 29 17:48:08 2016 -0700

--
 .../qjournal/client/QuorumJournalManager.java|  9 +++--
 .../hdfs/qjournal/client/TestQJMWithFaults.java  | 19 ++-
 2 files changed, 25 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd5e10cc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index c32b667..ae3358b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URL;
+import java.net.UnknownHostException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -387,8 +388,12 @@ public class QuorumJournalManager implements 
JournalManager {
 
 List addrs = Lists.newArrayList();
 for (String addr : parts) {
-  addrs.add(NetUtils.createSocketAddr(
-  addr, DFSConfigKeys.DFS_JOURNALNODE_RPC_PORT_DEFAULT));
+  InetSocketAddress isa = NetUtils.createSocketAddr(
+  addr, DFSConfigKeys.DFS_JOURNALNODE_RPC_PORT_DEFAULT);
+  if (isa.isUnresolved()) {
+throw new UnknownHostException(addr);
+  }
+  addrs.add(isa);
 }
 return addrs;
   }

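Concretely (hostnames illustrative): with dfs.namenode.shared.edits.dir set to qjournal://jn1.example.com:8485;jn2.example.com:8485;bogus:8485/mycluster, constructing the QuorumJournalManager now fails immediately with UnknownHostException: bogus:8485, rather than surfacing the bad address only when the first RPC to that JournalNode fails; the new testUnresolvableHostName case below pins down this behavior.
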
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd5e10cc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
index b0a9b99..ecdbaf5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
@@ -27,7 +27,9 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.net.URISyntaxException;
+import java.net.UnknownHostException;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
@@ -52,7 +54,9 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -125,7 +129,10 @@ public class TestQJMWithFaults {
 }
 return ret;
   }
-  
+
+  @Rule
+  public ExpectedException expectedException = ExpectedException.none();
+
   /**
* Sets up two of the nodes to each drop a single RPC, at all
* possible combinations of RPCs. This may result in the
@@ -187,6 +194,16 @@ public class TestQJMWithFaults {
   }
   
   /**
+   * Expect {@link UnknownHostException} if a hostname can't be resolved.
+   */
+  @Test
+  public void testUnresolvableHostName() throws Exception {
+expectedException.expect(UnknownHostException.class);
+new QuorumJournalManager(conf,
+new URI("qjournal://" + "bogus:12345" + "/" + JID), FAKE_NSINFO);
+  }
+
+  /**
* Test case in which three JournalNodes randomly flip flop between
* up and down states every time they get an RPC.
* 


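For readers following the patch, a minimal standalone sketch of the resolution guard it introduces; the class and method names here are illustrative, and only the isUnresolved() check mirrors the change:

import java.net.InetSocketAddress;
import java.net.UnknownHostException;

public class ResolveCheck {
  // Like NetUtils.createSocketAddr, the InetSocketAddress constructor
  // attempts DNS resolution and returns an *unresolved* address rather
  // than throwing, so the caller must check isUnresolved() explicitly.
  static InetSocketAddress resolveOrFail(String host, int port)
      throws UnknownHostException {
    InetSocketAddress isa = new InetSocketAddress(host, port);
    if (isa.isUnresolved()) {
      throw new UnknownHostException(host + ":" + port);
    }
    return isa;
  }

  public static void main(String[] args) throws UnknownHostException {
    System.out.println(resolveOrFail("localhost", 12345));
    resolveOrFail("bogus.invalid", 12345);  // throws UnknownHostException
  }
}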

[03/50] [abbrv] hadoop git commit: YARN-5373. NPE listing wildcard directory in containerLaunch. (Daniel Templeton via kasha)

2016-09-06 Thread drankye
YARN-5373. NPE listing wildcard directory in containerLaunch. (Daniel Templeton 
via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cde3a005
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cde3a005
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cde3a005

Branch: refs/heads/HADOOP-12756
Commit: cde3a00526c562a500308232e2b93498d22c90d7
Parents: 9ef632f
Author: Karthik Kambatla 
Authored: Fri Aug 26 11:04:33 2016 -0700
Committer: Karthik Kambatla 
Committed: Fri Aug 26 11:04:33 2016 -0700

--
 .../server/nodemanager/ContainerExecutor.java   |  42 +++--
 .../nodemanager/DockerContainerExecutor.java|   4 +-
 .../nodemanager/LinuxContainerExecutor.java |  40 -
 .../launcher/ContainerLaunch.java   |   2 +-
 .../linux/privileged/PrivilegedOperation.java   |   6 +-
 .../impl/container-executor.c   |  54 ++-
 .../impl/container-executor.h   |  10 +-
 .../main/native/container-executor/impl/main.c  |  40 +++--
 .../test/test-container-executor.c  | 157 +++
 .../launcher/TestContainerLaunch.java   |  12 +-
 10 files changed, 325 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cde3a005/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index c73c4c7..818b0ea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -67,8 +67,8 @@ import org.apache.hadoop.util.StringUtils;
  * underlying OS.  All executor implementations must extend ContainerExecutor.
  */
 public abstract class ContainerExecutor implements Configurable {
-  private static final String WILDCARD = "*";
   private static final Log LOG = LogFactory.getLog(ContainerExecutor.class);
+  protected static final String WILDCARD = "*";
 
   /**
* The permissions to use when creating the launch script.
@@ -274,15 +274,16 @@ public abstract class ContainerExecutor implements 
Configurable {
* @param environment the environment variables and their values
* @param resources the resources which have been localized for this
* container. Symlinks will be created to these localized resources
-   * @param command the command that will be run.
-   * @param logDir the log dir to copy debugging information to
+   * @param command the command that will be run
+   * @param logDir the log dir to which to copy debugging information
+   * @param user the username of the job owner
* @throws IOException if any errors happened writing to the OutputStream,
* while creating symlinks
*/
   public void writeLaunchEnv(OutputStream out, Map<String, String> environment,
-  Map<Path, List<String>> resources, List<String> command, Path logDir)
-  throws IOException {
-this.writeLaunchEnv(out, environment, resources, command, logDir,
+  Map<Path, List<String>> resources, List<String> command, Path logDir,
+  String user) throws IOException {
+this.writeLaunchEnv(out, environment, resources, command, logDir, user,
 ContainerLaunch.CONTAINER_SCRIPT);
   }
 
@@ -295,17 +296,17 @@ public abstract class ContainerExecutor implements 
Configurable {
* @param environment the environment variables and their values
* @param resources the resources which have been localized for this
* container. Symlinks will be created to these localized resources
-   * @param command the command that will be run.
-   * @param logDir the log dir to copy debugging information to
+   * @param command the command that will be run
+   * @param logDir the log dir to which to copy debugging information
+   * @param user the username of the job owner
* @param outFilename the path to which to write the launch environment
* @throws IOException if any errors happened writing to the OutputStream,
* while creating symlinks
*/
   @VisibleForTesting
-  public void writeLaunchEnv(OutputStream out,
-  Map<String, String> environment, Map<Path, List<String>> resources,
-  List<String> command, Path logDir, String outFilename)
-  throws IOException {
+  public void writeLaunchEnv(OutputStre

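The diff is truncated by the archive here. As a hedged illustration of the failure mode named in the summary: File.listFiles() returns null, not an empty array, when a path cannot be listed, and that null is the kind of value a wildcard-expanding container launch must guard against. A sketch, not the actual ContainerExecutor code:

import java.io.File;
import java.io.IOException;

public class WildcardExpand {
  // Fail loudly instead of letting a null from listFiles() surface
  // later as a NullPointerException during classpath expansion.
  static File[] listDirectory(File dir) throws IOException {
    File[] entries = dir.listFiles();
    if (entries == null) {
      throw new IOException("Could not list directory " + dir);
    }
    return entries;
  }
}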
[13/50] [abbrv] hadoop git commit: YARN-5560. Clean up bad exception catching practices in TestYarnClient. Contributed by Sean Po

2016-09-06 Thread drankye
YARN-5560. Clean up bad exception catching practices in TestYarnClient. 
Contributed by Sean Po


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cbe6140
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cbe6140
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cbe6140

Branch: refs/heads/HADOOP-12756
Commit: 4cbe61407dcb71f099bc7ec6ae87560d786ee714
Parents: 92d8f37
Author: Jason Lowe 
Authored: Mon Aug 29 16:14:55 2016 +
Committer: Jason Lowe 
Committed: Mon Aug 29 16:14:55 2016 +

--
 .../yarn/client/api/impl/TestYarnClient.java| 172 ---
 1 file changed, 38 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cbe6140/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index dd19acb..e462be1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -69,8 +69,6 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
@@ -158,7 +156,7 @@ public class TestYarnClient {
 
   @SuppressWarnings("deprecation")
   @Test (timeout = 3)
-  public void testSubmitApplication() {
+  public void testSubmitApplication() throws Exception {
 Configuration conf = new Configuration();
 conf.setLong(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS,
 100); // speed up tests
@@ -184,8 +182,6 @@ public class TestYarnClient {
   Assert.assertTrue(e instanceof ApplicationIdNotProvidedException);
   Assert.assertTrue(e.getMessage().contains(
   "ApplicationId is not provided in ApplicationSubmissionContext"));
-} catch (IOException e) {
-  Assert.fail("IOException is not expected.");
 }
 
 // Submit the application with applicationId provided
@@ -197,13 +193,7 @@ public class TestYarnClient {
   System.currentTimeMillis(), i);
   when(context.getApplicationId()).thenReturn(applicationId);
   ((MockYarnClient) client).setYarnApplicationState(exitStates[i]);
-  try {
-client.submitApplication(context);
-  } catch (YarnException e) {
-Assert.fail("Exception is not expected.");
-  } catch (IOException e) {
-Assert.fail("Exception is not expected.");
-  }
+  client.submitApplication(context);
   verify(((MockYarnClient) client).mockReport,times(4 * i + 4))
   .getYarnApplicationState();
 }
@@ -583,12 +573,11 @@ public class TestYarnClient {
 .thenReturn(mockNodeToLabelsResponse);
 
 historyClient = mock(AHSClient.class);
-
-  } catch (YarnException e) {
-Assert.fail("Exception is not expected.");
-  } catch (IOException e) {
-Assert.fail("Exception is not expected.");
+
+  } catch (Exception e) {
+Assert.fail("Unexpected exception caught: " + e);
   }
+
   when(mockResponse.getApplicationReport()).thenReturn(mockReport);
 }
 
@@ -993,36 +982,20 @@ public class TestYarnClient {
 return appId;
   }
 
-  private GetNewReservationResponse getNewReservation(YarnClient rmClient) {
-GetNewReservationRequest newReservationRequest = GetNewReservationRequest
-.newInstance();
-GetNewReservationResponse getNewReservationResponse = null;
-try {
-  getNewReservationResponse = rmClient.createReservation();
-} catch (Exception e) {
-  Assert.fail(e.getMessage());
-}
-return getNewReservationResponse;
-  }
-
   private void waitTillAccepted(YarnClient rmClient, ApplicationId appId,
   boolean unmanagedApplication)
 throws Exception {
-try {
-  long start = System.current

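The cleanup pattern applied throughout TestYarnClient: instead of wrapping every call in try/catch and calling Assert.fail() on a checked exception, the test method declares throws Exception and keeps only the catch blocks that assert something about an expected failure. A minimal sketch, with placeholder calls under test:

import org.junit.Assert;
import org.junit.Test;

public class ExceptionStyleTest {
  @Test
  public void testCallSucceeds() throws Exception {
    // client.submitApplication(context);  // placeholder: any checked
    // exception now fails the test with its real stack trace.
  }

  @Test
  public void testCallFailsUsefully() {
    try {
      throw new IllegalStateException("ApplicationId is not provided");
    } catch (IllegalStateException e) {
      // Only catch when the test asserts on the expected failure.
      Assert.assertTrue(e.getMessage().contains("ApplicationId"));
    }
  }
}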
[24/50] [abbrv] hadoop git commit: MAPREDUCE-4784. TestRecovery occasionally fails. Contributed by Haibo Chen

2016-09-06 Thread drankye
MAPREDUCE-4784. TestRecovery occasionally fails. Contributed by Haibo Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af508605
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af508605
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af508605

Branch: refs/heads/HADOOP-12756
Commit: af508605a9edc126c170160291dbc2fe58b66dea
Parents: 4bd45f5
Author: Jason Lowe 
Authored: Tue Aug 30 13:59:57 2016 +
Committer: Jason Lowe 
Committed: Tue Aug 30 13:59:57 2016 +

--
 .../hadoop/mapreduce/v2/app/TestRecovery.java   | 27 +++-
 1 file changed, 21 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af508605/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
index 3ede2e9..9d5f0ae 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
@@ -35,6 +35,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import java.util.concurrent.TimeoutException;
 import org.junit.Assert;
 
 import org.apache.commons.logging.Log;
@@ -180,7 +181,10 @@ public class TestRecovery {
 Iterator<TaskAttempt> itr = mapTask1.getAttempts().values().iterator();
 itr.next();
 TaskAttempt task1Attempt2 = itr.next();
-
+
+// wait for the second task attempt to be assigned.
+waitForContainerAssignment(task1Attempt2);
+
 // This attempt will automatically fail because of the way ContainerLauncher
 // is setup
 // This attempt 'disappears' from JobHistory and so causes MAPREDUCE-3846
@@ -318,6 +322,21 @@ public class TestRecovery {
   }
 
   /**
+   * Wait for a task attempt to be assigned a container.
+   * @param task1Attempt2 the task attempt whose container assignment to wait for
+   * @throws TimeoutException if the wait times out
+   * @throws InterruptedException if the wait is interrupted
+   */
+  public static void waitForContainerAssignment(final TaskAttempt 
task1Attempt2)
+  throws TimeoutException, InterruptedException {
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override public Boolean get() {
+return task1Attempt2.getAssignedContainerID() != null;
+  }
+}, 10, 1);
+  }
+
+  /**
 * AM with 3 maps and 0 reduces. AM crashes after the first two tasks finish
* and recovers completely and succeeds in the second generation.
* 
@@ -1199,11 +1218,7 @@ public class TestRecovery {
 TaskAttempt task2Attempt = 
mapTask2.getAttempts().values().iterator().next();
 
 // wait for the second task attempt to be assigned.
-GenericTestUtils.waitFor(new Supplier<Boolean>() {
-  @Override public Boolean get() {
-return task1Attempt2.getAssignedContainerID() != null;
-  }
-}, 10, 1);
+waitForContainerAssignment(task1Attempt2);
 ContainerId t1a2contId = task1Attempt2.getAssignedContainerID();
 
 LOG.info(t1a2contId.toString());


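GenericTestUtils.waitFor, used above, is a poll-until-true helper. A plain-JDK equivalent of the idiom for readers outside the Hadoop test tree; the names are illustrative:

import java.util.concurrent.TimeoutException;

public class PollWait {
  interface Condition { boolean met(); }

  // Re-check the condition every checkEveryMs until it holds or
  // waitForMs elapses, mirroring waitFor(supplier, checkMs, timeoutMs).
  static void waitFor(Condition c, long checkEveryMs, long waitForMs)
      throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + waitForMs;
    while (!c.met()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException(
            "Condition not met within " + waitForMs + " ms");
      }
      Thread.sleep(checkEveryMs);
    }
  }
}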



[20/50] [abbrv] hadoop git commit: HADOOP-12608. Fix exception message in WASB when connecting with anonymous credential. Contributed by Dushyanth.

2016-09-06 Thread drankye
HADOOP-12608. Fix exception message in WASB when connecting with anonymous 
credential. Contributed by Dushyanth.

(cherry picked from commit 007a8decc61750720033e06b6d08861ac3788e41)
(cherry picked from commit a65e159c59de41d26e12212dd1b7c28c28265cc3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b57be10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b57be10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b57be10

Branch: refs/heads/HADOOP-12756
Commit: 8b57be108f9de3b74c5d6465828241fd436bcb99
Parents: ed6ff5c
Author: Xiaoyu Yao 
Authored: Wed Dec 30 11:10:50 2015 -0800
Committer: Andrew Wang 
Committed: Mon Aug 29 15:30:49 2016 -0700

--
 .../fs/azure/AzureNativeFileSystemStore.java|  5 +-
 ...TestFileSystemOperationExceptionMessage.java | 78 
 2 files changed, 81 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b57be10/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 2615174..eaca82e 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -742,8 +742,9 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 try {
   if (!container.exists(getInstrumentedContext())) {
 throw new AzureException("Container " + containerName + " in account "
-+ accountName + " not found, and we can't create "
-+ " it using anoynomous credentials.");
++ accountName + " not found, and we can't create"
++ " it using anoynomous credentials, and no credentials found for 
them"
++ " in the configuration.");
   }
 } catch (StorageException ex) {
   throw new AzureException("Unable to access container " + containerName

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b57be10/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
new file mode 100644
index 000..57920a4
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+import java.net.URI;
+import java.util.UUID;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Test;
+
+
+public class TestFileSystemOperationExceptionMessage extends
+  NativeAzureFileSystemBaseTest {
+
+  @Test
+  public void testAnonymouseCredentialExceptionMessage() throws Throwable{
+
+Configuration conf = AzureBlobStorageTestAccount.createTestConfiguration();
+String testStorageAccount = conf.get("fs.azure.test.account.name");
+conf = new Configuration();
+conf.set("fs.AbstractFileSystem.wasb.impl", 
"org.apache.hadoop.fs.azure.Wasb");
+conf.set("fs.azure.skip.metrics", "true");
+
+String testContainer = UUID.randomUUID().toString();
+String wasbUri = String.format("wasb://%s@%s",
+testContainer, testStorageAccount);
+
+String expectedErrorMessage =
+String.format("Container %s in account %s not found, and we can't 
create it "
++ "using anoynomous credentials, and no credentials found for "
+ 

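The shape of the test above: it rebuilds the expected wording with the same String.format pattern the filesystem uses, then asserts containment, so a drift on either side fails the test. A self-contained sketch with illustrative names and message text:

public class MessageCheck {
  public static void main(String[] args) {
    String expected = String.format(
        "Container %s in account %s not found", "testcontainer", "testaccount");
    Exception e = new Exception(expected
        + ", and we can't create it using anonymous credentials.");
    if (!e.getMessage().contains(expected)) {
      throw new AssertionError("Exception message drifted: " + e.getMessage());
    }
    System.out.println("Message matches: " + e.getMessage());
  }
}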
[14/50] [abbrv] hadoop git commit: HADOOP-7363. TestRawLocalFileSystemContract is needed. Contributed by Andras Bokor.

2016-09-06 Thread drankye
HADOOP-7363. TestRawLocalFileSystemContract is needed. Contributed by Andras 
Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1ad598c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1ad598c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1ad598c

Branch: refs/heads/HADOOP-12756
Commit: e1ad598cef61cbd3a6f505f40221c8140a36b7e4
Parents: 4cbe614
Author: Anu Engineer 
Authored: Mon Aug 29 09:19:35 2016 -0700
Committer: Anu Engineer 
Committed: Mon Aug 29 09:26:46 2016 -0700

--
 .../hadoop/fs/FileSystemContractBaseTest.java   | 46 
 .../fs/TestRawLocalFileSystemContract.java  | 75 
 2 files changed, 107 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ad598c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 5b8987c..bbd7336 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -172,22 +172,39 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
   }
 
   public void testMkdirsWithUmask() throws Exception {
-if (fs.getScheme().equals("s3n")) {
-  // skip permission tests for S3FileSystem until HDFS-1333 is fixed.
-  return;
+if (!isS3(fs)) {
+  Configuration conf = fs.getConf();
+  String oldUmask = 
conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
+  try {
+conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, TEST_UMASK);
+final Path dir = path("/test/newDir");
+assertTrue(fs.mkdirs(dir, new FsPermission((short) 0777)));
+FileStatus status = fs.getFileStatus(dir);
+assertTrue(status.isDirectory());
+assertEquals((short) 0715, status.getPermission().toShort());
+  } finally {
+conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, oldUmask);
+  }
 }
-Configuration conf = fs.getConf();
-String oldUmask = 
conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
+  }
+
+  /**
+   * Skip permission tests for S3FileSystem until HDFS-1333 is fixed.
+   * Classes that do not implement the {@link FileSystem#getScheme()} method
+   * (e.g. {@link RawLocalFileSystem}) will throw an
+   * {@link UnsupportedOperationException}.
+   * @param fileSystem the FileSystem to check
+   * @return true if the filesystem is S3, false otherwise
+   */
+  private boolean isS3(FileSystem fileSystem) {
 try {
-  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, TEST_UMASK);
-  final Path dir = new Path("/test/newDir");
-  assertTrue(fs.mkdirs(dir, new FsPermission((short)0777)));
-  FileStatus status = fs.getFileStatus(dir);
-  assertTrue(status.isDirectory());
-  assertEquals((short)0715, status.getPermission().toShort());
-} finally {
-  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, oldUmask);
+  if (fileSystem.getScheme().equals("s3n")) {
+return true;
+  }
+} catch (UnsupportedOperationException e) {
+  LOG.warn("Unable to determine the schema of filesystem.");
 }
+return false;
   }
 
   public void testGetFileStatusThrowsExceptionForNonExistentFile() 
@@ -480,7 +497,8 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
   }
   
   protected Path path(String pathString) {
-return new Path(pathString).makeQualified(fs);
+return new Path(pathString).makeQualified(fs.getUri(),
+fs.getWorkingDirectory());
   }
   
   protected void createFile(Path path) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ad598c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
new file mode 100644
index 000..036fb6a
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreement

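One reusable detail of this change: Path.makeQualified(FileSystem) is deprecated, so the contract test now qualifies paths against the filesystem's URI and working directory explicitly. A minimal sketch against the local filesystem:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QualifyPath {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // Spelled-out replacement for the deprecated path.makeQualified(fs):
    Path qualified = new Path("/test/newDir")
        .makeQualified(fs.getUri(), fs.getWorkingDirectory());
    System.out.println(qualified);  // e.g. file:/test/newDir
  }
}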
[26/50] [abbrv] hadoop git commit: HDFS-9392. Admins support for maintenance state. Contributed by Ming Ma.

2016-09-06 Thread drankye
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index f6b5d8f..ddb8237 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -26,17 +26,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.concurrent.ExecutionException;
 
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -64,11 +60,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
-import org.junit.After;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.mortbay.util.ajax.JSON;
@@ -78,90 +71,9 @@ import org.slf4j.LoggerFactory;
 /**
  * This class tests the decommissioning of nodes.
  */
-public class TestDecommission {
+public class TestDecommission extends AdminStatesBaseTest {
   public static final Logger LOG = LoggerFactory.getLogger(TestDecommission
   .class);
-  static final long seed = 0xDEADBEEFL;
-  static final int blockSize = 8192;
-  static final int fileSize = 16384;
-  static final int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
-  static final int BLOCKREPORT_INTERVAL_MSEC = 1000; //block report in msec
-  static final int NAMENODE_REPLICATION_INTERVAL = 1; //replication interval
-
-  final Random myrand = new Random();
-  Path dir;
-  Path hostsFile;
-  Path excludeFile;
-  FileSystem localFileSys;
-  Configuration conf;
-  MiniDFSCluster cluster = null;
-
-  @Before
-  public void setup() throws IOException {
-conf = new HdfsConfiguration();
-// Set up the hosts/exclude files.
-localFileSys = FileSystem.getLocal(conf);
-Path workingDir = localFileSys.getWorkingDirectory();
-dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + 
"/work-dir/decommission");
-hostsFile = new Path(dir, "hosts");
-excludeFile = new Path(dir, "exclude");
-
-// Setup conf
-conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, 
false);
-conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
-conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
-conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
2000);
-conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
-conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
-conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 
BLOCKREPORT_INTERVAL_MSEC);
-
conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 
4);
-conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
NAMENODE_REPLICATION_INTERVAL);
-  
-writeConfigFile(hostsFile, null);
-writeConfigFile(excludeFile, null);
-  }
-  
-  @After
-  public void teardown() throws IOException {
-cleanupFile(localFileSys, dir);
-if (cluster != null) {
-  cluster.shutdown();
-  cluster = null;
-}
-  }
-  
-  private void writeConfigFile(Path name, List<String> nodes) 
-throws IOException {
-// delete if it already exists
-if (localFileSys.exists(name)) {
-  localFileSys.delete(name, true);
-}
-
-FSDataOutputStream stm = localFileSys.create(name);
-
-if (nodes != null) {
-  for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
-String node = it.next();
-stm.writeBytes(node);
-stm.writeBytes("\n");
-  }
-}
-stm.close();
-  }
-
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-throws IOException {
-// create and write a file that contains three blocks of data
-FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-(short) repl, blockSize);
-byte[] buffer = new byte[fileSize];
-Random rand = ne

hadoop git commit: HADOOP-13218. Migrate other Hadoop side tests to prepare for removing WritableRPCEngine. Contributed by Wei Zhou and Kai Zheng

2016-09-06 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/trunk f9557127b -> 62a966713


HADOOP-13218. Migrate other Hadoop side tests to prepare for removing 
WritableRPCEngine. Contributed by Wei Zhou and Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62a96671
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62a96671
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62a96671

Branch: refs/heads/trunk
Commit: 62a9667136ebd8a048f556b534fcff4fdaf8e2ec
Parents: f955712
Author: Kai Zheng 
Authored: Wed Sep 7 17:05:33 2016 +0800
Committer: Kai Zheng 
Committed: Wed Sep 7 17:05:33 2016 +0800

--
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|   5 +-
 .../main/java/org/apache/hadoop/ipc/RPC.java|  15 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |   4 +-
 .../hadoop/security/UserGroupInformation.java   |   4 +-
 .../org/apache/hadoop/ipc/RPCCallBenchmark.java |  38 +--
 .../hadoop/ipc/TestMultipleProtocolServer.java  | 236 +--
 .../apache/hadoop/ipc/TestRPCCallBenchmark.java |  13 -
 .../apache/hadoop/ipc/TestRPCCompatibility.java | 242 ++-
 .../apache/hadoop/ipc/TestRPCWaitForProxy.java  |  37 ++-
 .../java/org/apache/hadoop/ipc/TestRpcBase.java |  50 +++-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java |  74 +++--
 .../hadoop/security/TestDoAsEffectiveUser.java  | 291 +++
 .../security/TestUserGroupInformation.java  |  28 +-
 .../hadoop-common/src/test/proto/test.proto |   4 +-
 .../src/test/proto/test_rpc_service.proto   |   4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   3 -
 .../TestClientProtocolWithDelegationToken.java  | 119 
 .../mapreduce/v2/hs/server/HSAdminServer.java   |   3 -
 18 files changed, 294 insertions(+), 876 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62a96671/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 83e4b9e..e68bfd4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -60,7 +60,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   private static final ThreadLocal>
   ASYNC_RETURN_MESSAGE = new ThreadLocal<>();
 
-  static { // Register the rpcRequest deserializer for WritableRpcEngine 
+  static { // Register the rpcRequest deserializer for ProtobufRpcEngine
 org.apache.hadoop.ipc.Server.registerProtocolEngine(
 RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class,
 new Server.ProtoBufRpcInvoker());
@@ -194,7 +194,8 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
   
   if (args.length != 2) { // RpcController + Message
-throw new ServiceException("Too many parameters for request. Method: ["
+throw new ServiceException(
+"Too many or few parameters for request. Method: ["
 + method.getName() + "]" + ", Expected: 2, Actual: "
 + args.length);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62a96671/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 3f68d63..12a07a5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ipc;
 
+import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Proxy;
@@ -26,7 +28,6 @@ import java.net.ConnectException;
 import java.net.InetSocketAddress;
 import java.net.NoRouteToHostException;
 import java.net.SocketTimeoutException;
-import java.io.*;
 import java.io.Closeable;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -37,11 +38,12 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.*;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.conf.Conf

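The reworded exception in ProtobufRpcEngine reflects the invariant being enforced: a protobuf-generated service method is always invoked with exactly two arguments, an RpcController and a Message. A standalone sketch of such an arity guard, illustrative rather than the Hadoop source:

public class ArityCheck {
  static void checkRpcArity(String method, Object... args) {
    if (args.length != 2) {
      throw new IllegalArgumentException(
          "Too many or few parameters for request. Method: [" + method
              + "], Expected: 2, Actual: " + args.length);
    }
  }

  public static void main(String[] args) {
    checkRpcArity("echo", new Object(), new Object());  // passes
    checkRpcArity("echo", new Object());                // throws
  }
}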
hadoop git commit: Revert "HADOOP-13218. Migrate other Hadoop side tests to prepare for removing WritableRPCEngine. Contributed by Wei Zhou and Kai Zheng"

2016-09-07 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/trunk f414d5e11 -> d355573f5


Revert "HADOOP-13218. Migrate other Hadoop side tests to prepare for removing 
WritableRPCEngine. Contributed by Wei Zhou and Kai Zheng"

This reverts commit 62a9667136ebd8a048f556b534fcff4fdaf8e2ec


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d355573f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d355573f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d355573f

Branch: refs/heads/trunk
Commit: d355573f5681f43e760a1bc23ebed553bd35fca5
Parents: f414d5e
Author: Kai Zheng 
Authored: Thu Sep 8 05:50:17 2016 +0800
Committer: Kai Zheng 
Committed: Thu Sep 8 05:50:17 2016 +0800

--
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|   5 +-
 .../main/java/org/apache/hadoop/ipc/RPC.java|  15 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |   4 +-
 .../hadoop/security/UserGroupInformation.java   |   4 +-
 .../org/apache/hadoop/ipc/RPCCallBenchmark.java |  38 ++-
 .../hadoop/ipc/TestMultipleProtocolServer.java  | 236 ++-
 .../apache/hadoop/ipc/TestRPCCallBenchmark.java |  13 +
 .../apache/hadoop/ipc/TestRPCCompatibility.java | 242 +--
 .../apache/hadoop/ipc/TestRPCWaitForProxy.java  |  37 +--
 .../java/org/apache/hadoop/ipc/TestRpcBase.java |  50 +---
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java |  74 ++---
 .../hadoop/security/TestDoAsEffectiveUser.java  | 291 ---
 .../security/TestUserGroupInformation.java  |  28 +-
 .../hadoop-common/src/test/proto/test.proto |   4 +-
 .../src/test/proto/test_rpc_service.proto   |   4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   3 +
 .../TestClientProtocolWithDelegationToken.java  | 119 
 .../mapreduce/v2/hs/server/HSAdminServer.java   |   3 +
 18 files changed, 876 insertions(+), 294 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d355573f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index e68bfd4..83e4b9e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -60,7 +60,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   private static final ThreadLocal>
   ASYNC_RETURN_MESSAGE = new ThreadLocal<>();
 
-  static { // Register the rpcRequest deserializer for ProtobufRpcEngine
+  static { // Register the rpcRequest deserializer for WritableRpcEngine 
 org.apache.hadoop.ipc.Server.registerProtocolEngine(
 RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class,
 new Server.ProtoBufRpcInvoker());
@@ -194,8 +194,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
   
   if (args.length != 2) { // RpcController + Message
-throw new ServiceException(
-"Too many or few parameters for request. Method: ["
+throw new ServiceException("Too many parameters for request. Method: ["
 + method.getName() + "]" + ", Expected: 2, Actual: "
 + args.length);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d355573f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 12a07a5..3f68d63 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.ipc;
 
-import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Proxy;
@@ -28,6 +26,7 @@ import java.net.ConnectException;
 import java.net.InetSocketAddress;
 import java.net.NoRouteToHostException;
 import java.net.SocketTimeoutException;
+import java.io.*;
 import java.io.Closeable;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -38,12 +37,11 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.*;
+
 import org.apache.had

[07/32] hadoop git commit: HDFS-10844. test_libhdfs_threaded_hdfs_static and test_libhdfs_zerocopy_hdfs_static are failing.

2016-09-13 Thread drankye
HDFS-10844. test_libhdfs_threaded_hdfs_static and 
test_libhdfs_zerocopy_hdfs_static are failing.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d1bf53c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d1bf53c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d1bf53c

Branch: refs/heads/HADOOP-12756
Commit: 2d1bf53c7e208ad951ebed7ee3f2e44582dfd151
Parents: 63f5948
Author: Akira Ajisaka 
Authored: Fri Sep 9 00:49:22 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Sep 9 00:49:22 2016 +0900

--
 .../src/main/native/libhdfs/hdfs.c| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d1bf53c/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index 4618dbb..1dcc768 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -114,7 +114,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
 jthr = invokeMethod(env, &jVal, INSTANCE, file->file, 
   "org/apache/hadoop/hdfs/client/HdfsDataInputStream",
   "getReadStatistics",
-  "()Lorg/apache/hadoop/hdfs/DFSInputStream$ReadStatistics;");
+  "()Lorg/apache/hadoop/hdfs/ReadStatistics;");
 if (jthr) {
 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
 "hdfsFileGetReadStatistics: getReadStatistics failed");
@@ -127,7 +127,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
 goto done;
 }
 jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-  "org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
+  "org/apache/hadoop/hdfs/ReadStatistics",
   "getTotalBytesRead", "()J");
 if (jthr) {
 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -137,7 +137,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
 s->totalBytesRead = jVal.j;
 
 jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-  "org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
+  "org/apache/hadoop/hdfs/ReadStatistics",
   "getTotalLocalBytesRead", "()J");
 if (jthr) {
 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -147,7 +147,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
 s->totalLocalBytesRead = jVal.j;
 
 jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-  "org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
+  "org/apache/hadoop/hdfs/ReadStatistics",
   "getTotalShortCircuitBytesRead", "()J");
 if (jthr) {
 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -156,7 +156,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
 }
 s->totalShortCircuitBytesRead = jVal.j;
 jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
-  "org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
+  "org/apache/hadoop/hdfs/ReadStatistics",
   "getTotalZeroCopyBytesRead", "()J");
 if (jthr) {
 ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,


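The descriptor change follows from ReadStatistics moving out of DFSInputStream: JVM-internal class names join nested classes to their enclosing class with '$', and JNI signatures use exactly those names. A small demonstration:

public class NestedNames {
  static class Inner { }

  public static void main(String[] args) {
    // Nested classes carry the enclosing name plus '$'...
    System.out.println(Inner.class.getName());       // NestedNames$Inner
    // ...while top-level classes do not, hence the shorter
    // "Lorg/apache/hadoop/hdfs/ReadStatistics;" descriptor in hdfs.c.
    System.out.println(NestedNames.class.getName()); // NestedNames
  }
}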



[06/32] hadoop git commit: HDFS-10778. Add -format option to make the output of FileDistribution processor human-readable in OfflineImageViewer.

2016-09-13 Thread drankye
HDFS-10778. Add -format option to make the output of FileDistribution processor 
human-readable in OfflineImageViewer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63f59489
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63f59489
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63f59489

Branch: refs/heads/HADOOP-12756
Commit: 63f594892ecd4687e37a99790288e36eb278849f
Parents: d355573
Author: Akira Ajisaka 
Authored: Thu Sep 8 15:13:43 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Sep 8 15:13:43 2016 +0900

--
 .../FileDistributionCalculator.java |  20 +++-
 .../FileDistributionVisitor.java|  28 -
 .../offlineImageViewer/OfflineImageViewer.java  | 116 ++-
 .../OfflineImageViewerPB.java   |  78 +++--
 .../src/site/markdown/HDFSCommands.md   |   1 +
 .../src/site/markdown/HdfsImageViewer.md|   1 +
 .../TestOfflineImageViewer.java |  24 +++-
 7 files changed, 164 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63f59489/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
index 33ab641..71fb822 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
 import org.apache.hadoop.util.LimitInputStream;
+import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.base.Preconditions;
 
@@ -75,11 +76,14 @@ final class FileDistributionCalculator {
   private long totalSpace;
   private long maxFileSize;
 
+  private boolean formatOutput = false;
+
   FileDistributionCalculator(Configuration conf, long maxSize, int steps,
-  PrintStream out) {
+  boolean formatOutput, PrintStream out) {
 this.conf = conf;
 this.maxSize = maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize;
 this.steps = steps == 0 ? INTERVAL_DEFAULT : steps;
+this.formatOutput = formatOutput;
 this.out = out;
 long numIntervals = this.maxSize / this.steps;
 // avoid OutOfMemoryError when allocating an array
@@ -148,10 +152,20 @@ final class FileDistributionCalculator {
 
   private void output() {
 // write the distribution into the output file
-out.print("Size\tNumFiles\n");
+out.print((formatOutput ? "Size Range" : "Size") + "\tNumFiles\n");
 for (int i = 0; i < distribution.length; i++) {
   if (distribution[i] != 0) {
-out.print(((long) i * steps) + "\t" + distribution[i]);
+if (formatOutput) {
+  out.print((i == 0 ? "[" : "(")
+  + StringUtils.byteDesc(((long) (i == 0 ? 0 : i - 1) * steps))
+  + ", "
+  + StringUtils.byteDesc((long)
+  (i == distribution.length - 1 ? maxFileSize : i * steps))
+  + "]\t" + distribution[i]);
+} else {
+  out.print(((long) i * steps) + "\t" + distribution[i]);
+}
+
 out.print('\n');
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63f59489/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
index 1cef720..7dcc299 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 import java.io.IOException;
 import java.util.LinkedList;
 
+import org.apache.hadoop.util.StringUtils;
+
 /**
  * File size distribution visitor.
  * 
@@ -67,6 +69,7

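Extracted from the hunk above: with -format, bucket i is labeled as the interval ((i-1)*step, i*step], the first bucket printed closed. A standalone sketch of the labeling; byteDesc is a simplified stand-in for Hadoop's StringUtils.byteDesc, and the special-casing of the last bucket to maxFileSize is omitted:

public class SizeBuckets {
  static String byteDesc(long bytes) {
    String[] units = {"B", "KB", "MB", "GB", "TB"};
    double v = bytes;
    int u = 0;
    while (v >= 1024 && u < units.length - 1) { v /= 1024; u++; }
    return String.format(v == Math.rint(v) ? "%.0f %s" : "%.1f %s",
        v, units[u]);
  }

  public static void main(String[] args) {
    long step = 2 * 1024 * 1024;     // 2 MB buckets, for illustration
    long[] distribution = {3, 0, 7, 1};
    System.out.println("Size Range\tNumFiles");
    for (int i = 0; i < distribution.length; i++) {
      if (distribution[i] == 0) continue;  // only non-empty buckets print
      String lo = byteDesc(i == 0 ? 0 : (long) (i - 1) * step);
      String hi = byteDesc((long) i * step);
      System.out.println((i == 0 ? "[" : "(") + lo + ", " + hi + "]\t"
          + distribution[i]);
    }
  }
}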
[15/32] hadoop git commit: HDFS-10553. DiskBalancer: Rename Tools/DiskBalancer class to Tools/DiskBalancerCLI. Contributed by Manoj Govindassamy.

2016-09-13 Thread drankye
HDFS-10553. DiskBalancer: Rename Tools/DiskBalancer class to 
Tools/DiskBalancerCLI. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35c5943b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35c5943b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35c5943b

Branch: refs/heads/HADOOP-12756
Commit: 35c5943b8ba39419140cdfc5e6127053ee97
Parents: b07c266
Author: Anu Engineer 
Authored: Thu Sep 8 19:26:56 2016 -0700
Committer: Anu Engineer 
Committed: Thu Sep 8 19:26:56 2016 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   2 +-
 .../diskbalancer/command/CancelCommand.java |  23 +-
 .../server/diskbalancer/command/Command.java|   6 +-
 .../diskbalancer/command/ExecuteCommand.java|  15 +-
 .../diskbalancer/command/HelpCommand.java   |  22 +-
 .../diskbalancer/command/PlanCommand.java   |  63 +--
 .../diskbalancer/command/QueryCommand.java  |  19 +-
 .../diskbalancer/command/ReportCommand.java |  18 +-
 .../apache/hadoop/hdfs/tools/DiskBalancer.java  | 482 ---
 .../hadoop/hdfs/tools/DiskBalancerCLI.java  | 482 +++
 .../command/TestDiskBalancerCommand.java|  16 +-
 11 files changed, 576 insertions(+), 572 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 5059528..7a90f08 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -127,7 +127,7 @@ function hdfscmd_case
   HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 ;;
 diskbalancer)
-  HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancer
+  HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancerCLI
   hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
   HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c5943b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
index 8b83e27..007272e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
 
 import java.io.IOException;
 
@@ -44,9 +44,10 @@ public class CancelCommand extends Command {
*/
   public CancelCommand(Configuration conf) {
 super(conf);
-addValidCommandParameters(DiskBalancer.CANCEL, "Cancels a running plan.");
-addValidCommandParameters(DiskBalancer.NODE, "Node to run the command " +
-"against in node:port format.");
+addValidCommandParameters(DiskBalancerCLI.CANCEL,
+"Cancels a running plan.");
+addValidCommandParameters(DiskBalancerCLI.NODE,
+"Node to run the command against in node:port format.");
   }
 
   /**
@@ -57,20 +58,20 @@ public class CancelCommand extends Command {
   @Override
   public void execute(CommandLine cmd) throws Exception {
 LOG.info("Executing \"Cancel plan\" command.");
-Preconditions.checkState(cmd.hasOption(DiskBalancer.CANCEL));
-verifyCommandOptions(DiskBalancer.CANCEL, cmd);
+Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.CANCEL));
+verifyCommandOptions(DiskBalancerCLI.CANCEL, cmd);
 
 // We can cancel a plan using datanode address and plan ID
 // that you can read from a datanode using queryStatus
-if(cmd.hasOption(DiskBalancer.NODE)) {
-  String nodeAddress = cmd.getOptionValue(DiskBalancer.NODE);
-  String planHash = cmd.getOptionValue(DiskBalancer.CANCEL);
+if(cmd.hasOption(DiskBalancerCLI.NODE)) {
+  String nodeAddress = cmd.getOptionValue(DiskBalancerCLI.NODE);
+  String planHash = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
   can
