[06/50] hadoop git commit: HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by Zhe Zhang. Updated CHANGES-HDFS-EC-7285.txt

2015-04-29 Thread jing9
HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by 
Zhe Zhang.
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b95f2335
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b95f2335
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b95f2335

Branch: refs/heads/HDFS-7285
Commit: b95f2335b5b31b66391b0701288a0e8c98c28a21
Parents: c0c6534
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 15:35:18 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:06:50 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 5 +
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b95f2335/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 68d1d32..7716728 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -33,5 +33,7 @@
 HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
 ( Xinwei Qin via Kai Zheng )
 
+HADOOP-11740. Combine erasure encoder and decoder interfaces (Zhe Zhang)
+
 HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
 ( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b95f2335/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 3874cb4..9927ccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -49,7 +49,4 @@
 (Hui Zheng via Zhe Zhang)
 
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
-manage EC zones (Zhe Zhang)
-
-HADOOP-11740. Combine erasure encoder and decoder interfaces (Zhe Zhang)
-
+manage EC zones (Zhe Zhang)
\ No newline at end of file



[17/50] hadoop git commit: HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from NameNode (Contributed by Vinayakumar B)

2015-04-29 Thread jing9
HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from 
NameNode (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96173cea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96173cea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96173cea

Branch: refs/heads/HDFS-7285
Commit: 96173ceac4ac8fd32c5f4c5414df1bb69d8466c9
Parents: 631916c
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 12:48:59 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:14:10 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 ++-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 14 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java| 10 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 19 
 .../ClientNamenodeProtocolTranslatorPB.java | 18 
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 46 
 .../hdfs/server/namenode/FSNamesystem.java  | 31 +
 .../hdfs/server/namenode/NameNodeRpcServer.java |  7 +++
 .../src/main/proto/ClientNamenodeProtocol.proto | 10 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   | 28 
 .../hadoop/hdfs/TestErasureCodingZones.java | 38 +++-
 11 files changed, 223 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96173cea/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9927ccf..7423033 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -49,4 +49,7 @@
 (Hui Zheng via Zhe Zhang)
 
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
-manage EC zones (Zhe Zhang)
\ No newline at end of file
+manage EC zones (Zhe Zhang)
+
+HDFS-8023. Erasure Coding: retrieve eraure coding schema for a file from
+NameNode (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96173cea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index f4eea49..16f876c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -117,6 +117,7 @@ import 
org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ECInfo;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -3095,6 +3096,19 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  public ECInfo getErasureCodingInfo(String src) throws IOException {
+checkOpen();
+TraceScope scope = getPathTraceScope(getErasureCodingInfo, src);
+try {
+  return namenode.getErasureCodingInfo(src);
+} catch (RemoteException re) {
+  throw re.unwrapRemoteException(AccessControlException.class,
+  FileNotFoundException.class, UnresolvedPathException.class);
+} finally {
+  scope.close();
+}
+  }
+
   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException 
{
 return new DFSInotifyEventInputStream(traceSampler, namenode);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96173cea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 8efe344..45d92f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1464,4 +1464,14 @@ public interface ClientProtocol {
*/
   @Idempotent
   public EventBatchList getEditsFromTxid(long txid) 

[19/50] hadoop git commit: HDFS-8123. Erasure Coding: Better to move EC related proto messages to a separate erasurecoding proto file (Contributed by Rakesh R)

2015-04-29 Thread jing9
HDFS-8123. Erasure Coding: Better to move EC related proto messages to a 
separate erasurecoding proto file (Contributed by Rakesh R)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de6b66d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de6b66d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de6b66d6

Branch: refs/heads/HDFS-7285
Commit: de6b66d6341471589ae702a1d36127acf79da64a
Parents: 3297989
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 12:09:16 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:15:36 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  1 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 12 ++--
 .../ClientNamenodeProtocolTranslatorPB.java | 13 ++--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  6 +-
 .../namenode/ErasureCodingZoneManager.java  |  2 +-
 .../src/main/proto/ClientNamenodeProtocol.proto | 24 +--
 .../src/main/proto/erasurecoding.proto  | 74 
 .../hadoop-hdfs/src/main/proto/hdfs.proto   | 27 ---
 9 files changed, 96 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de6b66d6/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 5250dfa..07bbd4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -61,4 +61,7 @@
 HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
 
 HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
-ECSchemas loaded in Namenode. (vinayakumarb)
\ No newline at end of file
+ECSchemas loaded in Namenode. (vinayakumarb)
+
+HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
+separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de6b66d6/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index c11b963..a13a2bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -343,6 +343,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;
   includehdfs.proto/include
   includeencryption.proto/include
   includeinotify.proto/include
+  includeerasurecoding.proto/include
 /includes
   /source
   
output${project.build.directory}/generated-sources/java/output

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de6b66d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 48f0efd..169ea2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -107,12 +107,8 @@ import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDat
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetErasureCodingInfoRequestProto;
-import 

[33/50] hadoop git commit: HDFS-8145. Fix the editlog corruption exposed by failed TestAddStripedBlocks. Contributed by Jing Zhao.

2015-04-29 Thread jing9
HDFS-8145. Fix the editlog corruption exposed by failed TestAddStripedBlocks. 
Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c0a2149
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c0a2149
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c0a2149

Branch: refs/heads/HDFS-7285
Commit: 1c0a214928c02e98d0e5a5d91952efbec41fa679
Parents: 5fbe138
Author: Jing Zhao ji...@apache.org
Authored: Fri Apr 17 18:13:47 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:54 2015 -0700

--
 .../blockmanagement/BlockInfoStriped.java   |  7 --
 .../namenode/ErasureCodingZoneManager.java  | 12 +-
 .../hdfs/server/namenode/FSDirectory.java   |  6 ++---
 .../hdfs/server/namenode/FSEditLogLoader.java   | 13 ++-
 .../hdfs/server/namenode/FSImageFormat.java |  4 +---
 .../server/namenode/FSImageSerialization.java   | 13 +--
 .../blockmanagement/TestBlockInfoStriped.java   | 23 ++--
 .../hdfs/server/namenode/TestFSImage.java   |  2 +-
 8 files changed, 31 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c0a2149/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 9f2f5ba..23e3153 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -244,13 +244,6 @@ public class BlockInfoStriped extends BlockInfo {
 return num;
   }
 
-  @Override
-  public void write(DataOutput out) throws IOException {
-out.writeShort(dataBlockNum);
-out.writeShort(parityBlockNum);
-super.write(out);
-  }
-
   /**
* Convert a complete block to an under construction block.
* @return BlockInfoUnderConstruction -  an under construction block.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c0a2149/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
index 0a84083..3f94227 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
@@ -54,10 +54,6 @@ public class ErasureCodingZoneManager {
 this.dir = dir;
   }
 
-  boolean getECPolicy(INodesInPath iip) throws IOException {
-return getECSchema(iip) != null;
-  }
-
   ECSchema getECSchema(INodesInPath iip) throws IOException {
 ECZoneInfo ecZoneInfo = getECZoneInfo(iip);
 return ecZoneInfo == null ? null : ecZoneInfo.getSchema();
@@ -109,7 +105,7 @@ public class ErasureCodingZoneManager {
   throw new IOException(Attempt to create an erasure coding zone  +
   for a file.);
 }
-if (getECPolicy(srcIIP)) {
+if (getECSchema(srcIIP) != null) {
   throw new IOException(Directory  + src +  is already in an  +
   erasure coding zone.);
 }
@@ -132,8 +128,10 @@ public class ErasureCodingZoneManager {
   void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src)
   throws IOException {
 assert dir.hasReadLock();
-if (getECPolicy(srcIIP)
-!= getECPolicy(dstIIP)) {
+final ECSchema srcSchema = getECSchema(srcIIP);
+final ECSchema dstSchema = getECSchema(dstIIP);
+if ((srcSchema != null  !srcSchema.equals(dstSchema)) ||
+(dstSchema != null  !dstSchema.equals(srcSchema))) {
   throw new IOException(
   src +  can't be moved because the source and destination have  +
   different erasure coding policies.);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c0a2149/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 

[15/50] hadoop git commit: HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng

2015-04-29 Thread jing9
HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44aa26ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44aa26ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44aa26ac

Branch: refs/heads/HDFS-7285
Commit: 44aa26ac4f1fee981f36d9f6a34776283147230a
Parents: a7a65f2
Author: Kai Zheng kai.zh...@intel.com
Authored: Thu Apr 9 01:30:02 2015 +0800
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:14:10 2015 -0700

--
 .../src/main/conf/ecschema-def.xml  |  5 --
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 57 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  4 +-
 .../hdfs/server/namenode/ECSchemaManager.java   | 62 
 4 files changed, 120 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44aa26ac/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
index e619485..e36d386 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
@@ -27,11 +27,6 @@ You can modify and remove those not used yet, or add new 
ones.
 --
 
 schemas
-  schema name=RS-6-3
-k6/k
-m3/m
-codecRS/codec
-  /schema
   schema name=RS-10-4
 k10/k
 m4/m

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44aa26ac/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 27be00e..8c3310e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -23,12 +23,12 @@ import java.util.Map;
 /**
  * Erasure coding schema to housekeeper relevant information.
  */
-public class ECSchema {
+public final class ECSchema {
   public static final String NUM_DATA_UNITS_KEY = k;
   public static final String NUM_PARITY_UNITS_KEY = m;
   public static final String CODEC_NAME_KEY = codec;
   public static final String CHUNK_SIZE_KEY = chunkSize;
-  public static final int DEFAULT_CHUNK_SIZE = 64 * 1024; // 64K
+  public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
   private String schemaName;
   private String codecName;
@@ -82,6 +82,18 @@ public class ECSchema {
   }
 
   /**
+   * Constructor with key parameters provided.
+   * @param schemaName
+   * @param codecName
+   * @param numDataUnits
+   * @param numParityUnits
+   */
+  public ECSchema(String schemaName, String codecName,
+  int numDataUnits, int numParityUnits) {
+this(schemaName, codecName, numDataUnits, numParityUnits, null);
+  }
+
+  /**
* Constructor with key parameters provided. Note the options may contain
* additional information for the erasure codec to interpret further.
* @param schemaName
@@ -200,4 +212,45 @@ public class ECSchema {
 
 return sb.toString();
   }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != o.getClass()) {
+  return false;
+}
+
+ECSchema ecSchema = (ECSchema) o;
+
+if (numDataUnits != ecSchema.numDataUnits) {
+  return false;
+}
+if (numParityUnits != ecSchema.numParityUnits) {
+  return false;
+}
+if (chunkSize != ecSchema.chunkSize) {
+  return false;
+}
+if (!schemaName.equals(ecSchema.schemaName)) {
+  return false;
+}
+if (!codecName.equals(ecSchema.codecName)) {
+  return false;
+}
+return options.equals(ecSchema.options);
+  }
+
+  @Override
+  public int hashCode() {
+int result = schemaName.hashCode();
+result = 31 * result + codecName.hashCode();
+result = 31 * result + options.hashCode();
+result = 31 * result + numDataUnits;
+result = 31 * result + numParityUnits;
+result = 31 * result + chunkSize;
+
+return result;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44aa26ac/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 

[18/50] hadoop git commit: HDFS-7889 Subclass DFSOutputStream to support writing striping layout files. Contributed by Li Bo

2015-04-29 Thread jing9
HDFS-7889 Subclass DFSOutputStream to support writing striping layout files. 
Contributed by Li Bo


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ce17bc3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ce17bc3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ce17bc3

Branch: refs/heads/HDFS-7285
Commit: 1ce17bc366f0f8553f3bc0df606a5f4fe5e87083
Parents: 19f5d1f
Author: Kai Zheng kai.zh...@intel.com
Authored: Sat Apr 11 01:03:37 2015 +0800
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:15:35 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  13 +-
 .../java/org/apache/hadoop/hdfs/DFSPacket.java  |  26 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java | 439 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java|  11 +-
 .../apache/hadoop/hdfs/StripedDataStreamer.java | 241 ++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 311 +
 7 files changed, 1031 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ce17bc3/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 1e695c4..753795a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -56,4 +56,6 @@
 
 HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
 
-HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
\ No newline at end of file
+HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
+
+HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ce17bc3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 4646b60..99d48b2 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -266,8 +266,14 @@ public class DFSOutputStream extends FSOutputSummer
 }
   }
   Preconditions.checkNotNull(stat, HdfsFileStatus should not be null!);
-  final DFSOutputStream out = new DFSOutputStream(dfsClient, src, stat,
-  flag, progress, checksum, favoredNodes);
+  final DFSOutputStream out;
+  if(stat.getReplication() == 0) {
+out = new DFSStripedOutputStream(dfsClient, src, stat,
+flag, progress, checksum, favoredNodes);
+  } else {
+out = new DFSOutputStream(dfsClient, src, stat,
+flag, progress, checksum, favoredNodes);
+  }
   out.start();
   return out;
 } finally {
@@ -347,6 +353,9 @@ public class DFSOutputStream extends FSOutputSummer
   String[] favoredNodes) throws IOException {
 TraceScope scope =
 dfsClient.getPathTraceScope(newStreamForAppend, src);
+   if(stat.getReplication() == 0) {
+  throw new IOException(Not support appending to a striping layout file 
yet.);
+}
 try {
   final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
   progress, lastBlock, stat, checksum, favoredNodes);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ce17bc3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
index 22055c3..9cd1ec1 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
 
@@ -113,6 +114,19 @@ class DFSPacket {
 dataPos += len;
   }
 
+  

[28/50] hadoop git commit: HDFS-7994. Detect if reserved EC Block ID is already used during namenode startup. Contributed by Hui Zheng

2015-04-29 Thread jing9
HDFS-7994. Detect if reserved EC Block ID is already used during namenode 
startup. Contributed by Hui Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5683a3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5683a3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5683a3d

Branch: refs/heads/HDFS-7285
Commit: d5683a3d4502d666c326f0a30c0c6a11985e0c4f
Parents: d27f5dc
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Apr 16 13:16:37 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:54 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   5 +-
 .../server/blockmanagement/BlockManager.java|  42 -
 .../hdfs/server/namenode/FSEditLogLoader.java   |   4 +-
 .../hdfs/server/namenode/FSImageFormat.java |   6 +-
 .../server/namenode/FSImageFormatPBINode.java   |   2 +-
 .../snapshot/FSImageFormatPBSnapshot.java   |   2 +-
 .../server/namenode/TestFSEditLogLoader.java| 106 
 .../hdfs/server/namenode/TestFSImage.java   | 169 ++-
 8 files changed, 321 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5683a3d/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index b9fc6fa..78ca6d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -81,4 +81,7 @@
 HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
 separate erasurecoding proto file (Rakesh R via vinayakumarb)
 
-HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
\ No newline at end of file
+HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
+
+HDFS-7994. Detect if resevered EC Block ID is already used during namenode
+startup. (Hui Zheng via szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5683a3d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8006405..dd00e6d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -273,6 +273,9 @@ public class BlockManager {
   /** Check whether name system is running before terminating */
   private boolean checkNSRunning = true;
 
+  /** Check whether there are any non-EC blocks using StripedID */
+  private boolean hasNonEcBlockUsingStripedID = false;
+
   public BlockManager(final Namesystem namesystem, final Configuration conf)
 throws IOException {
 this.namesystem = namesystem;
@@ -2912,6 +2915,24 @@ public class BlockManager {
   }
 
   /**
+   * Get the value of whether there are any non-EC blocks using StripedID.
+   *
+   * @return Returns the value of whether there are any non-EC blocks using 
StripedID.
+   */
+  public boolean hasNonEcBlockUsingStripedID(){
+return hasNonEcBlockUsingStripedID;
+  }
+
+  /**
+   * Set the value of whether there are any non-EC blocks using StripedID.
+   *
+   * @param has - the value of whether there are any non-EC blocks using 
StripedID.
+   */
+  public void hasNonEcBlockUsingStripedID(boolean has){
+hasNonEcBlockUsingStripedID = has;
+  }
+
+  /**
* Process a single possibly misreplicated block. This adds it to the
* appropriate queues if necessary, and returns a result code indicating
* what happened with it.
@@ -3507,8 +3528,10 @@ public class BlockManager {
 if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
   info = blocksMap.getStoredBlock(
   new Block(BlockIdManager.convertToStripedID(block.getBlockId(;
-}
-if (info == null) {
+  if ((info == null)  hasNonEcBlockUsingStripedID()){
+info = blocksMap.getStoredBlock(block);
+  }
+} else {
   info = blocksMap.getStoredBlock(block);
 }
 return info;
@@ -3682,6 +3705,21 @@ public class BlockManager {
 return blocksMap.addBlockCollection(block, bc);
   }
 
+  /**
+   * Do some check when adding a block to blocksmap.
+   * For HDFS-7994 to check whether then block is a NonEcBlockUsingStripedID.
+   *
+   */
+  public BlockInfo 

[16/50] hadoop git commit: HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from NameNode (Contributed by Vinayakumar B) Added missed file

2015-04-29 Thread jing9
HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from 
NameNode (Contributed by Vinayakumar B)
Added missed file


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7a65f22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7a65f22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7a65f22

Branch: refs/heads/HDFS-7285
Commit: a7a65f225bbfca94a1baa383be634d687da782d2
Parents: 96173ce
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 14:23:03 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:14:10 2015 -0700

--
 .../org/apache/hadoop/hdfs/protocol/ECInfo.java | 41 
 1 file changed, 41 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7a65f22/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
new file mode 100644
index 000..ca642c2
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+/**
+ * Class to provide information, such as ECSchema, for a file/block.
+ */
+public class ECInfo {
+  private final String src;
+  private final ECSchema schema;
+
+  public ECInfo(String src, ECSchema schema) {
+this.src = src;
+this.schema = schema;
+  }
+
+  public String getSrc() {
+return src;
+  }
+
+  public ECSchema getSchema() {
+return schema;
+  }
+}



[02/50] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk changes (this commit is for HDFS-8035). Contributed by Zhe Zhang

2015-04-29 Thread jing9
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk 
changes (this commit is for HDFS-8035). Contributed by Zhe Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/979f453b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/979f453b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/979f453b

Branch: refs/heads/HDFS-7285
Commit: 979f453ba297213cd47670f1049d56f3c59aba5f
Parents: b0ef98e
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 6 10:37:23 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:06:49 2015 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java | 11 +--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java |  8 
 2 files changed, 9 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/979f453b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9bd687a..90ec426 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3550,13 +3550,12 @@ public class BlockManager {
   String src, BlockInfo[] blocks) {
 for (BlockInfo b: blocks) {
   if (!b.isComplete()) {
-final BlockInfoContiguousUnderConstruction uc =
-(BlockInfoContiguousUnderConstruction)b;
 final int numNodes = b.numNodes();
-LOG.info(BLOCK*  + b +  is not COMPLETE (ucState = 
-  + uc.getBlockUCState() + , replication# =  + numNodes
-  + (numNodes  minReplication ?   :  = )
-  +  minimum =  + minReplication + ) in file  + src);
+final int min = getMinStorageNum(b);
+final BlockUCState state = b.getBlockUCState();
+LOG.info(BLOCK*  + b +  is not COMPLETE (ucState =  + state
++ , replication# =  + numNodes + (numNodes  min ?:  = 
)
++  minimum =  + min + ) in file  + src);
 return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/979f453b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1740365..9c71574 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3140,7 +3140,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
*/
   LocatedBlock storeAllocatedBlock(String src, long fileId, String clientName,
   ExtendedBlock previous, DatanodeStorageInfo[] targets) throws 
IOException {
-BlockInfo newBlockInfo = null;
+Block newBlock = null;
 long offset;
 checkOperation(OperationCategory.WRITE);
 waitForLoadingFSImage();
@@ -3173,8 +3173,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 ExtendedBlock.getLocalBlock(previous));
 
   // allocate new block, record block locations in INode.
-  Block newBlock = createNewBlock(isStriped);
-  newBlockInfo = saveAllocatedBlock(src, fileState.iip, newBlock, targets,
+  newBlock = createNewBlock(isStriped);
+  saveAllocatedBlock(src, fileState.iip, newBlock, targets,
   isStriped);
 
   persistNewBlock(src, pendingFile);
@@ -3185,7 +3185,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 getEditLog().logSync();
 
 // Return located block
-return makeLocatedBlock(newBlockInfo, targets, offset);
+return makeLocatedBlock(getStoredBlock(newBlock), targets, offset);
   }
 
   /*



[30/50] hadoop git commit: HADOOP-11841. Remove unused ecschema-def.xml files.

2015-04-29 Thread jing9
HADOOP-11841. Remove unused ecschema-def.xml files.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54cad2d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54cad2d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54cad2d4

Branch: refs/heads/HDFS-7285
Commit: 54cad2d4dcb880e887786340f9dd155fbe842f58
Parents: 94d0e9a
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Fri Apr 17 16:07:07 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:54 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  2 ++
 .../src/main/conf/ecschema-def.xml  | 35 ---
 .../hadoop/fs/CommonConfigurationKeys.java  |  5 ---
 .../hadoop/io/erasurecode/SchemaLoader.java | 36 +++-
 .../hadoop/io/erasurecode/TestSchemaLoader.java | 12 ++-
 5 files changed, 25 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54cad2d4/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index b850e11..9749270 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -42,3 +42,5 @@
 ( Kai Zheng via vinayakumarb )
   
 HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via 
Kai Zheng)
+
+HADOOP-11841. Remove unused ecschema-def.xml files.  (szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54cad2d4/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
deleted file mode 100644
index e36d386..000
--- a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-?xml version=1.0?
-
-!--
- 
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- License); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an AS IS BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
---
-
-!--
-Please define your EC schemas here. Note, once these schemas are loaded
-and referenced by EC storage policies, any change to them will be ignored.
-You can modify and remove those not used yet, or add new ones.
---
-
-schemas
-  schema name=RS-10-4
-k10/k
-m4/m
-codecRS/codec
-  /schema
-/schemas
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54cad2d4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 8a5211a..bd2a24b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -143,11 +143,6 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   /** Supported erasure codec classes */
   public static final String IO_ERASURECODE_CODECS_KEY = 
io.erasurecode.codecs;
 
-  public static final String IO_ERASURECODE_SCHEMA_FILE_KEY =
-  io.erasurecode.schema.file;
-  public static final String IO_ERASURECODE_SCHEMA_FILE_DEFAULT =
-  ecschema-def.xml;
-
   /** Use XOR raw coder when possible for the RS codec */
   public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
   io.erasurecode.codec.rs.usexor;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54cad2d4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
--
diff 

[12/50] hadoop git commit: HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R

2015-04-29 Thread jing9
HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17c271dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17c271dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17c271dd

Branch: refs/heads/HDFS-7285
Commit: 17c271dd85811c7d97dcb117e3f0be0ad11f5db2
Parents: ae85757
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 10 04:31:48 2015 +0800
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:14:10 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt   |  2 ++
 .../hadoop/io/erasurecode/SchemaLoader.java  | 12 ++--
 .../io/erasurecode/coder/RSErasureDecoder.java   | 19 ++-
 .../io/erasurecode/coder/RSErasureEncoder.java   | 19 ++-
 .../io/erasurecode/coder/XORErasureDecoder.java  |  2 +-
 .../io/erasurecode/rawcoder/util/RSUtil.java | 17 +
 6 files changed, 62 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17c271dd/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index c72394e..b850e11 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -40,3 +40,5 @@
 
 HADOOP-11645. Erasure Codec API covering the essential aspects for an 
erasure code
 ( Kai Zheng via vinayakumarb )
+  
+HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via 
Kai Zheng)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17c271dd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
index c51ed37..75dd03a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.w3c.dom.*;
@@ -36,7 +36,7 @@ import java.util.*;
  * A EC schema loading utility that loads predefined EC schemas from XML file
  */
 public class SchemaLoader {
-  private static final Log LOG = 
LogFactory.getLog(SchemaLoader.class.getName());
+  private static final Logger LOG = 
LoggerFactory.getLogger(SchemaLoader.class.getName());
 
   /**
* Load predefined ec schemas from configuration file. This file is
@@ -63,7 +63,7 @@ public class SchemaLoader {
   private ListECSchema loadSchema(File schemaFile)
   throws ParserConfigurationException, IOException, SAXException {
 
-LOG.info(Loading predefined EC schema file  + schemaFile);
+LOG.info(Loading predefined EC schema file {}, schemaFile);
 
 // Read and parse the schema file.
 DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
@@ -87,7 +87,7 @@ public class SchemaLoader {
   ECSchema schema = loadSchema(element);
 schemas.add(schema);
 } else {
-  LOG.warn(Bad element in EC schema configuration file:  +
+  LOG.warn(Bad element in EC schema configuration file: {},
   element.getTagName());
 }
   }
@@ -109,7 +109,7 @@ public class SchemaLoader {
   URL url = Thread.currentThread().getContextClassLoader()
   .getResource(schemaFilePath);
   if (url == null) {
-LOG.warn(schemaFilePath +  not found on the classpath.);
+LOG.warn({} not found on the classpath., schemaFilePath);
 schemaFile = null;
   } else if (! url.getProtocol().equalsIgnoreCase(file)) {
 throw new RuntimeException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17c271dd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 

[40/50] hadoop git commit: HDFS-8156. Add/implement necessary APIs even we just have the system default schema. Contributed by Kai Zheng.

2015-04-29 Thread jing9
HDFS-8156. Add/implement necessary APIs even we just have the system default 
schema. Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6a92533
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6a92533
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6a92533

Branch: refs/heads/HDFS-7285
Commit: e6a925338c0a99816b6e63e32b816a8416a271da
Parents: 12e161d
Author: Zhe Zhang z...@apache.org
Authored: Wed Apr 22 14:48:54 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:55 2015 -0700

--
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 173 +++
 .../hadoop/io/erasurecode/TestECSchema.java |   2 +-
 .../hadoop/io/erasurecode/TestSchemaLoader.java |   6 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   2 +-
 .../hdfs/server/namenode/ECSchemaManager.java   |  79 -
 .../namenode/ErasureCodingZoneManager.java  |  16 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  29 +++-
 .../org/apache/hadoop/hdfs/TestECSchemas.java   |   5 +-
 .../hadoop/hdfs/TestErasureCodingZones.java |  45 +++--
 10 files changed, 249 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6a92533/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 32077f6..f058ea7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.io.erasurecode;
 
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;
 
 /**
@@ -30,55 +31,80 @@ public final class ECSchema {
   public static final String CHUNK_SIZE_KEY = chunkSize;
   public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
-  private String schemaName;
-  private String codecName;
-  private MapString, String options;
-  private int numDataUnits;
-  private int numParityUnits;
-  private int chunkSize;
+  /**
+   * A friendly and understandable name that can mean what's it, also serves as
+   * the identifier that distinguish it from other schemas.
+   */
+  private final String schemaName;
+
+  /**
+   * The erasure codec name associated.
+   */
+  private final String codecName;
+
+  /**
+   * Number of source data units coded
+   */
+  private final int numDataUnits;
+
+  /**
+   * Number of parity units generated in a coding
+   */
+  private final int numParityUnits;
+
+  /**
+   * Unit data size for each chunk in a coding
+   */
+  private final int chunkSize;
+
+  /*
+   * An erasure code can have its own specific advanced parameters, subject to
+   * itself to interpret these key-value settings.
+   */
+  private final MapString, String extraOptions;
 
   /**
-   * Constructor with schema name and provided options. Note the options may
+   * Constructor with schema name and provided all options. Note the options 
may
* contain additional information for the erasure codec to interpret further.
* @param schemaName schema name
-   * @param options schema options
+   * @param allOptions all schema options
*/
-  public ECSchema(String schemaName, MapString, String options) {
+  public ECSchema(String schemaName, MapString, String allOptions) {
 assert (schemaName != null  ! schemaName.isEmpty());
 
 this.schemaName = schemaName;
 
-if (options == null || options.isEmpty()) {
+if (allOptions == null || allOptions.isEmpty()) {
   throw new IllegalArgumentException(No schema options are provided);
 }
 
-String codecName = options.get(CODEC_NAME_KEY);
+this.codecName = allOptions.get(CODEC_NAME_KEY);
 if (codecName == null || codecName.isEmpty()) {
   throw new IllegalArgumentException(No codec option is provided);
 }
 
-int dataUnits = 0, parityUnits = 0;
-try {
-  if (options.containsKey(NUM_DATA_UNITS_KEY)) {
-dataUnits = Integer.parseInt(options.get(NUM_DATA_UNITS_KEY));
-  }
-} catch (NumberFormatException e) {
-  throw new IllegalArgumentException(Option value  +
-  options.get(NUM_DATA_UNITS_KEY) +  for  + NUM_DATA_UNITS_KEY +
-   is found. It should be an integer);
+int tmpNumDataUnits = extractIntOption(NUM_DATA_UNITS_KEY, allOptions);
+int tmpNumParityUnits = extractIntOption(NUM_PARITY_UNITS_KEY, allOptions);

[49/50] hadoop git commit: HDFS-8230. Erasure Coding: Ignore DatanodeProtocol#DNA_ERASURE_CODING_RECOVERY commands from standbynode if any (Contributed by Vinayakumar B)

2015-04-29 Thread jing9
HDFS-8230. Erasure Coding: Ignore DatanodeProtocol#DNA_ERASURE_CODING_RECOVERY 
commands from standbynode if any (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e8cea7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e8cea7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e8cea7a

Branch: refs/heads/HDFS-7285
Commit: 5e8cea7a540035b73ffbc55e456ed644eb0a199f
Parents: b65f508
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 28 14:14:33 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:17:52 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/datanode/BPOfferService.java| 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e8cea7a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index e8db485..c28473b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -134,3 +134,6 @@
 
 HDFS-8033. Erasure coding: stateful (non-positional) read from files in 
 striped layout (Zhe Zhang)
+
+HDFS-8230. Erasure Coding: Ignore 
DatanodeProtocol#DNA_ERASURE_CODING_RECOVERY 
+commands from standbynode if any (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e8cea7a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 69baac7..6606d0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -757,6 +757,7 @@ class BPOfferService {
 case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
 case DatanodeProtocol.DNA_CACHE:
 case DatanodeProtocol.DNA_UNCACHE:
+case DatanodeProtocol.DNA_ERASURE_CODING_RECOVERY:
   LOG.warn(Got a command from standby NN - ignoring command: + 
cmd.getAction());
   break;
 default:



[44/50] hadoop git commit: HDFS-8223. Should calculate checksum for parity blocks in DFSStripedOutputStream. Contributed by Yi Liu.

2015-04-29 Thread jing9
HDFS-8223. Should calculate checksum for parity blocks in 
DFSStripedOutputStream. Contributed by Yi Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f24c2c06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f24c2c06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f24c2c06

Branch: refs/heads/HDFS-7285
Commit: f24c2c064b0a3b5005843c96c19f5549ddb158fb
Parents: cdda9db
Author: Jing Zhao ji...@apache.org
Authored: Thu Apr 23 15:48:21 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:56 2015 -0700

--
 .../main/java/org/apache/hadoop/fs/FSOutputSummer.java|  4 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  |  3 +++
 .../org/apache/hadoop/hdfs/DFSStripedOutputStream.java| 10 ++
 3 files changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c2c06/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
index bdc5585..a8a7494 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -196,6 +196,10 @@ abstract public class FSOutputSummer extends OutputStream {
 return sum.getChecksumSize();
   }
 
+  protected DataChecksum getDataChecksum() {
+return sum;
+  }
+
   protected TraceScope createWriteTraceScope() {
 return NullScope.INSTANCE;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c2c06/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 48791b1..9357e23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -125,3 +125,6 @@
 
 HDFS-8233. Fix DFSStripedOutputStream#getCurrentBlockGroupBytes when the 
last
 stripe is at the block group boundary. (jing9)
+
+HDFS-8223. Should calculate checksum for parity blocks in 
DFSStripedOutputStream.
+(Yi Liu via jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c2c06/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 245dfc1..6842267 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -62,6 +62,8 @@ public class DFSStripedOutputStream extends DFSOutputStream {
*/
   private final ECInfo ecInfo;
   private final int cellSize;
+  // checksum buffer, we only need to calculate checksum for parity blocks
+  private byte[] checksumBuf;
   private ByteBuffer[] cellBuffers;
 
   private final short numAllBlocks;
@@ -99,6 +101,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 
 checkConfiguration();
 
+checksumBuf = new byte[getChecksumSize() * (cellSize / bytesPerChecksum)];
 cellBuffers = new ByteBuffer[numAllBlocks];
 ListBlockingQueueLocatedBlock stripeBlocks = new ArrayList();
 
@@ -179,6 +182,10 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
   private ListDFSPacket generatePackets(ByteBuffer byteBuffer)
   throws IOException{
 ListDFSPacket packets = new ArrayList();
+assert byteBuffer.hasArray();
+getDataChecksum().calculateChunkedSums(byteBuffer.array(), 0,
+byteBuffer.remaining(), checksumBuf, 0);
+int ckOff = 0;
 while (byteBuffer.remaining()  0) {
   DFSPacket p = createPacket(packetSize, chunksPerPacket,
   streamer.getBytesCurBlock(),
@@ -186,6 +193,9 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
   int maxBytesToPacket = p.getMaxChunks() * bytesPerChecksum;
   int toWrite = byteBuffer.remaining()  maxBytesToPacket ?
   maxBytesToPacket: byteBuffer.remaining();
+  int ckLen = ((toWrite - 1) / bytesPerChecksum + 1) * getChecksumSize();
+  p.writeChecksum(checksumBuf, ckOff, ckLen);
+  ckOff += ckLen;
   p.writeData(byteBuffer, 

[50/50] hadoop git commit: Fix merge conflicts.

2015-04-29 Thread jing9
Fix merge conflicts.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3d0e558
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3d0e558
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3d0e558

Branch: refs/heads/HDFS-7285
Commit: f3d0e5588114d6fb2c37c42981180125f19a6ba3
Parents: 3102e6a
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 29 11:35:58 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:35:58 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSInputStream.java  |  7 +++
 .../apache/hadoop/hdfs/DFSStripedOutputStream.java   | 15 ---
 .../java/org/apache/hadoop/hdfs/DataStreamer.java|  1 -
 .../org/apache/hadoop/hdfs/StripedDataStreamer.java  |  7 ---
 4 files changed, 11 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3d0e558/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 6eb25d0..bef4da0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1116,7 +1116,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   /**
* Read data from one DataNode.
* @param datanode the datanode from which to read data
-   * @param block the block to read
+   * @param blockStartOffset starting offset in the file
* @param startInBlk the startInBlk offset of the block
* @param endInBlk the endInBlk offset of the block
* @param buf the given byte array into which the data is read
@@ -1146,7 +1146,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   BlockReader reader = null;
   try {
 DFSClientFaultInjector.get().fetchFromDatanodeException();
-reader = getBlockReader(block, start, len, datanode.addr,
+reader = getBlockReader(block, startInBlk, len, datanode.addr,
 datanode.storageType, datanode.info);
 for (int i = 0; i  offsets.length; i++) {
   int nread = reader.readAll(buf, offsets[i], lengths[i]);
@@ -1203,8 +1203,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
* with each other.
*/
   private void checkReadPortions(int[] offsets, int[] lengths, int totalLen) {
-Preconditions.checkArgument(offsets.length == lengths.length 
-offsets.length  0);
+Preconditions.checkArgument(offsets.length == lengths.length  
offsets.length  0);
 int sum = 0;
 for (int i = 0; i  lengths.length; i++) {
   if (i  0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3d0e558/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 6842267..c930187 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -124,10 +124,7 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
 for (short i = 0; i  numAllBlocks; i++) {
   StripedDataStreamer streamer = new StripedDataStreamer(stat, null,
   dfsClient, src, progress, checksum, cachingStrategy, 
byteArrayManager,
-  i, stripeBlocks);
-  if (favoredNodes != null  favoredNodes.length != 0) {
-streamer.setFavoredNodes(favoredNodes);
-  }
+  i, stripeBlocks, favoredNodes);
   s.add(streamer);
 }
 streamers = Collections.unmodifiableList(s);
@@ -316,7 +313,7 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
   return;
 }
 for (StripedDataStreamer streamer : streamers) {
-  streamer.setLastException(new IOException(Lease timeout of 
+  streamer.getLastException().set(new IOException(Lease timeout of 
   + (dfsClient.getConf().getHdfsTimeout()/1000) +
seconds expired.));
 }
@@ -414,12 +411,8 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
   @Override
   protected synchronized void closeImpl() throws IOException {
 if (isClosed()) {
-  IOException e = getLeadingStreamer().getLastException().getAndSet(null);
-  if 

[20/50] hadoop git commit: HDFS-8122. Erasure Coding: Support specifying ECSchema during creation of ECZone. Contributed by Vinayakumar B.

2015-04-29 Thread jing9
HDFS-8122. Erasure Coding: Support specifying ECSchema during creation of 
ECZone. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d78eec3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d78eec3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d78eec3

Branch: refs/heads/HDFS-7285
Commit: 5d78eec334f5e452a7630bec4a93d0f28ea65129
Parents: 87e09b7
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 13 11:08:57 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:15:36 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  6 ++--
 .../hadoop/hdfs/DistributedFileSystem.java  | 33 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  6 ++--
 ...tNamenodeProtocolServerSideTranslatorPB.java |  4 ++-
 .../ClientNamenodeProtocolTranslatorPB.java |  5 ++-
 .../namenode/ErasureCodingZoneManager.java  | 30 +-
 .../hdfs/server/namenode/FSDirectory.java   | 22 -
 .../hdfs/server/namenode/FSNamesystem.java  | 19 ++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  6 ++--
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  2 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  2 +-
 .../hadoop/hdfs/TestErasureCodingZones.java | 18 +--
 .../server/namenode/TestAddStripedBlocks.java   |  2 +-
 .../server/namenode/TestFSEditLogLoader.java|  4 +--
 .../hdfs/server/namenode/TestFSImage.java   |  4 +--
 16 files changed, 112 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d78eec3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 994d5ac..a254485 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1319,7 +1319,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
  Progressable progress,
  int buffersize,
  ChecksumOpt checksumOpt) throws IOException {
-return create(src, permission, flag, createParent, replication, blockSize, 
+return create(src, permission, flag, createParent, replication, blockSize,
 progress, buffersize, checksumOpt, null);
   }
 
@@ -2970,12 +2970,12 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
-  public void createErasureCodingZone(String src)
+  public void createErasureCodingZone(String src, ECSchema schema)
   throws IOException {
 checkOpen();
 TraceScope scope = getPathTraceScope(createErasureCodingZone, src);
 try {
-  namenode.createErasureCodingZone(src);
+  namenode.createErasureCodingZone(src, schema);
 } catch (RemoteException re) {
   throw re.unwrapRemoteException(AccessControlException.class,
   SafeModeException.class,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d78eec3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 4ca6d57..4c3e0a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -86,6 +86,7 @@ import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
@@ -2264,4 +2265,36 @@ public class DistributedFileSystem extends FileSystem {
   throws IOException {
 return dfs.getInotifyEventStream(lastReadTxid);
   }
+
+  /**
+   * Create the erasurecoding zone
+   * 
+   * @param path Directory to create the ec zone
+   * @param 

[08/50] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-04-29 Thread jing9
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99901eb3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99901eb3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99901eb3

Branch: refs/heads/HDFS-7285
Commit: 99901eb3d91d052db246cd945a52821940a5403e
Parents: f24e244
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Apr 8 01:31:46 2015 +0800
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:06:50 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99901eb3/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 01280db..68d1d32 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -32,3 +32,6 @@
 
 HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
 ( Xinwei Qin via Kai Zheng )
+
+HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
+( Kai Zheng )



[10/50] hadoop git commit: HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by Zhe Zhang and Jing Zhao

2015-04-29 Thread jing9
HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by 
Zhe Zhang and Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/593bbfd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/593bbfd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/593bbfd5

Branch: refs/heads/HDFS-7285
Commit: 593bbfd53a8aa7561acc74bb1a382ee7b68b5859
Parents: 347619a
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 7 11:20:13 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:14:09 2015 -0700

--
 .../hadoop/hdfs/protocol/LocatedBlock.java  |   4 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  55 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  80 +++-
 .../hadoop/hdfs/DFSStripedInputStream.java  | 367 +++
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 +-
 .../hdfs/protocol/LocatedStripedBlock.java  |   5 +
 .../blockmanagement/BlockInfoStriped.java   |   6 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  92 -
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 304 +++
 .../namenode/TestRecoverStripedBlocks.java  |  88 +
 11 files changed, 897 insertions(+), 114 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/593bbfd5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 4e8f202..a9596bf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -203,4 +203,8 @@ public class LocatedBlock {
+ "; locs=" + Arrays.asList(locs)
+ "}";
   }
+
+  public boolean isStriped() {
+return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/593bbfd5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index da3b0e5..ff8bad0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -236,6 +236,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   private static final DFSHedgedReadMetrics HEDGED_READ_METRIC =
   new DFSHedgedReadMetrics();
   private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
+  private static volatile ThreadPoolExecutor STRIPED_READ_THREAD_POOL;
   private final Sampler? traceSampler;
 
   public DfsClientConf getConf() {
@@ -371,6 +372,19 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (dfsClientConf.getHedgedReadThreadpoolSize() > 0) {
   
this.initThreadsNumForHedgedReads(dfsClientConf.getHedgedReadThreadpoolSize());
 }
+numThreads = conf.getInt(
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE,
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+if (numThreads <= 0) {
+  LOG.warn("The value of "
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE
+  + " must be greater than 0. The current setting is " + numThreads
+  + ". Reset it to the default value "
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+  numThreads =
+  DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE;
+}
+this.initThreadsNumForStripedReads(numThreads);
 this.saslClient = new SaslDataTransferClient(
   conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
   TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
@@ -3151,11 +3165,52 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   LOG.debug("Using hedged reads; pool threads=" + num);
 }
   }
+  
+  /**
+   * Create thread pool for parallel reading in striped layout,
+   * STRIPED_READ_THREAD_POOL, if it does not already exist.
+   * @param num Number of threads for striped reads thread pool.
+   */
+  private void initThreadsNumForStripedReads(int num) {
+assert num > 0;
+if (STRIPED_READ_THREAD_POOL != null) {
+

[13/50] hadoop git commit: HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by Zhe Zhang and Jing Zhao

2015-04-29 Thread jing9
HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by 
Zhe Zhang and Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/631916c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/631916c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/631916c3

Branch: refs/heads/HDFS-7285
Commit: 631916c34a39f57924a70174180683fcd0440df1
Parents: 593bbfd
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 7 11:20:13 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:14:10 2015 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/631916c3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index ff8bad0..f4eea49 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3165,7 +3165,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   LOG.debug("Using hedged reads; pool threads=" + num);
 }
   }
-  
+
   /**
* Create thread pool for parallel reading in striped layout,
* STRIPED_READ_THREAD_POOL, if it does not already exist.



[29/50] hadoop git commit: HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the block is a striped block. Contributed by Hui Zheng.

2015-04-29 Thread jing9
HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the block 
is a striped block. Contributed by Hui Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94d0e9a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94d0e9a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94d0e9a0

Branch: refs/heads/HDFS-7285
Commit: 94d0e9a0b7d536480b5c5c4491f9e9b79e4581f9
Parents: d5683a3
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 17 12:05:31 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:54 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  |  2 ++
 .../hdfs/server/blockmanagement/BlockManager.java | 18 --
 2 files changed, 6 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94d0e9a0/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 78ca6d3..0ed61cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -85,3 +85,5 @@
 
 HDFS-7994. Detect if resevered EC Block ID is already used during namenode
 startup. (Hui Zheng via szetszwo)
+
+HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the 
block is a striped block. (Hui Zheng via zhz).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94d0e9a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index dd00e6d..29ca26d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2924,15 +2924,6 @@ public class BlockManager {
   }
 
   /**
-   * Set the value of whether there are any non-EC blocks using StripedID.
-   *
-   * @param has - the value of whether there are any non-EC blocks using 
StripedID.
-   */
-  public void hasNonEcBlockUsingStripedID(boolean has){
-hasNonEcBlockUsingStripedID = has;
-  }
-
-  /**
* Process a single possibly misreplicated block. This adds it to the
* appropriate queues if necessary, and returns a result code indicating
* what happened with it.
@@ -3528,7 +3519,7 @@ public class BlockManager {
 if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
   info = blocksMap.getStoredBlock(
   new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
-  if ((info == null) && hasNonEcBlockUsingStripedID()){
+  if ((info == null) && hasNonEcBlockUsingStripedID){
 info = blocksMap.getStoredBlock(block);
   }
 } else {
@@ -3712,10 +3703,9 @@ public class BlockManager {
*/
   public BlockInfo addBlockCollectionWithCheck(
   BlockInfo block, BlockCollection bc) {
-if (!hasNonEcBlockUsingStripedID()){
-  if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
-hasNonEcBlockUsingStripedID(true);
-  }
+if (!hasNonEcBlockUsingStripedID && !block.isStriped() &&
+BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  hasNonEcBlockUsingStripedID = true;
 }
 return addBlockCollection(block, bc);
   }



[37/50] hadoop git commit: HDFS-8024. Erasure Coding: ECworker frame, basics, bootstraping and configuration. (Contributed by Uma Maheswara Rao G)

2015-04-29 Thread jing9
HDFS-8024. Erasure Coding: ECworker frame, basics, bootstraping and 
configuration. (Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12e161d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12e161d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12e161d3

Branch: refs/heads/HDFS-7285
Commit: 12e161d382d6b72e8326039f8cbf3de391c75c0a
Parents: 7dfd6d4
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Wed Apr 22 19:30:14 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:55 2015 -0700

--
 .../erasurecode/coder/AbstractErasureCoder.java |  2 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  7 ++
 .../hdfs/server/datanode/BPOfferService.java|  6 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   | 10 +++
 .../erasurecode/ErasureCodingWorker.java| 83 
 .../src/main/proto/DatanodeProtocol.proto   |  2 +
 7 files changed, 112 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e161d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
index e5bf11a..7403e35 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
@@ -66,7 +66,7 @@ public abstract class AbstractErasureCoder
* @param isEncoder
* @return raw coder
*/
-  protected static RawErasureCoder createRawCoder(Configuration conf,
+  public static RawErasureCoder createRawCoder(Configuration conf,
   String rawCoderFactoryKey, boolean isEncoder) {
 
 if (conf == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e161d3/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 3d86f05..1acde41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -113,3 +113,6 @@
 
 HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema
 in FileSystemLinkResolver. (szetszwo via Zhe Zhang)
+
+HDFS-8024. Erasure Coding: ECworker frame, basics, bootstraping and 
configuration.
+(umamahesh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e161d3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 0c6c97d..60e6d91 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -973,6 +973,8 @@ public class PBHelper {
   return REG_CMD;
 case BlockIdCommand:
   return PBHelper.convert(proto.getBlkIdCmd());
+case BlockECRecoveryCommand:
+  return PBHelper.convert(proto.getBlkECRecoveryCmd());
 default:
   return null;
 }
@@ -1123,6 +1125,11 @@ public class PBHelper {
   builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
 setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
   break;
+case DatanodeProtocol.DNA_ERASURE_CODING_RECOVERY:
+  builder.setCmdType(DatanodeCommandProto.Type.BlockECRecoveryCommand)
+  .setBlkECRecoveryCmd(
+  convert((BlockECRecoveryCommand) datanodeCommand));
+  break;
 case DatanodeProtocol.DNA_UNKNOWN: //Not expected
 default:
   builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12e161d3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 

[25/50] hadoop git commit: HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all ECSchemas loaded in Namenode. (Contributed by Vinayakumar B)

2015-04-29 Thread jing9
HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all ECSchemas 
loaded in Namenode. (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/824683d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/824683d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/824683d1

Branch: refs/heads/HDFS-7285
Commit: 824683d13b71aed0438e2f89d840de170e556b4e
Parents: 1ce17bc
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Apr 10 15:07:32 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:15:36 2015 -0700

--
 .../apache/hadoop/io/erasurecode/ECSchema.java  |  4 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 11 
 .../hadoop/hdfs/protocol/ClientProtocol.java| 10 
 ...tNamenodeProtocolServerSideTranslatorPB.java | 19 +++
 .../ClientNamenodeProtocolTranslatorPB.java | 26 -
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  5 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +++-
 .../src/main/proto/ClientNamenodeProtocol.proto |  9 
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |  3 +-
 .../org/apache/hadoop/hdfs/TestECSchemas.java   | 57 
 12 files changed, 164 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/824683d1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 8c3310e..32077f6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -123,12 +123,12 @@ public final class ECSchema {
 
 this.chunkSize = DEFAULT_CHUNK_SIZE;
 try {
-  if (options.containsKey(CHUNK_SIZE_KEY)) {
+  if (this.options.containsKey(CHUNK_SIZE_KEY)) {
 this.chunkSize = Integer.parseInt(options.get(CHUNK_SIZE_KEY));
   }
 } catch (NumberFormatException e) {
   throw new IllegalArgumentException("Option value " +
-  options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+  this.options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
" is found. It should be an integer");
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/824683d1/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 753795a..5250dfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -58,4 +58,7 @@
 
 HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
 
-HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
\ No newline at end of file
+HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
+
+HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
+ECSchemas loaded in Namenode. (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/824683d1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 16f876c..994d5ac 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -163,6 +163,7 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -3109,6 +3110,16 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  public ECSchema[] getECSchemas() throws 

[23/50] hadoop git commit: HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits (Vinayakumar B)

2015-04-29 Thread jing9
HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits 
(Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39af9f1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39af9f1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39af9f1d

Branch: refs/heads/HDFS-7285
Commit: 39af9f1df648596a24362de96126e192ac23113d
Parents: de6b66d
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 12:23:07 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:15:36 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39af9f1d/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 07bbd4a..9fdac98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -51,11 +51,20 @@
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
 manage EC zones (Zhe Zhang)
 
+HDFS-7969. Erasure coding: NameNode support for lease recovery of striped
+block groups. (Zhe Zhang)
+
+HDFS-7782. Erasure coding: pread from files in striped layout.
+(Zhe Zhang and Jing Zhao via Zhe Zhang)
+
 HDFS-8023. Erasure Coding: retrieve eraure coding schema for a file from
 NameNode (vinayakumarb)
 
 HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
 
+HDFS-8077. Erasure coding: fix bugs in EC zone and symlinks.
+(Jing Zhao and Zhe Zhang via Jing Zhao)
+
 HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
 
 HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
@@ -63,5 +72,11 @@
 HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
 ECSchemas loaded in Namenode. (vinayakumarb)
 
+HDFS-8122. Erasure Coding: Support specifying ECSchema during creation of 
ECZone.
+(Vinayakumar B via Zhe Zhang)
+
+HDFS-8114. Erasure coding: Add auditlog 
FSNamesystem#createErasureCodingZone if this
+operation fails. (Rakesh R via Zhe Zhang)
+
 HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
 separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file



[43/50] hadoop git commit: HDFS-8136. Client gets and uses EC schema when reads and writes a stripping file. Contributed by Kai Sasaki

2015-04-29 Thread jing9
HDFS-8136. Client gets and uses EC schema when reads and writes a stripping 
file. Contributed by Kai Sasaki


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b9f8733
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b9f8733
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b9f8733

Branch: refs/heads/HDFS-7285
Commit: 5b9f8733fa786f026bdc0750752030efbeb5b0c8
Parents: e6a9253
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 24 00:19:12 2015 +0800
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:56 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/DFSStripedInputStream.java  |  17 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java |  24 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 175 +++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   4 +-
 .../apache/hadoop/hdfs/TestReadStripedFile.java |   1 -
 7 files changed, 210 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9f8733/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index b2faac0..8977c46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -119,3 +119,6 @@
 
 HDFS-8156. Add/implement necessary APIs even we just have the system 
default 
 schema. (Kai Zheng via Zhe Zhang)
+
+HDFS-8136. Client gets and uses EC schema when reads and writes a stripping
+file. (Kai Sasaki via Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9f8733/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index d597407..d0e2b68 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -21,9 +21,9 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.ECInfo;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.NetUtils;
@@ -125,13 +125,19 @@ public class DFSStripedInputStream extends DFSInputStream 
{
 return results;
   }
 
-  private int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
-  private final short dataBlkNum = HdfsConstants.NUM_DATA_BLOCKS;
-  private final short parityBlkNum = HdfsConstants.NUM_PARITY_BLOCKS;
+  private final int cellSize;
+  private final short dataBlkNum;
+  private final short parityBlkNum;
+  private final ECInfo ecInfo;
 
   DFSStripedInputStream(DFSClient dfsClient, String src, boolean 
verifyChecksum)
   throws IOException {
 super(dfsClient, src, verifyChecksum);
+// ECInfo is restored from NN just before reading striped file.
+ecInfo = dfsClient.getErasureCodingInfo(src);
+cellSize = ecInfo.getSchema().getChunkSize();
+dataBlkNum = (short)ecInfo.getSchema().getNumDataUnits();
+parityBlkNum = (short)ecInfo.getSchema().getNumParityUnits();
 DFSClient.LOG.debug("Creating an striped input stream for file " + src);
   }
 
@@ -279,9 +285,6 @@ public class DFSStripedInputStream extends DFSInputStream {
 throw new InterruptedException(let's retry);
   }
 
-  public void setCellSize(int cellSize) {
-this.cellSize = cellSize;
-  }
 
   /**
* This class represents the portion of I/O associated with each block in the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9f8733/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 

[01/50] hadoop git commit: HDFS-7969. Erasure coding: NameNode support for lease recovery of striped block groups. Contributed by Zhe Zhang.

2015-04-29 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 bd5cb5981 - f3d0e5588 (forced update)


HDFS-7969. Erasure coding: NameNode support for lease recovery of striped block 
groups. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80e6018c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80e6018c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80e6018c

Branch: refs/heads/HDFS-7285
Commit: 80e6018c430ac2a4c0e2b6fb88bf9c771f48e49a
Parents: 979f453
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 6 12:52:44 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:06:49 2015 -0700

--
 .../BlockInfoContiguousUnderConstruction.java   | 33 
 .../BlockInfoStripedUnderConstruction.java  | 80 
 .../BlockInfoUnderConstruction.java | 57 ++
 .../blockmanagement/DatanodeDescriptor.java | 12 +--
 .../server/blockmanagement/DatanodeManager.java | 10 +--
 .../hdfs/server/namenode/FSNamesystem.java  | 24 +++---
 .../TestBlockInfoUnderConstruction.java |  2 +-
 7 files changed, 163 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80e6018c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
index 7a052fd..9ba2978 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
@@ -31,7 +31,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 * Represents a block that is currently being constructed.<br>
  * This is usually the last block of a file opened for write or append.
  */
-public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
+public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous
+implements BlockInfoUnderConstruction{
   /** Block state. See {@link BlockUCState} */
   private BlockUCState blockUCState;
 
@@ -94,7 +95,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 return new BlockInfoContiguous(this);
   }
 
-  /** Set expected locations */
+  @Override
   public void setExpectedLocations(DatanodeStorageInfo[] targets) {
 int numLocations = targets == null ? 0 : targets.length;
 this.replicas = new ArrayList<>(numLocations);
@@ -104,10 +105,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 }
   }
 
-  /**
-   * Create array of expected replica locations
-   * (as has been assigned by chooseTargets()).
-   */
+  @Override
   public DatanodeStorageInfo[] getExpectedStorageLocations() {
 int numLocations = replicas == null ? 0 : replicas.size();
 DatanodeStorageInfo[] storages = new DatanodeStorageInfo[numLocations];
@@ -117,7 +115,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 return storages;
   }
 
-  /** Get the number of expected locations */
+  @Override
   public int getNumExpectedLocations() {
 return replicas == null ? 0 : replicas.size();
   }
@@ -135,25 +133,26 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 blockUCState = s;
   }
 
-  /** Get block recovery ID */
+  @Override
   public long getBlockRecoveryId() {
 return blockRecoveryId;
   }
 
-  /** Get recover block */
+  @Override
   public Block getTruncateBlock() {
 return truncateBlock;
   }
 
+  @Override
+  public Block toBlock(){
+return this;
+  }
+
   public void setTruncateBlock(Block recoveryBlock) {
 this.truncateBlock = recoveryBlock;
   }
 
-  /**
-   * Process the recorded replicas. When about to commit or finish the
-   * pipeline recovery sort out bad replicas.
-   * @param genStamp  The final generation stamp for the block.
-   */
+  @Override
   public void setGenerationStampAndVerifyReplicas(long genStamp) {
 // Set the generation stamp for the block.
 setGenerationStamp(genStamp);
@@ -187,11 +186,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
   }
 
-  /**
-   * Initialize lease recovery for this block.
-   * Find the first 

[36/50] hadoop git commit: HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema in FileSystemLinkResolver. Contributed by Tsz Wo Nicholas Sze.

2015-04-29 Thread jing9
HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema in 
FileSystemLinkResolver. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7dfd6d42
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7dfd6d42
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7dfd6d42

Branch: refs/heads/HDFS-7285
Commit: 7dfd6d42b01013bc7ebf08a4f0e55da18c8019c6
Parents: dbeb2c9
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 21 21:03:07 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:55 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  | 3 +++
 .../main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dfd6d42/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index d8f2e9d..3d86f05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -110,3 +110,6 @@
 
 HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to 
 create BlockReader. (szetszwo via Zhe Zhang)
+
+HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema
+in FileSystemLinkResolver. (szetszwo via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dfd6d42/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 4c8fff3..ede4f48 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2281,7 +2281,7 @@ public class DistributedFileSystem extends FileSystem {
   @Override
   public Void doCall(final Path p) throws IOException,
   UnresolvedLinkException {
-dfs.createErasureCodingZone(getPathName(p), null);
+dfs.createErasureCodingZone(getPathName(p), schema);
 return null;
   }
 



[38/50] hadoop git commit: HDFS-8181. createErasureCodingZone sets retryCache state as false always (Contributed by Uma Maheswara Rao G)

2015-04-29 Thread jing9
HDFS-8181. createErasureCodingZone sets retryCache state as false always 
(Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80e516fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80e516fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80e516fc

Branch: refs/heads/HDFS-7285
Commit: 80e516fcc977de4b05f6a0cfbbf0258749a543af
Parents: 92aadcd
Author: Vinayakumar B vinayakum...@apache.org
Authored: Mon Apr 20 15:04:49 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:55 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  | 14 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java   |  1 +
 2 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80e516fc/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 40517e7..c8dbf08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -83,10 +83,24 @@
 
 HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
 
+HDFS-8120. Erasure coding: created util class to analyze striped block 
groups.
+(Contributed by Zhe Zhang and Li Bo via Jing Zhao)
+
 HDFS-7994. Detect if resevered EC Block ID is already used during namenode
 startup. (Hui Zheng via szetszwo)
 
 HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the 
block is a striped block. (Hui Zheng via zhz).
 
+HDFS-8166. DFSStripedOutputStream should not create empty blocks. (Jing 
Zhao)
+
+HDFS-7937. Erasure Coding: INodeFile quota computation unit tests.
+(Kai Sasaki via Jing Zhao)
+
+HDFS-8145. Fix the editlog corruption exposed by failed 
TestAddStripedBlocks.
+(Jing Zhao)
+
 HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for
 making it ready for transfer to DN (Uma Maheswara Rao G via vinayakumarb)
+
+HDFS-8181. createErasureCodingZone sets retryCache state as false always
+(Uma Maheswara Rao G via vinayakumarb)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80e516fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 7bbf358..36510f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1856,6 +1856,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
 boolean success = false;
 try {
   namesystem.createErasureCodingZone(src, schema, cacheEntry != null);
+  success = true;
 } finally {
   RetryCache.setState(cacheEntry, success);
 }



[39/50] hadoop git commit: HDFS-8188. Erasure coding: refactor client-related code to sync with HDFS-8082 and HDFS-8169. Contributed by Zhe Zhang.

2015-04-29 Thread jing9
HDFS-8188. Erasure coding: refactor client-related code to sync with HDFS-8082 
and HDFS-8169. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df776013
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df776013
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df776013

Branch: refs/heads/HDFS-7285
Commit: df776013d54e0ee971e97c700ff2c4f1bb6626ce
Parents: 80e516f
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 20 14:19:12 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:55 2015 -0700

--
 .../hdfs/client/HdfsClientConfigKeys.java   | 12 
 .../hdfs/protocol/LocatedStripedBlock.java  | 64 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 21 ++
 .../hadoop/hdfs/client/impl/DfsClientConf.java  | 21 +-
 .../hdfs/protocol/LocatedStripedBlock.java  | 73 
 .../server/blockmanagement/BlockManager.java| 25 ---
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../server/namenode/TestStripedINodeFile.java   |  3 +-
 8 files changed, 120 insertions(+), 101 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df776013/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 86c8a87..dc2f1d5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -176,6 +176,18 @@ public interface HdfsClientConfigKeys {
 int THREADPOOL_SIZE_DEFAULT = 0;
   }
 
+  /** dfs.client.read.striped configuration properties */
+  interface StripedRead {
+String PREFIX = Read.PREFIX + "striped.";
+
+String  THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size";
+/**
+ * With default 6+3 schema, each normal read could span 6 DNs. So this
+ * default value accommodates 3 read streams
+ */
+int THREADPOOL_SIZE_DEFAULT = 18;
+  }
+
   /** dfs.http.client configuration properties */
   interface HttpClient {
 String  PREFIX = dfs.http.client.;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df776013/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
new file mode 100644
index 000..93a5948
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.StorageType;
+
+import java.util.Arrays;
+
+/**
+ * {@link LocatedBlock} with striped block support. For a striped block, each
+ * datanode storage is associated with a block in the block group. We need to
+ * record the index (in the striped block group) for each of them.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class LocatedStripedBlock extends LocatedBlock {
+  private int[] blockIndices;
+
+  public LocatedStripedBlock(ExtendedBlock b, DatanodeInfo[] locs,
+  String[] storageIDs, StorageType[] storageTypes, int[] indices,
+  long startOffset, boolean corrupt, 

[34/50] hadoop git commit: HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.

2015-04-29 Thread jing9
HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b0ad404
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b0ad404
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b0ad404

Branch: refs/heads/HDFS-7285
Commit: 4b0ad40441f6ea56f63c57b371d1837547b2b2b1
Parents: df77601
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Mon Apr 20 17:42:02 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:55 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  61 ---
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 178 +++
 3 files changed, 100 insertions(+), 142 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b0ad404/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index c8dbf08..8f28285 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -104,3 +104,6 @@
 
 HDFS-8181. createErasureCodingZone sets retryCache state as false always
 (Uma Maheswara Rao G via vinayakumarb)
+
+HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.
+(szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b0ad404/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 2368021..d622d4d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Utility class for analyzing striped block groups
  */
@@ -81,46 +83,43 @@ public class StripedBlockUtil {
   /**
* Get the size of an internal block at the given index of a block group
*
-   * @param numBytesInGroup Size of the block group only counting data blocks
+   * @param dataSize Size of the block group only counting data blocks
* @param cellSize The size of a striping cell
-   * @param dataBlkNum The number of data blocks
-   * @param idxInGroup The logical index in the striped block group
+   * @param numDataBlocks The number of data blocks
+   * @param i The logical index in the striped block group
* @return The size of the internal block at the specified index
*/
-  public static long getInternalBlockLength(long numBytesInGroup,
-  int cellSize, int dataBlkNum, int idxInGroup) {
+  public static long getInternalBlockLength(long dataSize,
+  int cellSize, int numDataBlocks, int i) {
+Preconditions.checkArgument(dataSize >= 0);
+Preconditions.checkArgument(cellSize > 0);
+Preconditions.checkArgument(numDataBlocks > 0);
+Preconditions.checkArgument(i >= 0);
 // Size of each stripe (only counting data blocks)
-final long numBytesPerStripe = cellSize * dataBlkNum;
-assert numBytesPerStripe > 0:
-"getInternalBlockLength should only be called on valid striped blocks";
+final int stripeSize = cellSize * numDataBlocks;
 // If block group ends at stripe boundary, each internal block has an equal
 // share of the group
-if (numBytesInGroup % numBytesPerStripe == 0) {
-  return numBytesInGroup / dataBlkNum;
+final int lastStripeDataLen = (int)(dataSize % stripeSize);
+if (lastStripeDataLen == 0) {
+  return dataSize / numDataBlocks;
 }
 
-int numStripes = (int) ((numBytesInGroup - 1) / numBytesPerStripe + 1);
-assert numStripes >= 1 : "There should be at least 1 stripe";
-
-// All stripes but the last one are full stripes. The block should at least
-// contain (numStripes - 1) full cells.
-long blkSize = (numStripes - 1) * cellSize;
-
-long lastStripeLen = numBytesInGroup % numBytesPerStripe;
-// Size of parity cells should equal the size of the first cell, if it
-// is not full.
-long lastParityCellLen = Math.min(cellSize, lastStripeLen);
-
-if (idxInGroup >= dataBlkNum) {
-  // for 

[22/50] hadoop git commit: HDFS-8114. Erasure coding: Add auditlog FSNamesystem#createErasureCodingZone if this operation fails. Contributed by Rakesh R.

2015-04-29 Thread jing9
HDFS-8114. Erasure coding: Add auditlog FSNamesystem#createErasureCodingZone if 
this operation fails. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3297989c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3297989c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3297989c

Branch: refs/heads/HDFS-7285
Commit: 3297989c20dd296222423474d43d955284e5e552
Parents: 5d78eec
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 13 11:15:02 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:15:36 2015 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  | 21 ++--
 1 file changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3297989c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7c1fd16..dc71201 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8133,11 +8133,19 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   SafeModeException, AccessControlException {
 String src = srcArg;
 HdfsFileStatus resultingStat = null;
-checkSuperuserPrivilege();
-checkOperation(OperationCategory.WRITE);
-final byte[][] pathComponents =
-FSDirectory.getPathComponentsForReservedPath(src);
-FSPermissionChecker pc = getPermissionChecker();
+FSPermissionChecker pc = null;
+byte[][] pathComponents = null;
+boolean success = false;
+try {
+  checkSuperuserPrivilege();
+  checkOperation(OperationCategory.WRITE);
+  pathComponents =
+  FSDirectory.getPathComponentsForReservedPath(src);
+  pc = getPermissionChecker();
+} catch (Throwable e) {
+  logAuditEvent(success, "createErasureCodingZone", srcArg);
+  throw e;
+}
 writeLock();
 try {
   checkSuperuserPrivilege();
@@ -8151,11 +8159,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
   final INodesInPath iip = dir.getINodesInPath4Write(src, false);
   resultingStat = dir.getAuditFileInfo(iip);
+  success = true;
 } finally {
   writeUnlock();
 }
 getEditLog().logSync();
-logAuditEvent(true, "createErasureCodingZone", srcArg, null, 
resultingStat);
+logAuditEvent(success, "createErasureCodingZone", srcArg, null, 
resultingStat);
   }
 
   /**



[03/50] hadoop git commit: HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by Zhe Zhang.

2015-04-29 Thread jing9
HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by 
Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0ef98ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0ef98ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0ef98ea

Branch: refs/heads/HDFS-7285
Commit: b0ef98ea335c319dcebb2b59004feb033dd3050a
Parents: d717dc3
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 3 15:22:50 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:06:49 2015 -0700

--
 .../coder/AbstractErasureDecoder.java   |  7 ++--
 .../coder/AbstractErasureEncoder.java   |  7 ++--
 .../io/erasurecode/coder/ErasureCoder.java  | 12 ++
 .../io/erasurecode/coder/ErasureDecoder.java| 41 
 .../io/erasurecode/coder/ErasureEncoder.java| 39 ---
 .../erasurecode/coder/TestErasureCoderBase.java | 20 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt| 14 ++-
 7 files changed, 41 insertions(+), 99 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0ef98ea/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
index 54a6d1e..cd31294 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
@@ -23,13 +23,12 @@ import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 /**
  * An abstract erasure decoder that's to be inherited by new decoders.
  *
- * It implements the {@link ErasureDecoder} interface.
+ * It implements the {@link ErasureCoder} interface.
  */
-public abstract class AbstractErasureDecoder extends AbstractErasureCoder
-implements ErasureDecoder {
+public abstract class AbstractErasureDecoder extends AbstractErasureCoder {
 
   @Override
-  public ErasureCodingStep decode(ECBlockGroup blockGroup) {
+  public ErasureCodingStep calculateCoding(ECBlockGroup blockGroup) {
 // We may have more than this when considering complicate cases. 
HADOOP-11550
 return prepareDecodingStep(blockGroup);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0ef98ea/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
index 09b31e5..a836b75 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
@@ -23,13 +23,12 @@ import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 /**
  * An abstract erasure encoder that's to be inherited by new encoders.
  *
- * It implements the {@link ErasureEncoder} interface.
+ * It implements the {@link ErasureCoder} interface.
  */
-public abstract class AbstractErasureEncoder extends AbstractErasureCoder
-implements ErasureEncoder {
+public abstract class AbstractErasureEncoder extends AbstractErasureCoder {
 
   @Override
-  public ErasureCodingStep encode(ECBlockGroup blockGroup) {
+  public ErasureCodingStep calculateCoding(ECBlockGroup blockGroup) {
 // We may have more than this when considering complicate cases. 
HADOOP-11550
 return prepareEncodingStep(blockGroup);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0ef98ea/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
index c5922f3..fb90156 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
+++ 

[21/50] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk changes (this commit mainly addresses HDFS-8081 and HDFS-8048. Contributed by Zhe Zhang.

2015-04-29 Thread jing9
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk 
changes (this commit mainly addresses HDFS-8081 and HDFS-8048. Contributed by 
Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87e09b71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87e09b71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87e09b71

Branch: refs/heads/HDFS-7285
Commit: 87e09b71aa12341ce613edfe74dc92e0c9fb4ff1
Parents: 824683d
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 13 10:56:24 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:15:36 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSInputStream.java |  4 ++--
 .../apache/hadoop/hdfs/DFSStripedInputStream.java   | 16 +---
 .../apache/hadoop/hdfs/DFSStripedOutputStream.java  |  3 ++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   |  5 +++--
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  3 ++-
 5 files changed, 18 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87e09b71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 79bbd54..9104f84 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1106,7 +1106,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   int offset, Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {
 final int length = (int) (end - start + 1);
-actualGetFromOneDataNode(datanode, block, start, end, buf,
+actualGetFromOneDataNode(datanode, blockStartOffset, start, end, buf,
 new int[]{offset}, new int[]{length}, corruptedBlockMap);
   }
 
@@ -1125,7 +1125,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
*  block replica
*/
   void actualGetFromOneDataNode(final DNAddrPair datanode,
-  LocatedBlock block, final long startInBlk, final long endInBlk,
+  long blockStartOffset, final long startInBlk, final long endInBlk,
   byte[] buf, int[] offsets, int[] lengths,
   Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87e09b71/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 077b0f8..8a431b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -224,7 +224,7 @@ public class DFSStripedInputStream extends DFSInputStream {
* Real implementation of pread.
*/
   @Override
-  protected void fetchBlockByteRange(LocatedBlock block, long start,
+  protected void fetchBlockByteRange(long blockStartOffset, long start,
   long end, byte[] buf, int offset,
   MapExtendedBlock, SetDatanodeInfo corruptedBlockMap)
   throws IOException {
@@ -234,7 +234,7 @@ public class DFSStripedInputStream extends DFSInputStream {
 int len = (int) (end - start + 1);
 
 // Refresh the striped block group
-block = getBlockGroupAt(block.getStartOffset());
+LocatedBlock block = getBlockGroupAt(blockStartOffset);
 assert block instanceof LocatedStripedBlock : "NameNode" +
  " should return a LocatedStripedBlock for a striped file";
 LocatedStripedBlock blockGroup = (LocatedStripedBlock) block;
@@ -254,9 +254,11 @@ public class DFSStripedInputStream extends DFSInputStream {
   DatanodeInfo loc = blks[i].getLocations()[0];
   StorageType type = blks[i].getStorageTypes()[0];
   DNAddrPair dnAddr = new DNAddrPair(loc, NetUtils.createSocketAddr(
-  loc.getXferAddr(dfsClient.getConf().connectToDnViaHostname)), type);
-  CallableVoid readCallable = getFromOneDataNode(dnAddr, blks[i],
-  rp.startOffsetInBlock, rp.startOffsetInBlock + rp.readLength - 1, 
buf,
+  loc.getXferAddr(dfsClient.getConf().isConnectToDnViaHostname())),
+  type);
+  CallableVoid readCallable = getFromOneDataNode(dnAddr,
+

[47/50] hadoop git commit: HDFS-8189. ClientProtocol#createErasureCodingZone API was wrongly annotated as Idempotent (Contributed by Vinayakumar B)

2015-04-29 Thread jing9
HDFS-8189. ClientProtocol#createErasureCodingZone API was wrongly annotated as 
Idempotent (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6452510a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6452510a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6452510a

Branch: refs/heads/HDFS-7285
Commit: 6452510ac57a3ecadb3353d5abd8af2a3e8fc27f
Parents: 5e8cea7
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 28 14:24:17 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:17:52 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 -
 .../apache/hadoop/hdfs/protocol/ClientProtocol.java | 16 
 2 files changed, 12 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6452510a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index c28473b..6c5d7ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -136,4 +136,7 @@
 striped layout (Zhe Zhang)
 
 HDFS-8230. Erasure Coding: Ignore 
DatanodeProtocol#DNA_ERASURE_CODING_RECOVERY 
-commands from standbynode if any (vinayakumarb)
\ No newline at end of file
+commands from standbynode if any (vinayakumarb)
+
+HDFS-8189. ClientProtocol#createErasureCodingZone API was wrongly annotated
+as Idempotent (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6452510a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index c257cc1..e32ac22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1364,14 +1364,6 @@ public interface ClientProtocol {
   long prevId) throws IOException;
 
   /**
-   * Create an erasure coding zone with specified schema, if any, otherwise
-   * default
-   */
-  @Idempotent
-  public void createErasureCodingZone(String src, ECSchema schema)
-  throws IOException;
-
-  /**
* Set xattr of a file or directory.
* The name must be prefixed with the namespace followed by .. For example,
* user.attr.
@@ -1467,6 +1459,14 @@ public interface ClientProtocol {
   public EventBatchList getEditsFromTxid(long txid) throws IOException;
 
   /**
+   * Create an erasure coding zone with specified schema, if any, otherwise
+   * default
+   */
+  @AtMostOnce
+  public void createErasureCodingZone(String src, ECSchema schema)
+  throws IOException;
+
+  /**
* Gets the ECInfo for the specified file/directory
* 
* @param src



[24/50] hadoop git commit: HDFS-7349. Support DFS command for the EC encoding (Contributed by Vinayakumar B)

2015-04-29 Thread jing9
HDFS-7349. Support DFS command for the EC encoding (Contributed by Vinayakumar 
B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/186f139c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/186f139c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/186f139c

Branch: refs/heads/HDFS-7285
Commit: 186f139ced853e54572a8a568d4d7073dcd96e68
Parents: 39af9f1
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 16:38:22 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:15:36 2015 -0700

--
 .../main/java/org/apache/hadoop/fs/FsShell.java |   8 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../hadoop-hdfs/src/main/bin/hdfs   |   5 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  18 ++
 .../hadoop/hdfs/DistributedFileSystem.java  |  32 +++
 .../hadoop/hdfs/protocol/ClientProtocol.java|   9 +
 .../apache/hadoop/hdfs/protocol/ECZoneInfo.java |  56 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  18 ++
 .../ClientNamenodeProtocolTranslatorPB.java |  19 ++
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  12 ++
 .../namenode/ErasureCodingZoneManager.java  |  11 +-
 .../hdfs/server/namenode/FSDirectory.java   |  10 +
 .../hdfs/server/namenode/FSNamesystem.java  |  24 +++
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +
 .../hadoop/hdfs/tools/erasurecode/ECCli.java|  48 +
 .../hdfs/tools/erasurecode/ECCommand.java   | 209 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |   2 +
 .../src/main/proto/erasurecoding.proto  |  15 ++
 18 files changed, 502 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/186f139c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index db73f6d..f873a01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -111,6 +111,10 @@ public class FsShell extends Configured implements Tool {
 return getTrash().getCurrentTrashDir();
   }
 
+  protected String getUsagePrefix() {
+return usagePrefix;
+  }
+
   // NOTE: Usage/Help are inner classes to allow access to outer methods
   // that access commandFactory
   
@@ -194,7 +198,7 @@ public class FsShell extends Configured implements Tool {
   }
 } else {
   // display help or usage for all commands 
-  out.println(usagePrefix);
+  out.println(getUsagePrefix());
   
   // display list of short usages
   ArrayListCommand instances = new ArrayListCommand();
@@ -218,7 +222,7 @@ public class FsShell extends Configured implements Tool {
   }
 
   private void printInstanceUsage(PrintStream out, Command instance) {
-out.println(usagePrefix + " " + instance.getUsage());
+out.println(getUsagePrefix() + " " + instance.getUsage());
   }
 
   private void printInstanceHelp(PrintStream out, Command instance) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/186f139c/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9fdac98..b9fc6fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -79,4 +79,6 @@
 operation fails. (Rakesh R via Zhe Zhang)
 
 HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
-separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file
+separate erasurecoding proto file (Rakesh R via vinayakumarb)
+
+HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/186f139c/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index f464261..84c79b8 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -134,6 +134,11 @@ case ${COMMAND} in
 hadoop_debug Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS
 HADOOP_OPTS=${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}
   ;;
+  erasurecode)
+

[46/50] hadoop git commit: Addendum fix for HDFS-7749 to be compatible with HDFS-7993

2015-04-29 Thread jing9
Addendum fix for HDFS-7749 to be compatible with HDFS-7993


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b65f5089
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b65f5089
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b65f5089

Branch: refs/heads/HDFS-7285
Commit: b65f508988a7a3df0b7f48f7487759d1993294a9
Parents: f827630
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 27 11:08:16 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:17:52 2015 -0700

--
 .../apache/hadoop/hdfs/server/namenode/NamenodeFsck.java  | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b65f5089/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 0c564bd..b108cd6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -629,9 +629,9 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 report.append( repl= + liveReplicas);
 if (showLocations || showRacks || showReplicaDetails) {
   StringBuilder sb = new StringBuilder([);
-  Iterable<DatanodeStorageInfo> storages = 
bm.getStorages(block.getLocalBlock());
-  for (Iterator<DatanodeStorageInfo> iterator = storages.iterator(); 
iterator.hasNext();) {
-DatanodeStorageInfo storage = iterator.next();
+  DatanodeStorageInfo[] storages = bm.getStorages(storedBlock);
+  for (int i = 0; i < storages.length; i++) {
+DatanodeStorageInfo storage = storages[i];
 DatanodeDescriptor dnDesc = storage.getDatanodeDescriptor();
 if (showRacks) {
   sb.append(NodeBase.getPath(dnDesc));
@@ -640,7 +640,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   .getStorageType()));
 }
 if (showReplicaDetails) {
-  LightWeightLinkedSet<Block> blocksExcess =
+  LightWeightLinkedSet<BlockInfo> blocksExcess =
   bm.excessReplicateMap.get(dnDesc.getDatanodeUuid());
   Collection<DatanodeDescriptor> corruptReplicas =
   bm.getCorruptReplicas(block.getLocalBlock());
@@ -661,7 +661,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 sb.append(LIVE));
   }
 }
-if (iterator.hasNext()) {
+if (i < storages.length - 1) {
   sb.append(, );
 }
   }



[09/50] hadoop git commit: HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng

2015-04-29 Thread jing9
HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f24e2442
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f24e2442
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f24e2442

Branch: refs/heads/HDFS-7285
Commit: f24e2442b74942370de99dfeb8566a8d56f8861d
Parents: 80e6018
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Apr 8 01:26:40 2015 +0800
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:06:50 2015 -0700

--
 .../io/erasurecode/coder/RSErasureDecoder.java  |  8 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  4 +-
 .../io/erasurecode/coder/XORErasureDecoder.java | 78 
 .../io/erasurecode/coder/XORErasureEncoder.java | 45 ++
 .../io/erasurecode/coder/XorErasureDecoder.java | 78 
 .../io/erasurecode/coder/XorErasureEncoder.java | 45 --
 .../io/erasurecode/rawcoder/JRSRawDecoder.java  | 69 ---
 .../io/erasurecode/rawcoder/JRSRawEncoder.java  | 78 
 .../rawcoder/JRSRawErasureCoderFactory.java | 34 ---
 .../io/erasurecode/rawcoder/RSRawDecoder.java   | 69 +++
 .../io/erasurecode/rawcoder/RSRawEncoder.java   | 78 
 .../rawcoder/RSRawErasureCoderFactory.java  | 34 +++
 .../io/erasurecode/rawcoder/XORRawDecoder.java  | 81 +
 .../io/erasurecode/rawcoder/XORRawEncoder.java  | 61 +
 .../rawcoder/XORRawErasureCoderFactory.java | 34 +++
 .../io/erasurecode/rawcoder/XorRawDecoder.java  | 81 -
 .../io/erasurecode/rawcoder/XorRawEncoder.java  | 61 -
 .../rawcoder/XorRawErasureCoderFactory.java | 34 ---
 .../erasurecode/coder/TestRSErasureCoder.java   |  4 +-
 .../io/erasurecode/coder/TestXORCoder.java  | 50 +++
 .../io/erasurecode/coder/TestXorCoder.java  | 50 ---
 .../erasurecode/rawcoder/TestJRSRawCoder.java   | 93 
 .../io/erasurecode/rawcoder/TestRSRawCoder.java | 93 
 .../erasurecode/rawcoder/TestXORRawCoder.java   | 49 +++
 .../erasurecode/rawcoder/TestXorRawCoder.java   | 51 ---
 25 files changed, 680 insertions(+), 682 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24e2442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index ba32f04..e2c5051 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -4,9 +4,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.rawcoder.JRSRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
-import org.apache.hadoop.io.erasurecode.rawcoder.XorRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;
 
 /**
  * Reed-Solomon erasure decoder that decodes a block group.
@@ -56,7 +56,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
   rsRawDecoder = createRawDecoder(
   CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY);
   if (rsRawDecoder == null) {
-rsRawDecoder = new JRSRawDecoder();
+rsRawDecoder = new RSRawDecoder();
   }
   rsRawDecoder.initialize(getNumDataUnits(),
   getNumParityUnits(), getChunkSize());
@@ -66,7 +66,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
 
   private RawErasureDecoder checkCreateXorRawDecoder() {
 if (xorRawDecoder == null) {
-  xorRawDecoder = new XorRawDecoder();
+  xorRawDecoder = new XORRawDecoder();
   xorRawDecoder.initialize(getNumDataUnits(), 1, getChunkSize());
 }
 return xorRawDecoder;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24e2442/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
 

[05/50] hadoop git commit: HADOOP-11645. Erasure Codec API covering the essential aspects for an erasure code ( Contributed by Kai Zheng)

2015-04-29 Thread jing9
HADOOP-11645. Erasure Codec API covering the essential aspects for an erasure 
code ( Contributed by Kai Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/347619a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/347619a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/347619a4

Branch: refs/heads/HDFS-7285
Commit: 347619a406415dd0dff186e43a7f871ca13b6748
Parents: b95f233
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 16:05:22 2015 +0530
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:06:50 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +
 .../hadoop/io/erasurecode/ECBlockGroup.java | 18 
 .../erasurecode/codec/AbstractErasureCodec.java | 88 +++
 .../io/erasurecode/codec/ErasureCodec.java  | 56 
 .../io/erasurecode/codec/RSErasureCodec.java| 38 +
 .../io/erasurecode/codec/XORErasureCodec.java   | 45 ++
 .../erasurecode/coder/AbstractErasureCoder.java |  7 ++
 .../io/erasurecode/coder/ErasureCoder.java  |  7 ++
 .../io/erasurecode/grouper/BlockGrouper.java| 90 
 9 files changed, 352 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/347619a4/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 7716728..c72394e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -37,3 +37,6 @@
 
 HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
 ( Kai Zheng )
+
+HADOOP-11645. Erasure Codec API covering the essential aspects for an 
erasure code
+( Kai Zheng via vinayakumarb )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/347619a4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
index 2c851a5..0a86907 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
@@ -79,4 +79,22 @@ public class ECBlockGroup {
 return false;
   }
 
+  /**
+   * Get erased blocks count
+   * @return
+   */
+  public int getErasedCount() {
+int erasedCount = 0;
+
+for (ECBlock dataBlock : dataBlocks) {
+  if (dataBlock.isErased()) erasedCount++;
+}
+
+for (ECBlock parityBlock : parityBlocks) {
+  if (parityBlock.isErased()) erasedCount++;
+}
+
+return erasedCount;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/347619a4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
new file mode 100644
index 000..9993786
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.codec;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.coder.*;
+import 

[31/50] hadoop git commit: HDFS-7937. Erasure Coding: INodeFile quota computation unit tests. Contributed by Kai Sasaki.

2015-04-29 Thread jing9
HDFS-7937. Erasure Coding: INodeFile quota computation unit tests. Contributed 
by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5fbe1383
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5fbe1383
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5fbe1383

Branch: refs/heads/HDFS-7285
Commit: 5fbe138326e1196b85bfdaaa67f923b90ed0e015
Parents: 4921a4e
Author: Jing Zhao ji...@apache.org
Authored: Fri Apr 17 18:07:07 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:54 2015 -0700

--
 .../blockmanagement/BlockInfoStriped.java   |  23 +-
 .../server/namenode/TestStripedINodeFile.java   | 229 +++
 2 files changed, 250 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fbe1383/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 20b0c5c..9f2f5ba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -203,8 +203,27 @@ public class BlockInfoStriped extends BlockInfo {
 // In case striped blocks, total usage by this striped blocks should
 // be the total of data blocks and parity blocks because
 // `getNumBytes` is the total of actual data block size.
-return ((getNumBytes() - 1) / (dataBlockNum * BLOCK_STRIPED_CELL_SIZE) + 1)
-* BLOCK_STRIPED_CELL_SIZE * parityBlockNum + getNumBytes();
+
+// 0. Calculate the total bytes per stripes Num Bytes per Stripes
+long numBytesPerStripe = dataBlockNum * BLOCK_STRIPED_CELL_SIZE;
+if (getNumBytes() % numBytesPerStripe == 0) {
+  return getNumBytes() / dataBlockNum * getTotalBlockNum();
+}
+// 1. Calculate the number of stripes in this block group. Num Stripes
+long numStripes = (getNumBytes() - 1) / numBytesPerStripe + 1;
+// 2. Calculate the parity cell length in the last stripe. Note that the
+//size of parity cells should equal the size of the first cell, if it
+//is not full. Last Stripe Parity Cell Length
+long lastStripeParityCellLen = Math.min(getNumBytes() % numBytesPerStripe,
+BLOCK_STRIPED_CELL_SIZE);
+// 3. Total consumed space is the total of
+// - The total of the full cells of data blocks and parity blocks.
+// - The remaining of data block which does not make a stripe.
+// - The last parity block cells. These size should be same
+//   to the first cell in this stripe.
+return getTotalBlockNum() * (BLOCK_STRIPED_CELL_SIZE * (numStripes - 1))
++ getNumBytes() % numBytesPerStripe
++ lastStripeParityCellLen * parityBlockNum;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fbe1383/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
new file mode 100644
index 000..d251c30
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertEquals;
+import static 

[35/50] hadoop git commit: HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to create BlockReader. Contributed by Tsz Wo Nicholas Sze.

2015-04-29 Thread jing9
HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to create 
BlockReader. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dbeb2c99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dbeb2c99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dbeb2c99

Branch: refs/heads/HDFS-7285
Commit: dbeb2c9981a3339a2cc3a620e9bd55151b9a6804
Parents: 4b0ad40
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 21 20:56:39 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:55 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +
 .../apache/hadoop/hdfs/BlockReaderTestUtil.java |  7 +--
 .../hadoop/hdfs/TestBlockReaderFactory.java | 16 +++---
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 58 ++--
 4 files changed, 20 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbeb2c99/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 8f28285..d8f2e9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -107,3 +107,6 @@
 
 HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.
 (szetszwo)
+
+HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to 
+create BlockReader. (szetszwo via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbeb2c99/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
index 88b7f37..829cf03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
@@ -165,20 +165,19 @@ public class BlockReaderTestUtil {
*/
   public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int 
lenToRead)
   throws IOException {
-return getBlockReader(cluster, testBlock, offset, lenToRead);
+return getBlockReader(cluster.getFileSystem(), testBlock, offset, 
lenToRead);
   }
 
   /**
* Get a BlockReader for the given block.
*/
-  public static BlockReader getBlockReader(MiniDFSCluster cluster,
-  LocatedBlock testBlock, int offset, int lenToRead) throws IOException {
+  public static BlockReader getBlockReader(final DistributedFileSystem fs,
+  LocatedBlock testBlock, int offset, long lenToRead) throws IOException {
 InetSocketAddress targetAddr = null;
 ExtendedBlock block = testBlock.getBlock();
 DatanodeInfo[] nodes = testBlock.getLocations();
 targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
 
-final DistributedFileSystem fs = cluster.getFileSystem();
 return new BlockReaderFactory(fs.getClient().getConf()).
   setInetSocketAddress(targetAddr).
   setBlock(block).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbeb2c99/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
index d8aceff..1a767c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
@@ -250,8 +250,8 @@ public class TestBlockReaderFactory {
   LocatedBlock lblock = locatedBlocks.get(0); // first block
   BlockReader blockReader = null;
   try {
-blockReader = BlockReaderTestUtil.
-getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
+blockReader = BlockReaderTestUtil.getBlockReader(
+cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
Assert.fail("expected getBlockReader to fail the first time.");
   } catch (Throwable t) { 
Assert.assertTrue("expected to see 'TCP reads were disabled' " +
@@ -265,8 +265,8 @@ public class TestBlockReaderFactory {
 
   // Second time should succeed.
   

[14/50] hadoop git commit: HDFS-8104 Make hard-coded values consistent with the system default schema first before remove them. Contributed by Kai Zheng

2015-04-29 Thread jing9
HDFS-8104 Make hard-coded values consistent with the system default schema 
first before remove them. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae857573
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae857573
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae857573

Branch: refs/heads/HDFS-7285
Commit: ae857573dd38f0c689f8bb394c6f9d9d220d3967
Parents: 44aa26a
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 10 00:16:28 2015 +0800
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:14:10 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java |  12 +-
 .../hadoop/hdfs/TestPlanReadPortions.java   | 142 +++
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 112 ---
 4 files changed, 154 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae857573/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 5078a15..1e695c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -54,4 +54,6 @@
 HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from
 NameNode (vinayakumarb)
 
-HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
\ No newline at end of file
+HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
+
+HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae857573/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 72b8f38..4c67371 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -173,11 +173,17 @@ public class HdfsConstants {
   public static final byte WARM_STORAGE_POLICY_ID = 5;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
 
-  public static final byte NUM_DATA_BLOCKS = 3;
-  public static final byte NUM_PARITY_BLOCKS = 2;
+
   public static final long BLOCK_GROUP_INDEX_MASK = 15;
   public static final byte MAX_BLOCKS_IN_GROUP = 16;
 
+  /*
+   * These values correspond to the values used by the system default schema.
+   * TODO: to be removed once all places use schema.
+   */
+
+  public static final byte NUM_DATA_BLOCKS = 6;
+  public static final byte NUM_PARITY_BLOCKS = 3;
   // The chunk size for striped block which is used by erasure coding
-  public static final int BLOCK_STRIPED_CELL_SIZE = 128 * 1024;
+  public static final int BLOCK_STRIPED_CELL_SIZE = 256 * 1024;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae857573/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
new file mode 100644
index 000..cf84b30
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package 

[04/50] hadoop git commit: HDFS-7839. Erasure coding: implement facilities in NameNode to create and manage EC zones. Contributed by Zhe Zhang

2015-04-29 Thread jing9
HDFS-7839. Erasure coding: implement facilities in NameNode to create and 
manage EC zones. Contributed by Zhe Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d717dc3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d717dc3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d717dc3e

Branch: refs/heads/HDFS-7285
Commit: d717dc3e9ee5d81c6e4365e4ed1015b392bdd653
Parents: 45db005
Author: Zhe Zhang z...@apache.org
Authored: Thu Apr 2 22:38:29 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:06:49 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  15 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java|   8 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 -
 ...tNamenodeProtocolServerSideTranslatorPB.java |  14 ++
 .../ClientNamenodeProtocolTranslatorPB.java |  16 ++
 .../BlockStoragePolicySuite.java|   5 -
 .../hdfs/server/common/HdfsServerConstants.java |   2 +
 .../namenode/ErasureCodingZoneManager.java  | 112 ++
 .../hdfs/server/namenode/FSDirRenameOp.java |   2 +
 .../hdfs/server/namenode/FSDirectory.java   |  26 +++-
 .../hdfs/server/namenode/FSNamesystem.java  |  40 +
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  10 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  16 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |   9 ++
 .../hadoop/hdfs/TestBlockStoragePolicy.java |  12 +-
 .../hadoop/hdfs/TestErasureCodingZones.java | 151 +++
 .../TestBlockInitialEncoding.java   |  75 -
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../server/namenode/TestFSEditLogLoader.java|   6 +-
 .../hdfs/server/namenode/TestFSImage.java   |  23 ++-
 .../namenode/TestRecoverStripedBlocks.java  |   7 +-
 21 files changed, 431 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8fc9e77..da3b0e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2954,6 +2954,21 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
+  public void createErasureCodingZone(String src)
+  throws IOException {
+checkOpen();
+TraceScope scope = getPathTraceScope(createErasureCodingZone, src);
+try {
+  namenode.createErasureCodingZone(src);
+} catch (RemoteException re) {
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class,
+  UnresolvedPathException.class);
+} finally {
+  scope.close();
+}
+  }
+
   public void setXAttr(String src, String name, byte[] value, 
   EnumSetXAttrSetFlag flag) throws IOException {
 checkOpen();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index bafb02b..8efe344 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1363,6 +1363,14 @@ public interface ClientProtocol {
   long prevId) throws IOException;
 
   /**
+   * Create an erasure coding zone (currently with hardcoded schema)
+   * TODO: Configurable and pluggable schemas (HDFS-7337)
+   */
+  @Idempotent
+  public void createErasureCodingZone(String src)
+  throws IOException;
+
+  /**
* Set xattr of a file or directory.
* The name must be prefixed with the namespace followed by .. For example,
* user.attr.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 

[45/50] hadoop git commit: HDFS-8033. Erasure coding: stateful (non-positional) read from files in striped layout. Contributed by Zhe Zhang.

2015-04-29 Thread jing9
HDFS-8033. Erasure coding: stateful (non-positional) read from files in striped 
layout. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8276301
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8276301
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8276301

Branch: refs/heads/HDFS-7285
Commit: f8276301453cf10c6fe0e9cc01a23a61a8ab03ac
Parents: d1e7dfa
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 24 22:36:15 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:17:52 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  55 ++--
 .../hadoop/hdfs/DFSStripedInputStream.java  | 311 ++-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |  43 +++
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 110 ++-
 5 files changed, 465 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8276301/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index cf41a9b..e8db485 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -131,3 +131,6 @@
 
 HDFS-8228. Erasure Coding: SequentialBlockGroupIdGenerator#nextValue may 
cause 
 block id conflicts (Jing Zhao via Zhe Zhang)
+
+HDFS-8033. Erasure coding: stateful (non-positional) read from files in 
+striped layout (Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8276301/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 16250dd..6eb25d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -95,34 +95,34 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   public static boolean tcpReadsDisabledForTesting = false;
   private long hedgedReadOpsLoopNumForTesting = 0;
   protected final DFSClient dfsClient;
-  private AtomicBoolean closed = new AtomicBoolean(false);
-  private final String src;
-  private final boolean verifyChecksum;
+  protected AtomicBoolean closed = new AtomicBoolean(false);
+  protected final String src;
+  protected final boolean verifyChecksum;
 
   // state by stateful read only:
   // (protected by lock on this)
   /
   private DatanodeInfo currentNode = null;
-  private LocatedBlock currentLocatedBlock = null;
-  private long pos = 0;
-  private long blockEnd = -1;
+  protected LocatedBlock currentLocatedBlock = null;
+  protected long pos = 0;
+  protected long blockEnd = -1;
   private BlockReader blockReader = null;
   
 
   // state shared by stateful and positional read:
   // (protected by lock on infoLock)
   
-  private LocatedBlocks locatedBlocks = null;
+  protected LocatedBlocks locatedBlocks = null;
   private long lastBlockBeingWrittenLength = 0;
   private FileEncryptionInfo fileEncryptionInfo = null;
-  private CachingStrategy cachingStrategy;
+  protected CachingStrategy cachingStrategy;
   
 
-  private final ReadStatistics readStatistics = new ReadStatistics();
+  protected final ReadStatistics readStatistics = new ReadStatistics();
   // lock for state shared between read and pread
   // Note: Never acquire a lock on this with this lock held to avoid 
deadlocks
   //   (it's OK to acquire this lock when the lock on this is held)
-  private final Object infoLock = new Object();
+  protected final Object infoLock = new Object();
 
   /**
* Track the ByteBuffers that we have handed out to readers.
@@ -239,7 +239,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
* back to the namenode to get a new list of block locations, and is
* capped at maxBlockAcquireFailures
*/
-  private int failures = 0;
+  protected int failures = 0;
 
  /* XXX Use of ConcurrentHashMap is temp fix. Need to fix 
 * parallel accesses to DFSInputStream (through preads) properly */
@@ -476,7 +476,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   }
 
   /** Fetch a block from namenode and cache it */
-  private void fetchBlockAt(long offset) throws IOException {
+  protected void fetchBlockAt(long 

[48/50] hadoop git commit: HDFS-8235. Erasure Coding: Create DFSStripedInputStream in DFSClient#open. Contributed by Kai Sasaki.

2015-04-29 Thread jing9
HDFS-8235. Erasure Coding: Create DFSStripedInputStream in DFSClient#open. 
Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3102e6af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3102e6af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3102e6af

Branch: refs/heads/HDFS-7285
Commit: 3102e6af1c6a7a0ac52d5a52447a5e4690ec9eb4
Parents: 6452510
Author: Jing Zhao ji...@apache.org
Authored: Tue Apr 28 13:42:24 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:17:52 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 -
 .../main/java/org/apache/hadoop/hdfs/DFSClient.java |  7 ++-
 .../apache/hadoop/hdfs/DFSStripedInputStream.java   |  5 +++--
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 16 +++-
 .../org/apache/hadoop/hdfs/TestReadStripedFile.java | 11 ---
 5 files changed, 28 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3102e6af/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 6c5d7ce..9b4bf24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -139,4 +139,7 @@
 commands from standbynode if any (vinayakumarb)
 
 HDFS-8189. ClientProtocol#createErasureCodingZone API was wrongly annotated
-as Idempotent (vinayakumarb)
\ No newline at end of file
+as Idempotent (vinayakumarb)
+
+HDFS-8235. Erasure Coding: Create DFSStripedInputStream in DFSClient#open.
+(Kai Sasaki via jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3102e6af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 337cfd3..203257b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1179,7 +1179,12 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 //Get block info from namenode
 TraceScope scope = getPathTraceScope(newDFSInputStream, src);
 try {
-  return new DFSInputStream(this, src, verifyChecksum);
+  ECInfo info = getErasureCodingInfo(src);
+  if (info != null) {
+return new DFSStripedInputStream(this, src, verifyChecksum, info);
+  } else {
+return new DFSInputStream(this, src, verifyChecksum);
+  }
 } finally {
   scope.close();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3102e6af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index fe9e101..f6f7ed2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -134,11 +134,12 @@ public class DFSStripedInputStream extends DFSInputStream 
{
   private final short parityBlkNum;
   private final ECInfo ecInfo;
 
-  DFSStripedInputStream(DFSClient dfsClient, String src, boolean 
verifyChecksum)
+  DFSStripedInputStream(DFSClient dfsClient, String src, boolean 
verifyChecksum, ECInfo info)
   throws IOException {
 super(dfsClient, src, verifyChecksum);
 // ECInfo is restored from NN just before reading striped file.
-ecInfo = dfsClient.getErasureCodingInfo(src);
+assert info != null;
+ecInfo = info;
 cellSize = ecInfo.getSchema().getChunkSize();
 dataBlkNum = (short)ecInfo.getSchema().getNumDataUnits();
 parityBlkNum = (short)ecInfo.getSchema().getNumParityUnits();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3102e6af/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
 

[26/50] hadoop git commit: HDFS-8120. Erasure coding: created util class to analyze striped block groups. Contributed by Zhe Zhang and Li Bo.

2015-04-29 Thread jing9
HDFS-8120. Erasure coding: created util class to analyze striped block groups. 
Contributed by Zhe Zhang and Li Bo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d27f5dce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d27f5dce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d27f5dce

Branch: refs/heads/HDFS-7285
Commit: d27f5dce8f7cfc7eff009817de65c1acdcf254ca
Parents: 186f139
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 15 12:59:27 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:53 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |   4 +-
 .../hadoop/hdfs/DFSStripedInputStream.java  |  77 +++
 .../hadoop/hdfs/DFSStripedOutputStream.java |  34 +++--
 .../apache/hadoop/hdfs/StripedDataStreamer.java |  58 ++--
 .../server/blockmanagement/BlockManager.java|  26 +++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 138 +++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  91 +++-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  83 +--
 .../apache/hadoop/hdfs/TestReadStripedFile.java |  92 +++--
 .../server/namenode/TestAddStripedBlocks.java   | 107 ++
 .../namenode/TestRecoverStripedBlocks.java  |   3 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  | 125 +
 12 files changed, 562 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d27f5dce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 9104f84..16250dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1148,9 +1148,9 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 for (int i = 0; i  offsets.length; i++) {
   int nread = reader.readAll(buf, offsets[i], lengths[i]);
   updateReadStatistics(readStatistics, nread, reader);
-  if (nread != len) {
+  if (nread != lengths[i]) {
 throw new IOException(truncated return from reader.read():  +
-excpected  + len + , got  + nread);
+excpected  + lengths[i] + , got  + nread);
   }
 }
 DFSClientFaultInjector.get().readFromDatanodeDelay();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d27f5dce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 8a431b1..d597407 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
@@ -50,7 +51,7 @@ import java.util.concurrent.Future;
  *
  * | - Striped Block Group - |
  *  blk_0  blk_1   blk_2   - A striped block group has
- *|  |   |  {@link #groupSize} blocks
+ *|  |   |  {@link #dataBlkNum} blocks
  *v  v   v
  * +--+   +--+   +--+
  * |cell_0|   |cell_1|   |cell_2|  - The logical read order should be
@@ -72,7 +73,7 @@ import java.util.concurrent.Future;
 public class DFSStripedInputStream extends DFSInputStream {
   /**
* This method plans the read portion from each block in the stripe
-   * @param groupSize The size / width of the striping group
+   * @param dataBlkNum The number of data blocks in the striping group
* @param cellSize The size of each striping cell
* @param startInBlk Starting offset in the striped block
* @param len Length of the read request
@@ -81,29 +82,29 @@ public class DFSStripedInputStream extends DFSInputStream {
* for an individual 

[41/50] hadoop git commit: HDFS-8228. Erasure Coding: SequentialBlockGroupIdGenerator#nextValue may cause block id conflicts. Contributed by Jing Zhao.

2015-04-29 Thread jing9
HDFS-8228. Erasure Coding: SequentialBlockGroupIdGenerator#nextValue may cause 
block id conflicts. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1e7dfa0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1e7dfa0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1e7dfa0

Branch: refs/heads/HDFS-7285
Commit: d1e7dfa0e0e9bf76f4925936f27b1768339cb5dd
Parents: f24c2c0
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 24 09:30:38 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 11:16:56 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 ++
 .../SequentialBlockGroupIdGenerator.java| 39 +++---
 .../SequentialBlockIdGenerator.java |  2 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 57 +++-
 .../server/namenode/TestAddStripedBlocks.java   | 21 
 5 files changed, 77 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e7dfa0/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9357e23..cf41a9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -128,3 +128,6 @@
 
 HDFS-8223. Should calculate checksum for parity blocks in 
DFSStripedOutputStream.
 (Yi Liu via jing9)
+
+HDFS-8228. Erasure Coding: SequentialBlockGroupIdGenerator#nextValue may 
cause 
+block id conflicts (Jing Zhao via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1e7dfa0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockGroupIdGenerator.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockGroupIdGenerator.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockGroupIdGenerator.java
index e9e22ee..de8e379 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockGroupIdGenerator.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockGroupIdGenerator.java
@@ -19,9 +19,11 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.util.SequentialNumber;
 
+import static 
org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_GROUP_INDEX_MASK;
+import static 
org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_BLOCKS_IN_GROUP;
+
 /**
  * Generate the next valid block group ID by incrementing the maximum block
  * group ID allocated so far, with the first 2^10 block group IDs reserved.
@@ -34,6 +36,9 @@ import org.apache.hadoop.util.SequentialNumber;
  * bits (n+2) to (64-m) represent the ID of its block group, while the last m
  * bits represent its index of the group. The value m is determined by the
  * maximum number of blocks in a group (MAX_BLOCKS_IN_GROUP).
+ *
+ * Note that the {@link #nextValue()} methods requires external lock to
+ * guarantee IDs have no conflicts.
  */
 @InterfaceAudience.Private
 public class SequentialBlockGroupIdGenerator extends SequentialNumber {
@@ -47,32 +52,30 @@ public class SequentialBlockGroupIdGenerator extends 
SequentialNumber {
 
   @Override // NumberGenerator
   public long nextValue() {
-// Skip to next legitimate block group ID based on the naming protocol
-while (super.getCurrentValue() % HdfsConstants.MAX_BLOCKS_IN_GROUP  0) {
-  super.nextValue();
-}
+skipTo((getCurrentValue()  ~BLOCK_GROUP_INDEX_MASK) + 
MAX_BLOCKS_IN_GROUP);
 // Make sure there's no conflict with existing random block IDs
-while (hasValidBlockInRange(super.getCurrentValue())) {
-  super.skipTo(super.getCurrentValue() +
-  HdfsConstants.MAX_BLOCKS_IN_GROUP);
+final Block b = new Block(getCurrentValue());
+while (hasValidBlockInRange(b)) {
+  skipTo(getCurrentValue() + MAX_BLOCKS_IN_GROUP);
+  b.setBlockId(getCurrentValue());
 }
-if (super.getCurrentValue() = 0) {
-  BlockManager.LOG.warn(All negative block group IDs are used,  +
-  growing into positive IDs,  +
-  which might conflict with non-erasure coded blocks.);
+if (b.getBlockId() = 0) {
+  throw new IllegalStateException(All 

hadoop git commit: HDFS-8269. getBlockLocations() does not resolve the .reserved path and generates incorrect edit logs when updating the atime. Contributed by Haohui Mai.

2015-04-29 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ecdebb736 - 460127e6f


HDFS-8269. getBlockLocations() does not resolve the .reserved path and 
generates incorrect edit logs when updating the atime. Contributed by Haohui 
Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/460127e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/460127e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/460127e6

Branch: refs/heads/branch-2
Commit: 460127e6f2dc172240fbcf1271ddc1691f1910f0
Parents: ecdebb7
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 29 11:12:45 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 29 11:12:54 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  63 ++---
 .../hdfs/server/namenode/NamenodeFsck.java  |   4 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   8 +-
 .../server/namenode/TestGetBlockLocations.java  | 133 +++
 5 files changed, 188 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/460127e6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8a4fd79..f9a6949 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -311,6 +311,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8273. FSNamesystem#Delete() should not call logSync() when holding the
 lock. (wheat9)
 
+HDFS-8269. getBlockLocations() does not resolve the .reserved path and
+generates incorrect edit logs when updating the atime. (wheat9)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/460127e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d0d5f70..a92eba6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1687,13 +1687,14 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   }
 
   static class GetBlockLocationsResult {
-final INodesInPath iip;
+final boolean updateAccessTime;
 final LocatedBlocks blocks;
 boolean updateAccessTime() {
-  return iip != null;
+  return updateAccessTime;
 }
-private GetBlockLocationsResult(INodesInPath iip, LocatedBlocks blocks) {
-  this.iip = iip;
+private GetBlockLocationsResult(
+boolean updateAccessTime, LocatedBlocks blocks) {
+  this.updateAccessTime = updateAccessTime;
   this.blocks = blocks;
 }
   }
@@ -1702,34 +1703,58 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
* Get block locations within the specified range.
* @see ClientProtocol#getBlockLocations(String, long, long)
*/
-  LocatedBlocks getBlockLocations(String clientMachine, String src,
+  LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
   long offset, long length) throws IOException {
 checkOperation(OperationCategory.READ);
 GetBlockLocationsResult res = null;
+FSPermissionChecker pc = getPermissionChecker();
 readLock();
 try {
   checkOperation(OperationCategory.READ);
-  res = getBlockLocations(src, offset, length, true, true);
+  res = getBlockLocations(pc, srcArg, offset, length, true, true);
 } catch (AccessControlException e) {
-  logAuditEvent(false, open, src);
+  logAuditEvent(false, open, srcArg);
   throw e;
 } finally {
   readUnlock();
 }
 
-logAuditEvent(true, open, src);
+logAuditEvent(true, open, srcArg);
 
 if (res.updateAccessTime()) {
+  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
+  srcArg);
+  String src = srcArg;
   writeLock();
   final long now = now();
   try {
 checkOperation(OperationCategory.WRITE);
-INode inode = res.iip.getLastINode();
-boolean updateAccessTime = now  inode.getAccessTime() +
-getAccessTimePrecision();
+/**
+ * Resolve the path again and update the atime only when the file
+ * exists.
+ *
+ * XXX: Races can still occur even after 

hadoop git commit: HDFS-8269. getBlockLocations() does not resolve the .reserved path and generates incorrect edit logs when updating the atime. Contributed by Haohui Mai.

2015-04-29 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7947e5b53 - 3dd6395bb


HDFS-8269. getBlockLocations() does not resolve the .reserved path and 
generates incorrect edit logs when updating the atime. Contributed by Haohui 
Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3dd6395b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3dd6395b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3dd6395b

Branch: refs/heads/trunk
Commit: 3dd6395bb2448e5b178a51c864e3c9a3d12e8bc9
Parents: 7947e5b
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 29 11:12:45 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 29 11:12:45 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  63 ++---
 .../hdfs/server/namenode/NamenodeFsck.java  |   4 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   8 +-
 .../server/namenode/TestGetBlockLocations.java  | 133 +++
 5 files changed, 188 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dd6395b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2dde356..a3f219b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -635,6 +635,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8273. FSNamesystem#Delete() should not call logSync() when holding the
 lock. (wheat9)
 
+HDFS-8269. getBlockLocations() does not resolve the .reserved path and
+generates incorrect edit logs when updating the atime. (wheat9)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dd6395b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0ec81d8..b938263 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1697,13 +1697,14 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   }
 
   static class GetBlockLocationsResult {
-final INodesInPath iip;
+final boolean updateAccessTime;
 final LocatedBlocks blocks;
 boolean updateAccessTime() {
-  return iip != null;
+  return updateAccessTime;
 }
-private GetBlockLocationsResult(INodesInPath iip, LocatedBlocks blocks) {
-  this.iip = iip;
+private GetBlockLocationsResult(
+boolean updateAccessTime, LocatedBlocks blocks) {
+  this.updateAccessTime = updateAccessTime;
   this.blocks = blocks;
 }
   }
@@ -1712,34 +1713,58 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
* Get block locations within the specified range.
* @see ClientProtocol#getBlockLocations(String, long, long)
*/
-  LocatedBlocks getBlockLocations(String clientMachine, String src,
+  LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
   long offset, long length) throws IOException {
 checkOperation(OperationCategory.READ);
 GetBlockLocationsResult res = null;
+FSPermissionChecker pc = getPermissionChecker();
 readLock();
 try {
   checkOperation(OperationCategory.READ);
-  res = getBlockLocations(src, offset, length, true, true);
+  res = getBlockLocations(pc, srcArg, offset, length, true, true);
 } catch (AccessControlException e) {
-  logAuditEvent(false, "open", src);
+  logAuditEvent(false, "open", srcArg);
   throw e;
 } finally {
   readUnlock();
 }
 
-logAuditEvent(true, "open", src);
+logAuditEvent(true, "open", srcArg);
 
 if (res.updateAccessTime()) {
+  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
+  srcArg);
+  String src = srcArg;
   writeLock();
   final long now = now();
   try {
 checkOperation(OperationCategory.WRITE);
-INode inode = res.iip.getLastINode();
-boolean updateAccessTime = now > inode.getAccessTime() +
-getAccessTimePrecision();
+/**
+ * Resolve the path again and update the atime only when the file
+ * exists.
+ *
+ * XXX: Races can still occur even after 

hadoop git commit: HDFS-8283. DataStreamer cleanup and some minor improvement. Contributed by Tsz Wo Nicholas Sze.

2015-04-29 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4cc38df7d - ecdebb736


HDFS-8283. DataStreamer cleanup and some minor improvement. Contributed by Tsz 
Wo Nicholas Sze.

(cherry picked from commit 7947e5b53b9ac9524b535b0384c1c355b74723ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ecdebb73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ecdebb73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ecdebb73

Branch: refs/heads/branch-2
Commit: ecdebb7369008883a76242025cf1078b117857d3
Parents: 4cc38df
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 29 10:41:46 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Apr 29 10:43:52 2015 -0700

--
 .../apache/hadoop/io/MultipleIOException.java   |  26 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  30 +--
 .../org/apache/hadoop/hdfs/DataStreamer.java| 235 ++-
 .../apache/hadoop/hdfs/TestDFSOutputStream.java |   3 +-
 5 files changed, 162 insertions(+), 135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecdebb73/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
index 5e584c9c..66c1ab1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.io;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -51,4 +52,29 @@ public class MultipleIOException extends IOException {
 }
 return new MultipleIOException(exceptions);
   }
+
+  /**
+   * Build an {@link IOException} using {@link MultipleIOException}
+   * if there are more than one.
+   */
+  public static class Builder {
+private List<IOException> exceptions;
+
+/** Add the given {@link Throwable} to the exception list. */
+public void add(Throwable t) {
+  if (exceptions == null) {
+exceptions = new ArrayList<>();
+  }
+  exceptions.add(t instanceof IOException? (IOException)t
+  : new IOException(t));
+}
+
+/**
+ * @return null if nothing is added to this builder;
+ * otherwise, return an {@link IOException}
+ */
+public IOException build() {
+  return createIOException(exceptions);
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecdebb73/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0f97ac3..8a4fd79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -156,6 +156,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8280. Code Cleanup in DFSInputStream. (Jing Zhao via wheat9)
 
+HDFS-8283. DataStreamer cleanup and some minor improvement. (szetszwo via
+jing9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecdebb73/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 47885e9..f902d21 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -139,8 +139,7 @@ public class DFSOutputStream extends FSOutputSummer
   @Override
   protected void checkClosed() throws IOException {
 if (isClosed()) {
-  IOException e = streamer.getLastException().get();
-  throw e != null ? e : new ClosedChannelException();
+  streamer.getLastException().throwException4Close();
 }
   }
 
@@ -216,10 +215,7 @@ public class DFSOutputStream extends FSOutputSummer
 computePacketChunkSize(dfsClient.getConf().getWritePacketSize(), 
bytesPerChecksum);
 
 streamer = new DataStreamer(stat, null, dfsClient, src, progress, checksum,
-cachingStrategy, byteArrayManager);
-if 

hadoop git commit: HDFS-8269. getBlockLocations() does not resolve the .reserved path and generates incorrect edit logs when updating the atime. Contributed by Haohui Mai.

2015-04-29 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 396c41b1d - 6ac2b5712


HDFS-8269. getBlockLocations() does not resolve the .reserved path and 
generates incorrect edit logs when updating the atime. Contributed by Haohui 
Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ac2b571
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ac2b571
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ac2b571

Branch: refs/heads/branch-2.7
Commit: 6ac2b5712b05eb9032a7ee1fc14ca2ffed5e5ef1
Parents: 396c41b
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 29 11:12:45 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 29 11:13:03 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  63 ++---
 .../hdfs/server/namenode/NamenodeFsck.java  |   4 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   8 +-
 .../server/namenode/TestGetBlockLocations.java  | 133 +++
 5 files changed, 188 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ac2b571/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1a99e3f..4ad47a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -53,6 +53,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8273. FSNamesystem#Delete() should not call logSync() when holding the
 lock. (wheat9)
 
+HDFS-8269. getBlockLocations() does not resolve the .reserved path and
+generates incorrect edit logs when updating the atime. (wheat9)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ac2b571/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 94e7f6c..de504c4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1685,13 +1685,14 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   }
 
   static class GetBlockLocationsResult {
-final INodesInPath iip;
+final boolean updateAccessTime;
 final LocatedBlocks blocks;
 boolean updateAccessTime() {
-  return iip != null;
+  return updateAccessTime;
 }
-private GetBlockLocationsResult(INodesInPath iip, LocatedBlocks blocks) {
-  this.iip = iip;
+private GetBlockLocationsResult(
+boolean updateAccessTime, LocatedBlocks blocks) {
+  this.updateAccessTime = updateAccessTime;
   this.blocks = blocks;
 }
   }
@@ -1700,34 +1701,58 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
* Get block locations within the specified range.
* @see ClientProtocol#getBlockLocations(String, long, long)
*/
-  LocatedBlocks getBlockLocations(String clientMachine, String src,
+  LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
   long offset, long length) throws IOException {
 checkOperation(OperationCategory.READ);
 GetBlockLocationsResult res = null;
+FSPermissionChecker pc = getPermissionChecker();
 readLock();
 try {
   checkOperation(OperationCategory.READ);
-  res = getBlockLocations(src, offset, length, true, true);
+  res = getBlockLocations(pc, srcArg, offset, length, true, true);
 } catch (AccessControlException e) {
-  logAuditEvent(false, "open", src);
+  logAuditEvent(false, "open", srcArg);
   throw e;
 } finally {
   readUnlock();
 }
 
-logAuditEvent(true, "open", src);
+logAuditEvent(true, "open", srcArg);
 
 if (res.updateAccessTime()) {
+  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
+  srcArg);
+  String src = srcArg;
   writeLock();
   final long now = now();
   try {
 checkOperation(OperationCategory.WRITE);
-INode inode = res.iip.getLastINode();
-boolean updateAccessTime = now > inode.getAccessTime() +
-getAccessTimePrecision();
+/**
+ * Resolve the path again and update the atime only when the file
+ * exists.
+ *
+ * XXX: Races can still occur even after 

hadoop git commit: YARN-3533. Test: Fix launchAM in MockRM to wait for attempt to be scheduled. Contributed by Anubhav Dhoot

2015-04-29 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2e215484b - 4c1af156a


YARN-3533. Test: Fix launchAM in MockRM to wait for attempt to be scheduled. 
Contributed by Anubhav Dhoot


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c1af156
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c1af156
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c1af156

Branch: refs/heads/trunk
Commit: 4c1af156aef4f3bb1d9823d5980c59b12007dc77
Parents: 2e21548
Author: Jian He jia...@apache.org
Authored: Wed Apr 29 14:50:01 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Wed Apr 29 14:50:01 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../apache/hadoop/yarn/server/resourcemanager/MockRM.java| 8 +++-
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c1af156/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6b8bde9..f583e6a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -271,6 +271,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3517. RM web ui for dumping scheduler logs should be for admins only
 (Varun Vasudev via tgraves)
 
+YARN-3533. Test: Fix launchAM in MockRM to wait for attempt to be 
scheduled.
+(Anubhav Dhoot via jianhe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c1af156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index f2b1d86..63d6557 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -164,6 +164,8 @@ public class MockRM extends ResourceManager {
   nm.nodeHeartbeat(true);
   Thread.sleep(200);
 }
+Assert.assertNotNull("Failed in waiting for " + containerId + " " +
+"allocation.", getResourceScheduler().getRMContainer(containerId));
   }
 
   public void waitForContainerToComplete(RMAppAttempt attempt,
@@ -662,7 +664,7 @@ public class MockRM extends ResourceManager {
 am.waitForState(RMAppAttemptState.FINISHED);
 rm.waitForState(rmApp.getApplicationId(), RMAppState.FINISHED);
   }
-  
+
   @SuppressWarnings("rawtypes")
   private static void waitForSchedulerAppAttemptAdded(
   ApplicationAttemptId attemptId, MockRM rm) throws InterruptedException {
@@ -677,6 +679,9 @@ public class MockRM extends ResourceManager {
   }
   tick++;
 }
+Assert.assertNotNull("Timed out waiting for SchedulerApplicationAttempt=" +
+  attemptId + " to be added.", ((AbstractYarnScheduler)
+rm.getResourceScheduler()).getApplicationAttempt(attemptId));
   }
 
   public static MockAM launchAM(RMApp app, MockRM rm, MockNM nm)
@@ -684,6 +689,7 @@ public class MockRM extends ResourceManager {
 rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
 RMAppAttempt attempt = app.getCurrentAppAttempt();
 waitForSchedulerAppAttemptAdded(attempt.getAppAttemptId(), rm);
+rm.waitForState(attempt.getAppAttemptId(), RMAppAttemptState.SCHEDULED);
 System.out.println("Launch AM " + attempt.getAppAttemptId());
 nm.nodeHeartbeat(true);
 MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());



hadoop git commit: YARN-3517. RM web ui for dumping scheduler logs should be for admins only (Varun Vasudev via tgraves)

2015-04-29 Thread tgraves
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3dd6395bb - 2e215484b


YARN-3517. RM web ui for dumping scheduler logs should be for admins only 
(Varun Vasudev via tgraves)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e215484
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e215484
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e215484

Branch: refs/heads/trunk
Commit: 2e215484bd05cd5e3b7a81d3558c6879a05dd2d2
Parents: 3dd6395
Author: tgraves tgra...@apache.org
Authored: Wed Apr 29 21:25:42 2015 +
Committer: tgraves tgra...@apache.org
Committed: Wed Apr 29 21:25:42 2015 +

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../server/security/ApplicationACLsManager.java | 11 +++
 .../webapp/CapacitySchedulerPage.java   | 51 +
 .../resourcemanager/webapp/RMWebServices.java   | 13 +++-
 .../webapp/TestRMWebServices.java   | 77 
 5 files changed, 139 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e215484/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b5581d6..6b8bde9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -268,6 +268,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2740. Fix NodeLabelsManager to properly handle node label 
modifications 
 when distributed node label configuration enabled. (Naganarasimha G R via 
wangda)
 
+YARN-3517. RM web ui for dumping scheduler logs should be for admins only
+(Varun Vasudev via tgraves)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e215484/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
index 4daaa68..97b4163 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
@@ -138,4 +138,15 @@ public class ApplicationACLsManager {
 }
 return false;
   }
+
+  /**
+   * Check if the given user in an admin.
+   *
+   * @param calledUGI
+   *  UserGroupInformation for the user
+   * @return true if the user is an admin, false otherwise
+   */
+  public final boolean isAdmin(final UserGroupInformation calledUGI) {
+return this.adminAclsManager.isAdmin(calledUGI);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e215484/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 2eeda66..fa22a0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerHealth;
@@ -33,6 +34,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerLeafQueueInfo;
 import 

hadoop git commit: YARN-3533. Test: Fix launchAM in MockRM to wait for attempt to be scheduled. Contributed by Anubhav Dhoot (cherry picked from commit 4c1af156aef4f3bb1d9823d5980c59b12007dc77)

2015-04-29 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2e13183f6 - a7a5737bc


YARN-3533. Test: Fix launchAM in MockRM to wait for attempt to be scheduled. 
Contributed by Anubhav Dhoot
(cherry picked from commit 4c1af156aef4f3bb1d9823d5980c59b12007dc77)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7a5737b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7a5737b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7a5737b

Branch: refs/heads/branch-2
Commit: a7a5737bc46c9c9e1aa2bcc5c8a3e06d15394891
Parents: 2e13183
Author: Jian He jia...@apache.org
Authored: Wed Apr 29 14:50:01 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Wed Apr 29 14:51:04 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../apache/hadoop/yarn/server/resourcemanager/MockRM.java| 8 +++-
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7a5737b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8f4907f..fe8221f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -223,6 +223,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3517. RM web ui for dumping scheduler logs should be for admins only
 (Varun Vasudev via tgraves)
 
+YARN-3533. Test: Fix launchAM in MockRM to wait for attempt to be 
scheduled.
+(Anubhav Dhoot via jianhe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7a5737b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index f2b1d86..63d6557 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -164,6 +164,8 @@ public class MockRM extends ResourceManager {
   nm.nodeHeartbeat(true);
   Thread.sleep(200);
 }
+Assert.assertNotNull("Failed in waiting for " + containerId + " " +
+"allocation.", getResourceScheduler().getRMContainer(containerId));
   }
 
   public void waitForContainerToComplete(RMAppAttempt attempt,
@@ -662,7 +664,7 @@ public class MockRM extends ResourceManager {
 am.waitForState(RMAppAttemptState.FINISHED);
 rm.waitForState(rmApp.getApplicationId(), RMAppState.FINISHED);
   }
-  
+
   @SuppressWarnings("rawtypes")
   private static void waitForSchedulerAppAttemptAdded(
   ApplicationAttemptId attemptId, MockRM rm) throws InterruptedException {
@@ -677,6 +679,9 @@ public class MockRM extends ResourceManager {
   }
   tick++;
 }
+Assert.assertNotNull("Timed out waiting for SchedulerApplicationAttempt=" +
+  attemptId + " to be added.", ((AbstractYarnScheduler)
+rm.getResourceScheduler()).getApplicationAttempt(attemptId));
   }
 
   public static MockAM launchAM(RMApp app, MockRM rm, MockNM nm)
@@ -684,6 +689,7 @@ public class MockRM extends ResourceManager {
 rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
 RMAppAttempt attempt = app.getCurrentAppAttempt();
 waitForSchedulerAppAttemptAdded(attempt.getAppAttemptId(), rm);
+rm.waitForState(attempt.getAppAttemptId(), RMAppAttemptState.SCHEDULED);
 System.out.println("Launch AM " + attempt.getAppAttemptId());
 nm.nodeHeartbeat(true);
 MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());



hadoop git commit: YARN-3517. RM web ui for dumping scheduler logs should be for admins only (Varun Vasudev via tgraves) (cherry picked from commit 2e215484bd05cd5e3b7a81d3558c6879a05dd2d2)

2015-04-29 Thread tgraves
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 460127e6f - 2e13183f6


YARN-3517. RM web ui for dumping scheduler logs should be for admins only 
(Varun Vasudev via tgraves)
(cherry picked from commit 2e215484bd05cd5e3b7a81d3558c6879a05dd2d2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e13183f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e13183f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e13183f

Branch: refs/heads/branch-2
Commit: 2e13183f6010182aef7b1dfec2f9c1f1e9968011
Parents: 460127e
Author: tgraves tgra...@apache.org
Authored: Wed Apr 29 21:25:42 2015 +
Committer: tgraves tgra...@apache.org
Committed: Wed Apr 29 21:27:16 2015 +

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../server/security/ApplicationACLsManager.java | 11 +++
 .../webapp/CapacitySchedulerPage.java   | 51 +
 .../resourcemanager/webapp/RMWebServices.java   | 13 +++-
 .../webapp/TestRMWebServices.java   | 77 
 5 files changed, 139 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e13183f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 98e42c1..8f4907f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -220,6 +220,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2740. Fix NodeLabelsManager to properly handle node label 
modifications 
 when distributed node label configuration enabled. (Naganarasimha G R via 
wangda)
 
+YARN-3517. RM web ui for dumping scheduler logs should be for admins only
+(Varun Vasudev via tgraves)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e13183f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
index 4daaa68..97b4163 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
@@ -138,4 +138,15 @@ public class ApplicationACLsManager {
 }
 return false;
   }
+
+  /**
+   * Check if the given user in an admin.
+   *
+   * @param calledUGI
+   *  UserGroupInformation for the user
+   * @return true if the user is an admin, false otherwise
+   */
+  public final boolean isAdmin(final UserGroupInformation calledUGI) {
+return this.adminAclsManager.isAdmin(calledUGI);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e13183f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 2eeda66..fa22a0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerHealth;
@@ -33,6 +34,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
 import 

hadoop git commit: HDFS-8272. Erasure Coding: simplify the retry logic in DFSStripedInputStream (stateful read). Contributed by Jing Zhao

2015-04-29 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 f3d0e5588 - fc4fd38af


HDFS-8272. Erasure Coding: simplify the retry logic in DFSStripedInputStream 
(stateful read). Contributed by Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc4fd38a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc4fd38a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc4fd38a

Branch: refs/heads/HDFS-7285
Commit: fc4fd38af0ab566bbfe9f9523785ac553bc6610d
Parents: f3d0e55
Author: Zhe Zhang z...@apache.org
Authored: Wed Apr 29 15:53:31 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Wed Apr 29 15:53:31 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/DFSStripedInputStream.java  | 336 ---
 2 files changed, 150 insertions(+), 189 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc4fd38a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9b4bf24..6a9bdee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -143,3 +143,6 @@
 
 HDFS-8235. Erasure Coding: Create DFSStripedInputStream in DFSClient#open.
 (Kai Sasaki via jing9)
+
+HDFS-8272. Erasure Coding: simplify the retry logic in 
DFSStripedInputStream 
+(stateful read). (Jing Zhao via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc4fd38a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index f6f7ed2..3da7306 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -22,11 +22,8 @@ import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.*;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.token.Token;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
@@ -126,23 +123,42 @@ public class DFSStripedInputStream extends DFSInputStream 
{
 return results;
   }
 
+  private static class ReaderRetryPolicy {
+private int fetchEncryptionKeyTimes = 1;
+private int fetchTokenTimes = 1;
+
+void refetchEncryptionKey() {
+  fetchEncryptionKeyTimes--;
+}
+
+void refetchToken() {
+  fetchTokenTimes--;
+}
+
+boolean shouldRefetchEncryptionKey() {
+  return fetchEncryptionKeyTimes > 0;
+}
+
+boolean shouldRefetchToken() {
+  return fetchTokenTimes > 0;
+}
+  }
+
   private final short groupSize = HdfsConstants.NUM_DATA_BLOCKS;
-  private BlockReader[] blockReaders = null;
-  private DatanodeInfo[] currentNodes = null;
+  private final BlockReader[] blockReaders = new BlockReader[groupSize];
+  private final DatanodeInfo[] currentNodes = new DatanodeInfo[groupSize];
   private final int cellSize;
   private final short dataBlkNum;
   private final short parityBlkNum;
-  private final ECInfo ecInfo;
 
-  DFSStripedInputStream(DFSClient dfsClient, String src, boolean 
verifyChecksum, ECInfo info)
-  throws IOException {
+  DFSStripedInputStream(DFSClient dfsClient, String src, boolean 
verifyChecksum,
+  ECInfo ecInfo) throws IOException {
 super(dfsClient, src, verifyChecksum);
 // ECInfo is restored from NN just before reading striped file.
-assert info != null;
-ecInfo = info;
+assert ecInfo != null;
 cellSize = ecInfo.getSchema().getChunkSize();
-dataBlkNum = (short)ecInfo.getSchema().getNumDataUnits();
-parityBlkNum = (short)ecInfo.getSchema().getNumParityUnits();
+dataBlkNum = (short) ecInfo.getSchema().getNumDataUnits();
+parityBlkNum = (short) ecInfo.getSchema().getNumParityUnits();
 DFSClient.LOG.debug("Creating an striped input stream for file " + src);
   }
 
@@ -162,9 +178,7 @@ public class DFSStripedInputStream extends DFSInputStream {

hadoop git commit: HDFS-8214. Secondary NN Web UI shows wrong date for Last Checkpoint. Contributed by Charles Lamb.

2015-04-29 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4c1af156a -> aa2245044


HDFS-8214. Secondary NN Web UI shows wrong date for Last Checkpoint. 
Contributed by Charles Lamb.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa224504
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa224504
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa224504

Branch: refs/heads/trunk
Commit: aa22450442ebe39916a6fd460fe97e347945526d
Parents: 4c1af15
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 29 17:37:56 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 29 17:37:56 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hdfs/server/namenode/SecondaryNameNode.java | 35 ++--
 .../namenode/SecondaryNameNodeInfoMXBean.java   |  6 
 .../src/main/webapps/secondary/status.html  |  2 +-
 .../src/main/webapps/static/dfs-dust.js |  3 ++
 5 files changed, 36 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa224504/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a3f219b..fbeb45d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -582,6 +582,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8232. Missing datanode counters when using Metrics2 sink interface.
 (Anu Engineer via cnauroth)
 
+HDFS-8214. Secondary NN Web UI shows wrong date for Last Checkpoint. 
(clamb via wang)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa224504/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index ec7e0c9..b499e74 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -108,6 +108,7 @@ public class SecondaryNameNode implements Runnable,
 
   private final long starttime = Time.now();
   private volatile long lastCheckpointTime = 0;
+  private volatile long lastCheckpointWallclockTime = 0;
 
   private URL fsName;
   private CheckpointStorage checkpointImage;
@@ -134,8 +135,9 @@ public class SecondaryNameNode implements Runnable,
   + "\nName Node Address    : " + nameNodeAddr
   + "\nStart Time           : " + new Date(starttime)
   + "\nLast Checkpoint      : " + (lastCheckpointTime == 0? "--":
-          ((Time.monotonicNow() - lastCheckpointTime) / 1000))
-        + " seconds ago"
+          new Date(lastCheckpointWallclockTime))
+        + " (" + ((Time.monotonicNow() - lastCheckpointTime) / 1000)
+        + " seconds ago)"
   + "\nCheckpoint Period  : " + checkpointConf.getPeriod() + " seconds"
   + "\nCheckpoint Transactions: " + checkpointConf.getTxnCount()
   + "\nCheckpoint Dirs    : " + checkpointDirs
@@ -388,12 +390,14 @@ public class SecondaryNameNode implements Runnable,
 if(UserGroupInformation.isSecurityEnabled())
   UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab();
 
-final long now = Time.monotonicNow();
+final long monotonicNow = Time.monotonicNow();
+final long now = Time.now();
 
 if (shouldCheckpointBasedOnCount() ||
-    now >= lastCheckpointTime + 1000 * checkpointConf.getPeriod()) {
+    monotonicNow >= lastCheckpointTime + 1000 * checkpointConf.getPeriod()) {
   doCheckpoint();
-  lastCheckpointTime = now;
+  lastCheckpointTime = monotonicNow;
+  lastCheckpointWallclockTime = now;
 }
   } catch (IOException e) {
 LOG.error(Exception in doCheckpoint, e);
@@ -695,22 +699,31 @@ public class SecondaryNameNode implements Runnable,
 checkpointThread.start();
   }
 
-  @Override // SecondaryNameNodeInfoMXXBean
+  @Override // SecondaryNameNodeInfoMXBean
   public String getHostAndPort() {
 return NetUtils.getHostPortString(nameNodeAddr);
   }
 
-  @Override // SecondaryNameNodeInfoMXXBean
+  @Override // SecondaryNameNodeInfoMXBean
   public long getStartTime() {
 return starttime;
   }
 
-  @Override // SecondaryNameNodeInfoMXXBean
+  

hadoop git commit: HDFS-8214. Secondary NN Web UI shows wrong date for Last Checkpoint. Contributed by Charles Lamb.

2015-04-29 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a7a5737bc -> 9f8412ac6


HDFS-8214. Secondary NN Web UI shows wrong date for Last Checkpoint. 
Contributed by Charles Lamb.

(cherry picked from commit aa22450442ebe39916a6fd460fe97e347945526d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f8412ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f8412ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f8412ac

Branch: refs/heads/branch-2
Commit: 9f8412ac6ee0d0fa7c16f61939b9116061336910
Parents: a7a5737
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 29 17:37:56 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 29 17:38:09 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hdfs/server/namenode/SecondaryNameNode.java | 35 ++--
 .../namenode/SecondaryNameNodeInfoMXBean.java   |  6 
 .../src/main/webapps/secondary/status.html  |  2 +-
 .../src/main/webapps/static/dfs-dust.js |  3 ++
 5 files changed, 36 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f8412ac/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f9a6949..9cad1ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -258,6 +258,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8232. Missing datanode counters when using Metrics2 sink interface.
 (Anu Engineer via cnauroth)
 
+HDFS-8214. Secondary NN Web UI shows wrong date for Last Checkpoint. 
(clamb via wang)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f8412ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index ec7e0c9..b499e74 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -108,6 +108,7 @@ public class SecondaryNameNode implements Runnable,
 
   private final long starttime = Time.now();
   private volatile long lastCheckpointTime = 0;
+  private volatile long lastCheckpointWallclockTime = 0;
 
   private URL fsName;
   private CheckpointStorage checkpointImage;
@@ -134,8 +135,9 @@ public class SecondaryNameNode implements Runnable,
   + "\nName Node Address    : " + nameNodeAddr
   + "\nStart Time           : " + new Date(starttime)
   + "\nLast Checkpoint      : " + (lastCheckpointTime == 0? "--":
-          ((Time.monotonicNow() - lastCheckpointTime) / 1000))
-        + " seconds ago"
+          new Date(lastCheckpointWallclockTime))
+        + " (" + ((Time.monotonicNow() - lastCheckpointTime) / 1000)
+        + " seconds ago)"
   + "\nCheckpoint Period  : " + checkpointConf.getPeriod() + " seconds"
   + "\nCheckpoint Transactions: " + checkpointConf.getTxnCount()
   + "\nCheckpoint Dirs    : " + checkpointDirs
@@ -388,12 +390,14 @@ public class SecondaryNameNode implements Runnable,
 if(UserGroupInformation.isSecurityEnabled())
   UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab();
 
-final long now = Time.monotonicNow();
+final long monotonicNow = Time.monotonicNow();
+final long now = Time.now();
 
 if (shouldCheckpointBasedOnCount() ||
-    now >= lastCheckpointTime + 1000 * checkpointConf.getPeriod()) {
+    monotonicNow >= lastCheckpointTime + 1000 * checkpointConf.getPeriod()) {
   doCheckpoint();
-  lastCheckpointTime = now;
+  lastCheckpointTime = monotonicNow;
+  lastCheckpointWallclockTime = now;
 }
   } catch (IOException e) {
 LOG.error(Exception in doCheckpoint, e);
@@ -695,22 +699,31 @@ public class SecondaryNameNode implements Runnable,
 checkpointThread.start();
   }
 
-  @Override // SecondaryNameNodeInfoMXXBean
+  @Override // SecondaryNameNodeInfoMXBean
   public String getHostAndPort() {
 return NetUtils.getHostPortString(nameNodeAddr);
   }
 
-  @Override // SecondaryNameNodeInfoMXXBean
+  @Override // SecondaryNameNodeInfoMXBean
   public long getStartTime() {