HDFS-8854. Erasure coding: add ECPolicy to replace schema+cellSize in hadoop-hdfs. Contributed by Walter Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/acbe42a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/acbe42a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/acbe42a8

Branch: refs/heads/HDFS-7285-merge
Commit: acbe42a85216cea5c9e3a1135dc5318a27329bde
Parents: ecf3634
Author: Zhe Zhang <zhezh...@cloudera.com>
Authored: Wed Aug 12 11:21:43 2015 -0700
Committer: Zhe Zhang <zhezh...@cloudera.com>
Committed: Fri Aug 14 10:54:43 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/io/erasurecode/ECSchema.java  |  42 +----
 .../hadoop/io/erasurecode/SchemaLoader.java     | 152 -------------------
 .../hadoop/io/erasurecode/TestECSchema.java     |   6 +-
 .../hadoop/io/erasurecode/TestSchemaLoader.java |  74 ---------
 .../hdfs/client/HdfsClientConfigKeys.java       |   4 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java    |  10 +-
 .../hdfs/protocol/ErasureCodingPolicy.java      |  93 ++++++++++++
 .../hadoop/hdfs/protocol/ErasureCodingZone.java |  26 +---
 .../hadoop/hdfs/protocol/HdfsConstants.java     |   4 +-
 .../hadoop/hdfs/protocol/HdfsFileStatus.java    |  17 +--
 .../hadoop/hdfs/protocol/LocatedBlocks.java     |  25 +--
 .../protocol/SnapshottableDirectoryStatus.java  |   2 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |   4 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |   4 +-
 .../src/main/proto/erasurecoding.proto          |  17 +--
 .../src/main/proto/hdfs.proto                   |  21 +--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt        |   3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  20 +--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 -
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |   2 +-
 .../hadoop/hdfs/DFSStripedInputStream.java      |  20 +--
 .../hadoop/hdfs/DFSStripedOutputStream.java     |  10 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |  13 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |  23 ++-
 .../hdfs/protocol/HdfsLocatedFileStatus.java    |   5 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |  25 ++-
 .../ClientNamenodeProtocolTranslatorPB.java     |  36 +++--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  59 ++++---
 .../blockmanagement/BlockInfoStriped.java       |  37 ++---
 .../BlockInfoStripedUnderConstruction.java      |  13 +-
 .../server/blockmanagement/BlockManager.java    |  15 +-
 .../blockmanagement/DatanodeDescriptor.java     |   6 +-
 .../hdfs/server/datanode/StorageLocation.java   |   2 +-
 .../erasurecode/ErasureCodingWorker.java        |  11 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  14 +-
 .../namenode/ErasureCodingPolicyManager.java    | 115 ++++++++++++++
 .../namenode/ErasureCodingSchemaManager.java    | 127 ----------------
 .../namenode/ErasureCodingZoneManager.java      |  45 +++---
 .../server/namenode/FSDirErasureCodingOp.java   |  47 ++----
 .../server/namenode/FSDirStatAndListingOp.java  |  18 +--
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  11 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   8 +-
 .../server/namenode/FSImageFormatPBINode.java   |  23 +--
 .../hdfs/server/namenode/FSNamesystem.java      |  52 +++----
 .../hdfs/server/namenode/NameNodeRpcServer.java |  11 +-
 .../hdfs/server/namenode/NamenodeFsck.java      |  10 +-
 .../server/protocol/BlockECRecoveryCommand.java |  23 +--
 .../hdfs/tools/erasurecode/ECCommand.java       |  80 +++++-----
 .../hadoop/hdfs/util/StripedBlockUtil.java      |  56 +++----
 .../hadoop-hdfs/src/main/proto/fsimage.proto    |   1 -
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |   2 +-
 .../hadoop/hdfs/TestDFSClientRetries.java       |   6 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |  16 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   2 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |   2 +-
 .../org/apache/hadoop/hdfs/TestDFSUtil.java     |   2 +-
 .../apache/hadoop/hdfs/TestDatanodeConfig.java  |   4 +-
 .../org/apache/hadoop/hdfs/TestECSchemas.java   |  54 -------
 .../apache/hadoop/hdfs/TestEncryptionZones.java |   2 +-
 .../hadoop/hdfs/TestErasureCodingZones.java     |  58 +++----
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java |  65 ++++++++
 .../hadoop/hdfs/TestFileStatusWithECschema.java |  65 --------
 .../java/org/apache/hadoop/hdfs/TestLease.java  |   4 +-
 .../hdfs/TestReadStripedFileWithDecoding.java   |   3 +-
 .../TestReadStripedFileWithMissingBlocks.java   |   3 +-
 .../hadoop/hdfs/TestRecoverStripedFile.java     |   2 +-
 .../hdfs/TestSafeModeWithStripedFile.java       |   3 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |   3 +-
 .../hdfs/TestWriteStripedFileWithFailure.java   |   5 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java    |  34 ++---
 .../hdfs/server/balancer/TestBalancer.java      |   2 +-
 .../blockmanagement/TestBlockInfoStriped.java   |  14 +-
 .../TestBlockTokenWithDFSStriped.java           |   2 +-
 .../TestSequentialBlockGroupId.java             |   2 +-
 .../TestUnderReplicatedBlockQueues.java         |  19 +--
 .../hadoop/hdfs/server/mover/TestMover.java     |   2 +-
 .../TestAddOverReplicatedStripedBlocks.java     |   4 +-
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../server/namenode/TestFSEditLogLoader.java    |  19 +--
 .../hdfs/server/namenode/TestFSImage.java       |  13 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   8 +-
 .../namenode/TestQuotaWithStripedBlocks.java    |  12 +-
 .../server/namenode/TestStripedINodeFile.java   |  27 ++--
 ...TestOfflineImageViewerWithStripedBlocks.java |   2 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  |  12 +-
 .../apache/hadoop/hdfs/web/TestJsonUtil.java    |   2 +-
 .../test/resources/testErasureCodingConf.xml    |  44 +++---
 87 files changed, 798 insertions(+), 1171 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index fb02476..0d5bf8f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -30,12 +30,6 @@ public final class ECSchema {
   public static final String CODEC_NAME_KEY = "codec";
 
   /**
-   * A friendly and understandable name that can mean what's it, also serves as
-   * the identifier that distinguish it from other schemas.
-   */
-  private final String schemaName;
-
-  /**
    * The erasure codec name associated.
    */
   private final String codecName;
@@ -59,14 +53,9 @@ public final class ECSchema {
   /**
   * Constructor with schema name and provided all options. Note the options may
    * contain additional information for the erasure codec to interpret further.
-   * @param schemaName schema name
    * @param allOptions all schema options
    */
-  public ECSchema(String schemaName, Map<String, String> allOptions) {
-    assert (schemaName != null && ! schemaName.isEmpty());
-
-    this.schemaName = schemaName;
-
+  public ECSchema(Map<String, String> allOptions) {
     if (allOptions == null || allOptions.isEmpty()) {
       throw new IllegalArgumentException("No schema options are provided");
     }
@@ -94,33 +83,27 @@ public final class ECSchema {
 
   /**
    * Constructor with key parameters provided.
-   * @param schemaName schema name
    * @param codecName codec name
    * @param numDataUnits number of data units used in the schema
   * @param numParityUnits number of parity units used in the schema
    */
-  public ECSchema(String schemaName, String codecName,
-                  int numDataUnits, int numParityUnits) {
-    this(schemaName, codecName, numDataUnits, numParityUnits, null);
+  public ECSchema(String codecName, int numDataUnits, int numParityUnits) {
+    this(codecName, numDataUnits, numParityUnits, null);
   }
 
   /**
   * Constructor with key parameters provided. Note the extraOptions may contain
    * additional information for the erasure codec to interpret further.
-   * @param schemaName schema name
    * @param codecName codec name
    * @param numDataUnits number of data units used in the schema
   * @param numParityUnits number of parity units used in the schema
    * @param extraOptions extra options to configure the codec
    */
-  public ECSchema(String schemaName, String codecName, int numDataUnits,
-                  int numParityUnits, Map<String, String> extraOptions) {
-
-    assert (schemaName != null && ! schemaName.isEmpty());
+  public ECSchema(String codecName, int numDataUnits, int numParityUnits,
+      Map<String, String> extraOptions) {
     assert (codecName != null && ! codecName.isEmpty());
     assert (numDataUnits > 0 && numParityUnits > 0);
 
-    this.schemaName = schemaName;
     this.codecName = codecName;
     this.numDataUnits = numDataUnits;
     this.numParityUnits = numParityUnits;
@@ -154,14 +137,6 @@ public final class ECSchema {
   }
 
   /**
-   * Get the schema name
-   * @return schema name
-   */
-  public String getSchemaName() {
-    return schemaName;
-  }
-
-  /**
    * Get the codec name
    * @return codec name
    */
@@ -201,7 +176,6 @@ public final class ECSchema {
   public String toString() {
     StringBuilder sb = new StringBuilder("ECSchema=[");
 
-    sb.append("Name=" + schemaName + ", ");
     sb.append("Codec=" + codecName + ", ");
     sb.append(NUM_DATA_UNITS_KEY + "=" + numDataUnits + ", ");
     sb.append(NUM_PARITY_UNITS_KEY + "=" + numParityUnits);
@@ -235,9 +209,6 @@ public final class ECSchema {
     if (numParityUnits != ecSchema.numParityUnits) {
       return false;
     }
-    if (!schemaName.equals(ecSchema.schemaName)) {
-      return false;
-    }
     if (!codecName.equals(ecSchema.codecName)) {
       return false;
     }
@@ -246,8 +217,7 @@ public final class ECSchema {
 
   @Override
   public int hashCode() {
-    int result = schemaName.hashCode();
-    result = 31 * result + codecName.hashCode();
+    int result = codecName.hashCode();
     result = 31 * result + extraOptions.hashCode();
     result = 31 * result + numDataUnits;
     result = 31 * result + numParityUnits;

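For illustration, a minimal sketch of the slimmed-down ECSchema API after this change (the rs/6/3 values mirror TestECSchema below; treating the two instances as equal is an assumption based on the updated equals()):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class ECSchemaSketch {
      public static void main(String[] args) {
        // Key-parameter constructor: the schema name argument is gone.
        ECSchema byParams = new ECSchema("rs", 6, 3);

        // Options-map constructor, likewise without a name.
        Map<String, String> options = new HashMap<>();
        options.put(ECSchema.CODEC_NAME_KEY, "rs");
        options.put(ECSchema.NUM_DATA_UNITS_KEY, "6");
        options.put(ECSchema.NUM_PARITY_UNITS_KEY, "3");
        ECSchema byOptions = new ECSchema(options);

        // equals()/hashCode() now rest on codec, data/parity units and
        // extra options only, so these two should compare equal.
        System.out.println(byParams.equals(byOptions));
      }
    }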
http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
deleted file mode 100644
index fce46f8..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.w3c.dom.Text;
-import org.xml.sax.SAXException;
-
-/**
- * A EC schema loading utility that loads predefined EC schemas from XML file
- */
-public class SchemaLoader {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      SchemaLoader.class.getName());
-
-  /**
-   * Load predefined ec schemas from configuration file. This file is
-   * expected to be in the XML format.
-   */
-  public List<ECSchema> loadSchema(String schemaFilePath) {
-    File confFile = getSchemaFile(schemaFilePath);
-    if (confFile == null) {
-      LOG.warn("Not found any predefined EC schema file");
-      return Collections.emptyList();
-    }
-
-    try {
-      return loadSchema(confFile);
-    } catch (ParserConfigurationException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    } catch (IOException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    } catch (SAXException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    }
-  }
-
-  private List<ECSchema> loadSchema(File schemaFile)
-      throws ParserConfigurationException, IOException, SAXException {
-
-    LOG.info("Loading predefined EC schema file {}", schemaFile);
-
-    // Read and parse the schema file.
-    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
-    dbf.setIgnoringComments(true);
-    DocumentBuilder builder = dbf.newDocumentBuilder();
-    Document doc = builder.parse(schemaFile);
-    Element root = doc.getDocumentElement();
-
-    if (!"schemas".equals(root.getTagName())) {
-      throw new RuntimeException("Bad EC schema config file: " +
-          "top-level element not <schemas>");
-    }
-
-    NodeList elements = root.getChildNodes();
-    List<ECSchema> schemas = new ArrayList<ECSchema>();
-    for (int i = 0; i < elements.getLength(); i++) {
-      Node node = elements.item(i);
-      if (node instanceof Element) {
-        Element element = (Element) node;
-        if ("schema".equals(element.getTagName())) {
-          ECSchema schema = loadSchema(element);
-            schemas.add(schema);
-        } else {
-          LOG.warn("Bad element in EC schema configuration file: {}",
-              element.getTagName());
-        }
-      }
-    }
-
-    return schemas;
-  }
-
-  /**
-   * Path to the XML file containing predefined ec schemas. If the path is
-   * relative, it is searched for in the classpath.
-   */
-  private File getSchemaFile(String schemaFilePath) {
-    File schemaFile = new File(schemaFilePath);
-    if (! schemaFile.isAbsolute()) {
-      URL url = Thread.currentThread().getContextClassLoader()
-          .getResource(schemaFilePath);
-      if (url == null) {
-        LOG.warn("{} not found on the classpath.", schemaFilePath);
-        schemaFile = null;
-      } else if (! url.getProtocol().equalsIgnoreCase("file")) {
-        throw new RuntimeException(
-            "EC predefined schema file " + url +
-                " found on the classpath is not on the local filesystem.");
-      } else {
-        schemaFile = new File(url.getPath());
-      }
-    }
-
-    return schemaFile;
-  }
-
-  /**
-   * Loads a schema from a schema element in the configuration file
-   */
-  private ECSchema loadSchema(Element element) {
-    String schemaName = element.getAttribute("name");
-    Map<String, String> ecOptions = new HashMap<String, String>();
-    NodeList fields = element.getChildNodes();
-
-    for (int i = 0; i < fields.getLength(); i++) {
-      Node fieldNode = fields.item(i);
-      if (fieldNode instanceof Element) {
-        Element field = (Element) fieldNode;
-        String tagName = field.getTagName();
-        String value = ((Text) field.getFirstChild()).getData().trim();
-        ecOptions.put(tagName, value);
-      }
-    }
-
-    ECSchema schema = new ECSchema(schemaName, ecOptions);
-    return schema;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
index c362b96..1d39901 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
@@ -26,7 +26,6 @@ public class TestECSchema {
 
   @Test
   public void testGoodSchema() {
-    String schemaName = "goodSchema";
     int numDataUnits = 6;
     int numParityUnits = 3;
     String codec = "rs";
@@ -39,10 +38,9 @@ public class TestECSchema {
     options.put(ECSchema.CODEC_NAME_KEY, codec);
     options.put(extraOption, extraOptionValue);
 
-    ECSchema schema = new ECSchema(schemaName, options);
+    ECSchema schema = new ECSchema(options);
     System.out.println(schema.toString());
-    
-    assertEquals(schemaName, schema.getSchemaName());
+
     assertEquals(numDataUnits, schema.getNumDataUnits());
     assertEquals(numParityUnits, schema.getNumParityUnits());
     assertEquals(codec, schema.getCodecName());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java
deleted file mode 100644
index 50d2091..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestSchemaLoader.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.PrintWriter;
-import java.util.List;
-
-import org.junit.Test;
-
-public class TestSchemaLoader {
-
-  final static String TEST_DIR = new File(System.getProperty(
-      "test.build.data", "/tmp")).getAbsolutePath();
-
-  final static String SCHEMA_FILE = new File(TEST_DIR, "test-ecschema")
-      .getAbsolutePath();
-
-  @Test
-  public void testLoadSchema() throws Exception {
-    PrintWriter out = new PrintWriter(new FileWriter(SCHEMA_FILE));
-    out.println("<?xml version=\"1.0\"?>");
-    out.println("<schemas>");
-    out.println("  <schema name=\"RSk6m3\">");
-    out.println("    <numDataUnits>6</numDataUnits>");
-    out.println("    <numParityUnits>3</numParityUnits>");
-    out.println("    <codec>RS</codec>");
-    out.println("  </schema>");
-    out.println("  <schema name=\"RSk10m4\">");
-    out.println("    <numDataUnits>10</numDataUnits>");
-    out.println("    <numParityUnits>4</numParityUnits>");
-    out.println("    <codec>RS</codec>");
-    out.println("  </schema>");
-    out.println("</schemas>");
-    out.close();
-
-    SchemaLoader schemaLoader = new SchemaLoader();
-    List<ECSchema> schemas = schemaLoader.loadSchema(SCHEMA_FILE);
-
-    assertEquals(2, schemas.size());
-
-    ECSchema schema1 = schemas.get(0);
-    assertEquals("RSk6m3", schema1.getSchemaName());
-    assertEquals(0, schema1.getExtraOptions().size());
-    assertEquals(6, schema1.getNumDataUnits());
-    assertEquals(3, schema1.getNumParityUnits());
-    assertEquals("RS", schema1.getCodecName());
-
-    ECSchema schema2 = schemas.get(1);
-    assertEquals("RSk10m4", schema2.getSchemaName());
-    assertEquals(0, schema2.getExtraOptions().size());
-    assertEquals(10, schema2.getNumDataUnits());
-    assertEquals(4, schema2.getNumParityUnits());
-    assertEquals("RS", schema2.getCodecName());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 214e15d..00191e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -185,8 +185,8 @@ public interface HdfsClientConfigKeys {
 
     String  THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size";
     /**
-     * With default 6+3 schema, each normal read could span 6 DNs. So this
-     * default value accommodates 3 read streams
+     * With default RS-6-3-64k erasure coding policy, each normal read could span
+     * 6 DNs, so this default value accommodates 3 read streams
      */
     int     THREADPOOL_SIZE_DEFAULT = 18;
   }

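The arithmetic behind that default: 18 threads = 6 data blocks touched per striped read x 3 concurrent reads. A hedged sketch of overriding it, using the literal key that this patch removes from DFSConfigKeys further down:

    // Fragment; assumes an org.apache.hadoop.conf.Configuration instance.
    conf.setInt("dfs.client.striped.read.threadpool.size", 6 * 3);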
http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index e37f440..19d6211 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.retry.AtMostOnce;
 import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
@@ -1486,21 +1485,20 @@ public interface ClientProtocol {
   EventBatchList getEditsFromTxid(long txid) throws IOException;
 
   /**
-   * Create an erasure coding zone with specified schema, if any, otherwise
+   * Create an erasure coding zone with specified policy, if any, otherwise
    * default
    */
   @AtMostOnce
-  void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException;
 
   /**
-   * Gets list of ECSchemas loaded in Namenode
+   * Get the erasure coding policies loaded in Namenode
    *
-   * @return Returns the list of ECSchemas loaded at Namenode
    * @throws IOException
    */
   @Idempotent
-  ECSchema[] getECSchemas() throws IOException;
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException;
 
   /**
    * Get the information about the EC zone for the path

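To make the protocol change concrete, a sketch of a caller's view, assuming `namenode` is an existing ClientProtocol proxy (the names are illustrative, not from this patch):

    // One policy object now crosses the wire instead of (schema, cellSize).
    ErasureCodingPolicy[] policies = namenode.getErasureCodingPolicies();
    namenode.createErasureCodingZone("/striped", policies[0]);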
http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
new file mode 100644
index 0000000..e5dfdff
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+import java.util.Map;
+
+/**
+ * A policy about how to write/read/code an erasure coding file.
+ */
+public final class ErasureCodingPolicy {
+
+  private final String name;
+  private final ECSchema schema;
+  private final int cellSize;
+
+  public ErasureCodingPolicy(String name, ECSchema schema, int cellSize){
+    this.name = name;
+    this.schema = schema;
+    this.cellSize = cellSize;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public ECSchema getSchema() {
+    return schema;
+  }
+
+  public int getCellSize() {
+    return cellSize;
+  }
+
+  public int getNumDataUnits() {
+    return schema.getNumDataUnits();
+  }
+
+  public int getNumParityUnits() {
+    return schema.getNumParityUnits();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ErasureCodingPolicy that = (ErasureCodingPolicy) o;
+
+    if (that.getName().equals(name) && that.getCellSize() == cellSize
+        && that.getSchema().equals(schema)) {
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = name.hashCode();
+    result = 31 * result + schema.hashCode();
+    result = 31 * result + cellSize;
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ErasureCodingPolicy=[");
+    sb.append("Name=" + name + ", ");
+    sb.append("Schema=[" + schema.toString() + "], ");
+    sb.append("CellSize=" + cellSize + " ");
+    sb.append("]");
+    return sb.toString();
+  }
+}

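A minimal sketch of constructing the new value type; the RS-6-3-64k name and 64 KB cell size follow the default policy referenced in HdfsClientConfigKeys above:

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class PolicySketch {
      public static void main(String[] args) {
        ECSchema schema = new ECSchema("rs", 6, 3);
        // The policy pairs the codec schema with the striping cell size.
        ErasureCodingPolicy policy =
            new ErasureCodingPolicy("RS-6-3-64k", schema, 64 * 1024);
        System.out.println(policy.getNumDataUnits()); // 6, delegated to the schema
        System.out.println(policy.getCellSize());     // 65536
      }
    }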
http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java
index 655def3..533b630 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java
@@ -16,21 +16,17 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import org.apache.hadoop.io.erasurecode.ECSchema;
-
 /**
  * Information about the EC Zone at the specified path.
  */
 public class ErasureCodingZone {
 
   private String dir;
-  private ECSchema schema;
-  private int cellSize;
+  private ErasureCodingPolicy ecPolicy;
 
-  public ErasureCodingZone(String dir, ECSchema schema, int cellSize) {
+  public ErasureCodingZone(String dir, ErasureCodingPolicy ecPolicy) {
     this.dir = dir;
-    this.schema = schema;
-    this.cellSize = cellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -43,24 +39,16 @@ public class ErasureCodingZone {
   }
 
   /**
-   * Get the schema for the EC Zone
+   * Get the erasure coding policy for the EC Zone
    * 
    * @return
    */
-  public ECSchema getSchema() {
-    return schema;
-  }
-
-  /**
-   * Get cellSize for the EC Zone
-   */
-  public int getCellSize() {
-    return cellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   @Override
   public String toString() {
-    return "Dir: " + getDir() + ", Schema: " + schema + ", cellSize: "
-        + cellSize;
+    return "Dir: " + getDir() + ", Policy: " + ecPolicy;
   }
 }

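Callers that previously read zone.getSchema() and zone.getCellSize() now take both values from the policy; a sketch, assuming `zone` was obtained from the client:

    ErasureCodingPolicy p = zone.getErasureCodingPolicy();
    int cellSize = p.getCellSize();  // formerly zone.getCellSize()
    ECSchema schema = p.getSchema(); // formerly zone.getSchema()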
http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index fa816e4..3d19ab9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -80,8 +80,8 @@ public final class HdfsConstants {
 
   /*
    * These values correspond to the values used by the system default erasure
-   * coding schema.
-   * TODO: to be removed once all places use schema.
+   * coding policy.
+   * TODO: get these values from ec policy of the associated INodeFile
    */
 
   public static final byte NUM_DATA_BLOCKS = 6;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 8c902b4..6e05ce0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /** Interface that represents the over the wire information for a file.
  */
@@ -49,8 +48,7 @@ public class HdfsFileStatus {
 
   private final FileEncryptionInfo feInfo;
 
-  private final ECSchema ecSchema;
-  private final int stripeCellSize;
+  private final ErasureCodingPolicy ecPolicy;
   
   // Used by dir, not including dot and dotdot. Always zero for a regular file.
   private final int childrenNum;
@@ -77,7 +75,7 @@ public class HdfsFileStatus {
       long blocksize, long modification_time, long access_time,
       FsPermission permission, String owner, String group, byte[] symlink,
       byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
-      byte storagePolicy, ECSchema ecSchema, int stripeCellSize) {
+      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -97,8 +95,7 @@ public class HdfsFileStatus {
     this.childrenNum = childrenNum;
     this.feInfo = feInfo;
     this.storagePolicy = storagePolicy;
-    this.ecSchema = ecSchema;
-    this.stripeCellSize = stripeCellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -256,12 +253,8 @@ public class HdfsFileStatus {
     return feInfo;
   }
 
-  public ECSchema getECSchema() {
-    return ecSchema;
-  }
-
-  public int getStripeCellSize() {
-    return stripeCellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   public final int getChildrenNum() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
index 735e7b2..6e01bbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
@@ -24,7 +24,6 @@ import java.util.Comparator;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /**
  * Collection of blocks with their locations and the file length.
@@ -38,8 +37,7 @@ public class LocatedBlocks {
   private final LocatedBlock lastLocatedBlock;
   private final boolean isLastBlockComplete;
   private final FileEncryptionInfo fileEncryptionInfo;
-  private final ECSchema ecSchema;
-  private final int stripeCellSize;
+  private final ErasureCodingPolicy ecPolicy;
 
   public LocatedBlocks() {
     fileLength = 0;
@@ -48,22 +46,20 @@ public class LocatedBlocks {
     lastLocatedBlock = null;
     isLastBlockComplete = false;
     fileEncryptionInfo = null;
-    ecSchema = null;
-    stripeCellSize = 0;
+    ecPolicy = null;
   }
 
   public LocatedBlocks(long flength, boolean isUnderConstuction,
       List<LocatedBlock> blks, LocatedBlock lastBlock,
       boolean isLastBlockCompleted, FileEncryptionInfo feInfo,
-      ECSchema ecSchema, int stripeCellSize) {
+      ErasureCodingPolicy ecPolicy) {
     fileLength = flength;
     blocks = blks;
     underConstruction = isUnderConstuction;
     this.lastLocatedBlock = lastBlock;
     this.isLastBlockComplete = isLastBlockCompleted;
     this.fileEncryptionInfo = feInfo;
-    this.ecSchema = ecSchema;
-    this.stripeCellSize = stripeCellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -120,17 +116,10 @@ public class LocatedBlocks {
   }
 
   /**
-   * @return The ECSchema for ErasureCoded file, null otherwise.
+   * @return The ECPolicy for ErasureCoded file, null otherwise.
    */
-  public ECSchema getECSchema() {
-    return ecSchema;
-  }
-
-  /**
-   * @return Stripe Cell size for ErasureCoded file, 0 otherwise.
-   */
-  public int getStripeCellSize() {
-    return stripeCellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index a6c7b10..813ea26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -61,7 +61,7 @@ public class SnapshottableDirectoryStatus {
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
         access_time, permission, owner, group, null, localName, inodeId,
-        childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null, 0);
+        childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index eeadd73..9ebf010 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -132,7 +132,7 @@ class JsonUtilClient {
         blockSize, mTime, aTime, permission, owner, group,
         symlink, DFSUtilClient.string2Bytes(localName),
         fileId, childrenNum, null,
-        storagePolicy, null, 0);
+        storagePolicy, null);
   }
 
   /** Convert a Json map to an ExtendedBlock object. */
@@ -503,7 +503,7 @@ class JsonUtilClient {
         (Map<?, ?>) m.get("lastLocatedBlock"));
     final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
     return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
-        lastLocatedBlock, isLastBlockComplete, null, null, 0);
+        lastLocatedBlock, isLastBlockComplete, null, null);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 62db8ea..fb10e9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -873,8 +873,8 @@ service ClientNamenodeProtocol {
       returns(GetCurrentEditLogTxidResponseProto);
   rpc getEditsFromTxid(GetEditsFromTxidRequestProto)
       returns(GetEditsFromTxidResponseProto);
-  rpc getECSchemas(GetECSchemasRequestProto)
-      returns(GetECSchemasResponseProto);
+  rpc getErasureCodingPolicies(GetErasureCodingPoliciesRequestProto)
+      returns(GetErasureCodingPoliciesResponseProto);
   rpc getErasureCodingZone(GetErasureCodingZoneRequestProto)
       returns(GetErasureCodingZoneResponseProto);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
index 56bb7a2..d27f782 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
@@ -28,24 +28,22 @@ import "hdfs.proto";
  */
 message ErasureCodingZoneProto {
   required string dir = 1;
-  required ECSchemaProto schema = 2;
-  required uint32 cellSize = 3;
+  required ErasureCodingPolicyProto ecPolicy = 2;
 }
 
 message CreateErasureCodingZoneRequestProto {
   required string src = 1;
-  optional ECSchemaProto schema = 2;
-  optional uint32 cellSize = 3;
+  optional ErasureCodingPolicyProto ecPolicy = 2;
 }
 
 message CreateErasureCodingZoneResponseProto {
 }
 
-message GetECSchemasRequestProto { // void request
+message GetErasureCodingPoliciesRequestProto { // void request
 }
 
-message GetECSchemasResponseProto {
-  repeated ECSchemaProto schemas = 1;
+message GetErasureCodingPoliciesResponseProto {
+  repeated ErasureCodingPolicyProto ecPolicies = 1;
 }
 
 message GetErasureCodingZoneRequestProto {
@@ -66,6 +64,5 @@ message BlockECRecoveryInfoProto {
   required StorageUuidsProto targetStorageUuids = 4;
   required StorageTypesProto targetStorageTypes = 5;
   repeated uint32 liveBlockIndices = 6;
-  required ECSchemaProto ecSchema = 7;
-  required uint32 cellSize = 8;
-}
\ No newline at end of file
+  required ErasureCodingPolicyProto ecPolicy = 7;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index d2cb665..63fe90c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -306,8 +306,7 @@ message LocatedBlocksProto {
   optional FileEncryptionInfoProto fileEncryptionInfo = 6;
 
   // Optional field for erasure coding
-  optional ECSchemaProto eCSchema = 7;
-  optional uint32 stripeCellSize = 8;
+  optional ErasureCodingPolicyProto ecPolicy = 7;
 }
 
 /**
@@ -322,11 +321,16 @@ message ECSchemaOptionEntryProto {
  * ECSchema for erasurecoding
  */
 message ECSchemaProto {
-  required string schemaName = 1;
-  required string codecName = 2;
-  required uint32 dataUnits = 3;
-  required uint32 parityUnits = 4;
-  repeated ECSchemaOptionEntryProto options = 5;
+  required string codecName = 1;
+  required uint32 dataUnits = 2;
+  required uint32 parityUnits = 3;
+  repeated ECSchemaOptionEntryProto options = 4;
+}
+
+message ErasureCodingPolicyProto {
+  required string name = 1;
+  required ECSchemaProto schema = 2;
+  required uint32 cellSize = 3;
 }
 
 /**
@@ -365,8 +369,7 @@ message HdfsFileStatusProto {
   optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id
 
   // Optional field for erasure coding
-  optional ECSchemaProto ecSchema = 17;
-  optional uint32 stripeCellSize = 18;
+  optional ErasureCodingPolicyProto ecPolicy = 17;
 }
 
 /**

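Assuming the standard protobuf-java code generated from hdfs.proto (outer class HdfsProtos in Hadoop; an assumption here, not part of this patch), the new nesting would be built like this sketch:

    HdfsProtos.ECSchemaProto schema = HdfsProtos.ECSchemaProto.newBuilder()
        .setCodecName("rs")
        .setDataUnits(6)
        .setParityUnits(3)
        .build();
    HdfsProtos.ErasureCodingPolicyProto ecPolicy =
        HdfsProtos.ErasureCodingPolicyProto.newBuilder()
            .setName("RS-6-3-64k")
            .setSchema(schema)      // the schema message nests inside the policy
            .setCellSize(64 * 1024) // cellSize travels with the policy now
            .build();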
http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 45afd2c..173bf9b 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -394,3 +394,6 @@
 
     HDFS-8827. Erasure Coding: Fix NPE when NameNode processes over-replicated
     striped blocks. (Walter Su and Takuya Fukudome via jing9)
+
+    HDFS-8854. Erasure coding: add ECPolicy to replace schema+cellSize in 
+    hadoop-hdfs. (Walter Su via zhz)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8bf1444..2a1d219 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -164,7 +164,7 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -1176,10 +1176,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     try {
       LocatedBlocks locatedBlocks = getLocatedBlocks(src, 0);
       if (locatedBlocks != null) {
-        ECSchema schema = locatedBlocks.getECSchema();
-        if (schema != null) {
-          return new DFSStripedInputStream(this, src, verifyChecksum, schema,
-              locatedBlocks.getStripeCellSize(), locatedBlocks);
+        ErasureCodingPolicy ecPolicy = locatedBlocks.getErasureCodingPolicy();
+        if (ecPolicy != null) {
+          return new DFSStripedInputStream(this, src, verifyChecksum, ecPolicy,
+              locatedBlocks);
         }
         return new DFSInputStream(this, src, verifyChecksum, locatedBlocks);
       } else {
@@ -3012,12 +3012,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException {
     checkOpen();
     TraceScope scope = getPathTraceScope("createErasureCodingZone", src);
     try {
-      namenode.createErasureCodingZone(src, schema, cellSize);
+      namenode.createErasureCodingZone(src, ecPolicy);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
           SafeModeException.class,
@@ -3139,11 +3139,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
-  public ECSchema[] getECSchemas() throws IOException {
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("getECSchemas", traceSampler);
+    TraceScope scope = Trace.startSpan("getErasureCodingPolicies", traceSampler);
     try {
-      return namenode.getECSchemas();
+      return namenode.getErasureCodingPolicies();
     } finally {
       scope.close();
     }

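The read path now keys off a single null check; paraphrasing the open() branch above (local variable names are illustrative):

    ErasureCodingPolicy ecPolicy = locatedBlocks.getErasureCodingPolicy();
    DFSInputStream in = (ecPolicy != null)
        ? new DFSStripedInputStream(this, src, verifyChecksum, ecPolicy, locatedBlocks)
        : new DFSInputStream(this, src, verifyChecksum, locatedBlocks);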
http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 59f14d5..aabfbe5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -691,12 +691,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_DEFAULT =
       false;
 
-  public static final String DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE =
-      "dfs.client.striped.read.threadpool.size";
-  // With default 3+2 schema, each normal read could span 3 DNs. So this
-  // default value accommodates 6 read streams
-  public static final int DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE = 18;
-
   // Slow io warning log threshold settings for dfsclient and datanode.
   public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY =
     "dfs.datanode.slow.io.warning.threshold.ms";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 404bbfc..7d92024 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -272,7 +272,7 @@ public class DFSOutputStream extends FSOutputSummer
       }
       Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
       final DFSOutputStream out;
-      if(stat.getECSchema() != null) {
+      if(stat.getErasureCodingPolicy() != null) {
         out = new DFSStripedOutputStream(dfsClient, src, stat,
             flag, progress, checksum, favoredNodes);
       } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 3612063..2ad63b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -36,7 +36,7 @@ import static org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunkReadResu
 
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.util.DirectBufferPool;
@@ -147,7 +147,7 @@ public class DFSStripedInputStream extends DFSInputStream {
   /** the buffer for a complete stripe */
   private ByteBuffer curStripeBuf;
   private ByteBuffer parityBuf;
-  private final ECSchema schema;
+  private final ErasureCodingPolicy ecPolicy;
   private final RawErasureDecoder decoder;
 
   /**
@@ -158,15 +158,15 @@ public class DFSStripedInputStream extends DFSInputStream {
   private final CompletionService<Void> readingService;
 
   DFSStripedInputStream(DFSClient dfsClient, String src,
-      boolean verifyChecksum, ECSchema schema, int cellSize,
+      boolean verifyChecksum, ErasureCodingPolicy ecPolicy,
       LocatedBlocks locatedBlocks) throws IOException {
     super(dfsClient, src, verifyChecksum, locatedBlocks);
 
-    assert schema != null;
-    this.schema = schema;
-    this.cellSize = cellSize;
-    dataBlkNum = (short) schema.getNumDataUnits();
-    parityBlkNum = (short) schema.getNumParityUnits();
+    assert ecPolicy != null;
+    this.ecPolicy = ecPolicy;
+    this.cellSize = ecPolicy.getCellSize();
+    dataBlkNum = (short) ecPolicy.getNumDataUnits();
+    parityBlkNum = (short) ecPolicy.getNumParityUnits();
     groupSize = dataBlkNum + parityBlkNum;
     blockReaders = new BlockReaderInfo[groupSize];
     curStripeRange = new StripeRange(0, 0);
@@ -282,7 +282,7 @@ public class DFSStripedInputStream extends DFSInputStream {
         stripeLimit - stripeBufOffset);
 
     LocatedStripedBlock blockGroup = (LocatedStripedBlock) currentLocatedBlock;
-    AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(schema, cellSize,
+    AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy, cellSize,
         blockGroup, offsetInBlockGroup,
         offsetInBlockGroup + stripeRange.length - 1, curStripeBuf);
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
@@ -510,7 +510,7 @@ public class DFSStripedInputStream extends DFSInputStream {
     LocatedStripedBlock blockGroup = getBlockGroupAt(block.getStartOffset());
 
     AlignedStripe[] stripes = StripedBlockUtil.divideByteRangeIntoStripes(
-        schema, cellSize, blockGroup, start, end, buf, offset);
+        ecPolicy, cellSize, blockGroup, start, end, buf, offset);
     CompletionService<Void> readService = new ExecutorCompletionService<>(
         dfsClient.getStripedReadsThreadPool());
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(

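All striping geometry is now derived from the one policy object; with the default RS-6-3-64k values:

    short dataBlkNum   = (short) ecPolicy.getNumDataUnits();   // 6
    short parityBlkNum = (short) ecPolicy.getNumParityUnits(); // 3
    int   groupSize    = dataBlkNum + parityBlkNum;            // 9 blocks per group
    int   cellSize     = ecPolicy.getCellSize();               // 65536 bytes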
http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 746b791..4ca8fe6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
@@ -276,10 +276,10 @@ public class DFSStripedOutputStream extends DFSOutputStream {
       LOG.debug("Creating DFSStripedOutputStream for " + src);
     }
 
-    final ECSchema schema = stat.getECSchema();
-    final int numParityBlocks = schema.getNumParityUnits();
-    cellSize = stat.getStripeCellSize();
-    numDataBlocks = schema.getNumDataUnits();
+    final ErasureCodingPolicy ecPolicy = stat.getErasureCodingPolicy();
+    final int numParityBlocks = ecPolicy.getNumParityUnits();
+    cellSize = ecPolicy.getCellSize();
+    numDataBlocks = ecPolicy.getNumDataUnits();
     numAllBlocks = numDataBlocks + numParityBlocks;
 
     encoder = CodecUtil.createRSRawEncoder(dfsClient.getConfiguration(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 4c9f9cb..f8cca02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -90,7 +90,7 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
@@ -2305,18 +2305,17 @@ public class DistributedFileSystem extends FileSystem {
    * Create the erasurecoding zone
    * 
    * @param path Directory to create the ec zone
-   * @param schema ECSchema for the zone. If not specified default will be used.
-   * @param cellSize Cellsize for the striped erasure coding
+   * @param ecPolicy erasure coding policy for the zone. If not specified default will be used.
    * @throws IOException
    */
-  public void createErasureCodingZone(final Path path, final ECSchema schema,
-      final int cellSize) throws IOException {
+  public void createErasureCodingZone(final Path path, final ErasureCodingPolicy ecPolicy)
+      throws IOException {
     Path absF = fixRelativePart(path);
     new FileSystemLinkResolver<Void>() {
       @Override
       public Void doCall(final Path p) throws IOException,
           UnresolvedLinkException {
-        dfs.createErasureCodingZone(getPathName(p), schema, cellSize);
+        dfs.createErasureCodingZone(getPathName(p), ecPolicy);
         return null;
       }
 
@@ -2324,7 +2323,7 @@ public class DistributedFileSystem extends FileSystem {
       public Void next(final FileSystem fs, final Path p) throws IOException {
         if (fs instanceof DistributedFileSystem) {
           DistributedFileSystem myDfs = (DistributedFileSystem) fs;
-          myDfs.createErasureCodingZone(p, schema, cellSize);
+          myDfs.createErasureCodingZone(p, ecPolicy);
           return null;
         }
         throw new UnsupportedOperationException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index 5a3c885..e6e67cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 /**
  * The public API for performing administrative functions on HDFS. Those writing
@@ -369,17 +369,13 @@ public class HdfsAdmin {
   /**
    * Create the ErasureCoding zone
    *
-   * @param path
-   *          Directory to create the ErasureCoding zone
-   * @param schema
-   *          ECSchema for the zone. If not specified default will be used.
-   * @param cellSize
-   *          Cellsize for the striped ErasureCoding
+   * @param path Directory to create the ErasureCoding zone
+   * @param ecPolicy erasure coding policy for the zone. If null, the default will be used.
    * @throws IOException
    */
-  public void createErasureCodingZone(final Path path, final ECSchema schema,
-      final int cellSize) throws IOException {
-    dfs.createErasureCodingZone(path, schema, cellSize);
+  public void createErasureCodingZone(final Path path,
+      final ErasureCodingPolicy ecPolicy) throws IOException {
+    dfs.createErasureCodingZone(path, ecPolicy);
   }
 
   /**
@@ -395,12 +391,11 @@ public class HdfsAdmin {
   }
 
   /**
-   * Get the ErasureCoding schemas supported.
+   * Get the ErasureCoding policies supported.
    *
-   * @return ECSchemas
    * @throws IOException
    */
-  public ECSchema[] getECSchemas() throws IOException {
-    return dfs.getClient().getECSchemas();
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
+    return dfs.getClient().getErasureCodingPolicies();
   }
 }
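A hypothetical usage sketch (not part of this patch) of the reworked admin
API: ask the NameNode for the supported policies, then create an EC zone with
one of them. The /ecdata path and the choice of the first policy are
illustrative assumptions; passing null instead selects the default policy,
per the javadoc above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcZoneSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    // List the policies the NameNode supports.
    ErasureCodingPolicy[] policies = admin.getErasureCodingPolicies();
    // Create a zone under the first supported policy (illustrative choice).
    admin.createErasureCodingZone(new Path("/ecdata"), policies[0]);
  }
}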

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index 4701538..2121dcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /** 
  * Interface that represents the over the wire information
@@ -60,10 +59,10 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
       long access_time, FsPermission permission, String owner, String group,
       byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
       int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
-      ECSchema schema, int stripeCellSize) {
+      ErasureCodingPolicy ecPolicy) {
     super(length, isdir, block_replication, blocksize, modification_time,
         access_time, permission, owner, group, symlink, path, fileId,
-        childrenNum, feInfo, storagePolicy, schema, stripeCellSize);
+        childrenNum, feInfo, storagePolicy, ecPolicy);
     this.locations = locations;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index f988ae3..59bd562 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -202,8 +202,8 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathR
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
@@ -223,7 +223,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
 import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
@@ -1406,10 +1406,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, CreateErasureCodingZoneRequestProto req)
       throws ServiceException {
     try {
-      ECSchema schema = req.hasSchema() ? PBHelper.convertECSchema(req
-          .getSchema()) : null;
-      int cellSize = req.hasCellSize() ? req.getCellSize() : 0;
-      server.createErasureCodingZone(req.getSrc(), schema, cellSize);
+      ErasureCodingPolicy ecPolicy = req.hasEcPolicy() ? PBHelper.convertErasureCodingPolicy(req
+          .getEcPolicy()) : null;
+      server.createErasureCodingZone(req.getSrc(), ecPolicy);
       return CreateErasureCodingZoneResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1539,14 +1538,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
-  public GetECSchemasResponseProto getECSchemas(RpcController controller,
-      GetECSchemasRequestProto request) throws ServiceException {
+  public GetErasureCodingPoliciesResponseProto getErasureCodingPolicies(RpcController controller,
+      GetErasureCodingPoliciesRequestProto request) throws ServiceException {
     try {
-      ECSchema[] ecSchemas = server.getECSchemas();
-      GetECSchemasResponseProto.Builder resBuilder = GetECSchemasResponseProto
+      ErasureCodingPolicy[] ecPolicies = server.getErasureCodingPolicies();
+      GetErasureCodingPoliciesResponseProto.Builder resBuilder = GetErasureCodingPoliciesResponseProto
           .newBuilder();
-      for (ECSchema ecSchema : ecSchemas) {
-        resBuilder.addSchemas(PBHelper.convertECSchema(ecSchema));
+      for (ErasureCodingPolicy ecPolicy : ecPolicies) {
+        resBuilder.addEcPolicies(PBHelper.convertErasureCodingPolicy(ecPolicy));
       }
       return resBuilder.build();
     } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 342da0c..4f29c4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -166,12 +166,12 @@ import org.apache.hadoop.hdfs.protocol.proto.*;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
@@ -183,7 +183,7 @@ import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolTranslator;
@@ -241,8 +241,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
   VOID_GET_STORAGE_POLICIES_REQUEST =
       GetStoragePoliciesRequestProto.newBuilder().build();
 
-  private final static GetECSchemasRequestProto
-  VOID_GET_ECSCHEMAS_REQUEST = GetECSchemasRequestProto
+  private final static GetErasureCodingPoliciesRequestProto
+  VOID_GET_EC_POLICIES_REQUEST = GetErasureCodingPoliciesRequestProto
       .newBuilder().build();
 
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
@@ -1420,16 +1420,13 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException {
     final CreateErasureCodingZoneRequestProto.Builder builder =
         CreateErasureCodingZoneRequestProto.newBuilder();
     builder.setSrc(src);
-    if (schema != null) {
-      builder.setSchema(PBHelper.convertECSchema(schema));
-    }
-    if (cellSize > 0) {
-      builder.setCellSize(cellSize);
+    if (ecPolicy != null) {
+      builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(ecPolicy));
     }
     CreateErasureCodingZoneRequestProto req = builder.build();
     try {
@@ -1563,16 +1560,17 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public ECSchema[] getECSchemas() throws IOException {
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     try {
-      GetECSchemasResponseProto response = rpcProxy.getECSchemas(null,
-          VOID_GET_ECSCHEMAS_REQUEST);
-      ECSchema[] schemas = new ECSchema[response.getSchemasCount()];
+      GetErasureCodingPoliciesResponseProto response = rpcProxy
+          .getErasureCodingPolicies(null, VOID_GET_EC_POLICIES_REQUEST);
+      ErasureCodingPolicy[] ecPolicies =
+          new ErasureCodingPolicy[response.getEcPoliciesCount()];
       int i = 0;
-      for (ECSchemaProto schemaProto : response.getSchemasList()) {
-        schemas[i++] = PBHelper.convertECSchema(schemaProto);
+      for (ErasureCodingPolicyProto ecPolicyProto : response.getEcPoliciesList()) {
+        ecPolicies[i++] = PBHelper.convertErasureCodingPolicy(ecPolicyProto);
       }
-      return schemas;
+      return ecPolicies;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index a97e2ff..c083b5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -137,6 +138,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECRecovery
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
@@ -1348,8 +1350,7 @@ public class PBHelper {
             PBHelper.convertLocatedBlockProto(lb.getLastBlock()) : null,
         lb.getIsLastBlockComplete(),
        lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) : null,
-        lb.hasECSchema() ? convertECSchema(lb.getECSchema()) : null,
-        lb.hasStripeCellSize() ? lb.getStripeCellSize() : 0);
+        lb.hasEcPolicy() ? convertErasureCodingPolicy(lb.getEcPolicy()) : null);
   }
   
   public static LocatedBlocksProto convert(LocatedBlocks lb) {
@@ -1365,11 +1366,8 @@ public class PBHelper {
     if (lb.getFileEncryptionInfo() != null) {
       builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
     }
-    if (lb.getECSchema() != null) {
-      builder.setECSchema(convertECSchema(lb.getECSchema()));
-    }
-    if (lb.getStripeCellSize() != 0) {
-      builder.setStripeCellSize(lb.getStripeCellSize());
+    if (lb.getErasureCodingPolicy() != null) {
+      builder.setEcPolicy(convertErasureCodingPolicy(lb.getErasureCodingPolicy()));
     }
     return builder.setFileLength(lb.getFileLength())
         .setUnderConstruction(lb.isUnderConstruction())
@@ -1514,8 +1512,7 @@ public class PBHelper {
        fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
        fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
            : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
-        fs.hasEcSchema() ? PBHelper.convertECSchema(fs.getEcSchema()) : null,
-        fs.hasStripeCellSize() ? fs.getStripeCellSize() : 0);
+        fs.hasEcPolicy() ? PBHelper.convertErasureCodingPolicy(fs.getEcPolicy()) : null);
   }
 
   public static SnapshottableDirectoryStatus convert(
@@ -1576,10 +1573,9 @@ public class PBHelper {
         builder.setLocations(PBHelper.convert(locations));
       }
     }
-    if(fs.getECSchema() != null) {
-      builder.setEcSchema(PBHelper.convertECSchema(fs.getECSchema()));
+    if(fs.getErasureCodingPolicy() != null) {
+      builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(fs.getErasureCodingPolicy()));
     }
-    builder.setStripeCellSize(fs.getStripeCellSize());
     return builder.build();
   }
   
@@ -3155,13 +3151,12 @@ public class PBHelper {
     for (ECSchemaOptionEntryProto option : optionsList) {
       options.put(option.getKey(), option.getValue());
     }
-    return new ECSchema(schema.getSchemaName(), schema.getCodecName(),
-        schema.getDataUnits(), schema.getParityUnits(), options);
+    return new ECSchema(schema.getCodecName(), schema.getDataUnits(),
+        schema.getParityUnits(), options);
   }
 
   public static ECSchemaProto convertECSchema(ECSchema schema) {
     ECSchemaProto.Builder builder = ECSchemaProto.newBuilder()
-        .setSchemaName(schema.getSchemaName())
         .setCodecName(schema.getCodecName())
         .setDataUnits(schema.getNumDataUnits())
         .setParityUnits(schema.getNumParityUnits());
@@ -3173,17 +3168,34 @@ public class PBHelper {
     return builder.build();
   }
 
+  public static ErasureCodingPolicy convertErasureCodingPolicy(
+      ErasureCodingPolicyProto policy) {
+    return new ErasureCodingPolicy(policy.getName(),
+        convertECSchema(policy.getSchema()),
+        policy.getCellSize());
+  }
+
+  public static ErasureCodingPolicyProto convertErasureCodingPolicy(
+      ErasureCodingPolicy policy) {
+    ErasureCodingPolicyProto.Builder builder = ErasureCodingPolicyProto
+        .newBuilder()
+        .setName(policy.getName())
+        .setSchema(convertECSchema(policy.getSchema()))
+        .setCellSize(policy.getCellSize());
+    return builder.build();
+  }
+
   public static ErasureCodingZoneProto convertErasureCodingZone(
       ErasureCodingZone ecZone) {
     return ErasureCodingZoneProto.newBuilder().setDir(ecZone.getDir())
-        .setSchema(convertECSchema(ecZone.getSchema()))
-        .setCellSize(ecZone.getCellSize()).build();
+        .setEcPolicy(convertErasureCodingPolicy(ecZone.getErasureCodingPolicy()))
+        .build();
   }
 
   public static ErasureCodingZone convertErasureCodingZone(
       ErasureCodingZoneProto ecZoneProto) {
     return new ErasureCodingZone(ecZoneProto.getDir(),
-        convertECSchema(ecZoneProto.getSchema()), ecZoneProto.getCellSize());
+        convertErasureCodingPolicy(ecZoneProto.getEcPolicy()));
   }
   
   public static BlockECRecoveryInfo convertBlockECRecoveryInfo(
@@ -3216,12 +3228,11 @@ public class PBHelper {
       liveBlkIndices[i] = liveBlockIndicesList.get(i).shortValue();
     }
 
-    ECSchema ecSchema = convertECSchema(blockEcRecoveryInfoProto.getEcSchema());
-    int cellSize = blockEcRecoveryInfoProto.getCellSize();
+    ErasureCodingPolicy ecPolicy =
+        convertErasureCodingPolicy(blockEcRecoveryInfoProto.getEcPolicy());
 
     return new BlockECRecoveryInfo(block, sourceDnInfos, targetDnInfos,
-        targetStorageUuids, convertStorageTypes, liveBlkIndices, ecSchema,
-        cellSize);
+        targetStorageUuids, convertStorageTypes, liveBlkIndices, ecPolicy);
   }
 
   public static BlockECRecoveryInfoProto convertBlockECRecoveryInfo(
@@ -3246,8 +3257,8 @@ public class PBHelper {
     short[] liveBlockIndices = blockEcRecoveryInfo.getLiveBlockIndices();
     builder.addAllLiveBlockIndices(convertIntArray(liveBlockIndices));
 
-    builder.setEcSchema(convertECSchema(blockEcRecoveryInfo.getECSchema()));
-    builder.setCellSize(blockEcRecoveryInfo.getCellSize());
+    builder.setEcPolicy(convertErasureCodingPolicy(blockEcRecoveryInfo
+        .getErasureCodingPolicy()));
 
     return builder.build();
   }
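A round-trip sketch (illustrative, not part of this patch) of the two
convertErasureCodingPolicy overloads added above; they are inverses over
(name, schema, cellSize). The policy name, codec string, and cell size below
are assumed example values.

import java.util.HashMap;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class PolicyPbRoundTrip {
  public static void main(String[] args) {
    // Assumed values: an RS(6,3) schema with a 64 KB cell.
    ECSchema schema = new ECSchema("rs", 6, 3, new HashMap<String, String>());
    ErasureCodingPolicy policy =
        new ErasureCodingPolicy("RS-6-3-64k", schema, 64 * 1024);
    // Policy -> proto -> policy.
    ErasureCodingPolicyProto proto = PBHelper.convertErasureCodingPolicy(policy);
    ErasureCodingPolicy back = PBHelper.convertErasureCodingPolicy(proto);
    // Name and cell size survive the proto round trip.
    System.out.println(back.getName() + " cellSize=" + back.getCellSize());
  }
}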

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 6674510..14d2fcc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 
@@ -38,8 +38,7 @@ import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_S
  * array to record the block index for each triplet.
  */
 public class BlockInfoStriped extends BlockInfo {
-  private final ECSchema schema;
-  private final int cellSize;
+  private final ErasureCodingPolicy ecPolicy;
   /**
    * Always the same size with triplets. Record the block index for each triplet
    * TODO: actually this is only necessary for over-replicated block. Thus can
@@ -47,36 +46,34 @@ public class BlockInfoStriped extends BlockInfo {
    */
   private byte[] indices;
 
-  public BlockInfoStriped(Block blk, ECSchema schema, int cellSize) {
-    super(blk, (short) (schema.getNumDataUnits() + schema.getNumParityUnits()));
-    indices = new byte[schema.getNumDataUnits() + schema.getNumParityUnits()];
+  public BlockInfoStriped(Block blk, ErasureCodingPolicy ecPolicy) {
+    super(blk, (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()));
+    indices = new byte[ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()];
     initIndices();
-    this.schema = schema;
-    this.cellSize = cellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   BlockInfoStriped(BlockInfoStriped b) {
-    this(b, b.getSchema(), b.getCellSize());
+    this(b, b.getErasureCodingPolicy());
     this.setBlockCollection(b.getBlockCollection());
   }
 
   public short getTotalBlockNum() {
-    return (short) (this.schema.getNumDataUnits()
-        + this.schema.getNumParityUnits());
+    return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
   }
 
   public short getDataBlockNum() {
-    return (short) this.schema.getNumDataUnits();
+    return (short) ecPolicy.getNumDataUnits();
   }
 
   public short getParityBlockNum() {
-    return (short) this.schema.getNumParityUnits();
+    return (short) ecPolicy.getNumParityUnits();
   }
 
   /**
    * If the block is committed/completed and its length is less than a full
   * stripe, it returns the number of actual data blocks.
-   * Otherwise it returns the number of data units specified by schema.
+   * Otherwise it returns the number of data units specified by the erasure coding policy.
    */
   public short getRealDataBlockNum() {
     if (isComplete() || getBlockUCState() == BlockUCState.COMMITTED) {
@@ -91,12 +88,8 @@ public class BlockInfoStriped extends BlockInfo {
     return (short) (getRealDataBlockNum() + getParityBlockNum());
   }
 
-  public ECSchema getSchema() {
-    return schema;
-  }
-
-  public int getCellSize() {
-    return cellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   private void initIndices() {
@@ -230,7 +223,7 @@ public class BlockInfoStriped extends BlockInfo {
     // be the total of data blocks and parity blocks because
     // `getNumBytes` is the total of actual data block size.
     return StripedBlockUtil.spaceConsumedByStripedBlock(getNumBytes(),
-        this.schema.getNumDataUnits(), this.schema.getNumParityUnits(),
+        ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits(),
         BLOCK_STRIPED_CELL_SIZE);
     }
 
@@ -260,7 +253,7 @@ public class BlockInfoStriped extends BlockInfo {
       BlockUCState s, DatanodeStorageInfo[] targets) {
     final BlockInfoStripedUnderConstruction ucBlock;
     if(isComplete()) {
-      ucBlock = new BlockInfoStripedUnderConstruction(this, schema, cellSize,
+      ucBlock = new BlockInfoStripedUnderConstruction(this, ecPolicy,
           s, targets);
       ucBlock.setBlockCollection(getBlockCollection());
     } else {
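As a worked example (assuming an RS(6,3) policy; the numbers are illustrative,
not taken from this commit), the accessors above reduce to simple arithmetic
over the policy:

public class StripedCountsSketch {
  public static void main(String[] args) {
    // Assuming ecPolicy.getNumDataUnits() == 6 and getNumParityUnits() == 3:
    short total = (short) (6 + 3); // getTotalBlockNum()  -> 9 internal blocks
    short data = (short) 6;        // getDataBlockNum()   -> 6
    short parity = (short) 3;      // getParityBlockNum() -> 3
    System.out.println(total + " = " + data + " data + " + parity + " parity");
  }
}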

http://git-wip-us.apache.org/repos/asf/hadoop/blob/acbe42a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
index 5f78096..9de8294 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import java.io.IOException;
 
@@ -57,17 +57,16 @@ public class BlockInfoStripedUnderConstruction extends BlockInfoStriped
   /**
    * Constructor with null storage targets.
    */
-  public BlockInfoStripedUnderConstruction(Block blk, ECSchema schema,
-      int cellSize) {
-    this(blk, schema, cellSize, UNDER_CONSTRUCTION, null);
+  public BlockInfoStripedUnderConstruction(Block blk, ErasureCodingPolicy ecPolicy) {
+    this(blk, ecPolicy, UNDER_CONSTRUCTION, null);
   }
 
   /**
    * Create a striped block that is currently being constructed.
    */
-  public BlockInfoStripedUnderConstruction(Block blk, ECSchema schema,
-      int cellSize, BlockUCState state, DatanodeStorageInfo[] targets) {
-    super(blk, schema, cellSize);
+  public BlockInfoStripedUnderConstruction(Block blk, ErasureCodingPolicy ecPolicy,
+      BlockUCState state, DatanodeStorageInfo[] targets) {
+    super(blk, ecPolicy);
     assert getBlockUCState() != COMPLETE :
       "BlockInfoStripedUnderConstruction cannot be in COMPLETE state";
     this.blockUCState = state;
