HADOOP-12029. Remove chunkSize from ECSchema as it's not required for coders (Contributed by Vinayakumar B)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a391e1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a391e1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a391e1d

Branch: refs/heads/HDFS-7285
Commit: 5a391e1d2584dc5d77fb1325ea91c8e5854934d1
Parents: 80c56c2
Author: Vinayakumar B <[email protected]>
Authored: Mon May 25 16:02:37 2015 +0530
Committer: Zhe Zhang <[email protected]>
Committed: Tue May 26 12:07:16 2015 -0700

----------------------------------------------------------------------
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt     |  5 ++-
 .../apache/hadoop/io/erasurecode/ECSchema.java | 40 +-------------------
 .../hadoop/io/erasurecode/TestECSchema.java    |  3 --
 3 files changed, 6 insertions(+), 42 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a391e1d/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 531b8d5..c9b80d3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -55,4 +55,7 @@
     HADOOP-11938. Enhance ByteBuffer version encode/decode API of raw erasure
     coder. (Kai Zheng via Zhe Zhang)
 
-    HADOOP-12013. Generate fixed data to perform erasure coder test. (Kai Zheng)
\ No newline at end of file
+    HADOOP-12013. Generate fixed data to perform erasure coder test. (Kai Zheng)
+
+    HADOOP-12029. Remove chunkSize from ECSchema as its not required for coders
+    (vinayakumarb)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a391e1d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index f058ea7..fdc569e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -28,8 +28,6 @@ public final class ECSchema {
   public static final String NUM_DATA_UNITS_KEY = "k";
   public static final String NUM_PARITY_UNITS_KEY = "m";
   public static final String CODEC_NAME_KEY = "codec";
-  public static final String CHUNK_SIZE_KEY = "chunkSize";
-  public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
   /**
    * A friendly and understandable name that can mean what's it, also serves as
@@ -52,11 +50,6 @@ public final class ECSchema {
    */
   private final int numParityUnits;
 
-  /**
-   * Unit data size for each chunk in a coding
-   */
-  private final int chunkSize;
-
   /*
    * An erasure code can have its own specific advanced parameters, subject to
    * itself to interpret these key-value settings.
@@ -92,17 +85,9 @@ public final class ECSchema {
     this.numDataUnits = tmpNumDataUnits;
     this.numParityUnits = tmpNumParityUnits;
 
-    int tmpChunkSize = extractIntOption(CHUNK_SIZE_KEY, allOptions);
-    if (tmpChunkSize > 0) {
-      this.chunkSize = tmpChunkSize;
-    } else {
-      this.chunkSize = DEFAULT_CHUNK_SIZE;
-    }
-
     allOptions.remove(CODEC_NAME_KEY);
     allOptions.remove(NUM_DATA_UNITS_KEY);
     allOptions.remove(NUM_PARITY_UNITS_KEY);
-    allOptions.remove(CHUNK_SIZE_KEY);
     // After some cleanup
     this.extraOptions = Collections.unmodifiableMap(allOptions);
   }
@@ -144,14 +129,6 @@ public final class ECSchema {
       extraOptions = new HashMap<>();
     }
 
-    int tmpChunkSize = extractIntOption(CHUNK_SIZE_KEY, extraOptions);
-    if (tmpChunkSize > 0) {
-      this.chunkSize = tmpChunkSize;
-    } else {
-      this.chunkSize = DEFAULT_CHUNK_SIZE;
-    }
-
-    extraOptions.remove(CHUNK_SIZE_KEY);
     // After some cleanup
     this.extraOptions = Collections.unmodifiableMap(extraOptions);
   }
@@ -217,14 +194,6 @@ public final class ECSchema {
   }
 
   /**
-   * Get chunk buffer size for the erasure encoding/decoding.
-   * @return chunk buffer size
-   */
-  public int getChunkSize() {
-    return chunkSize;
-  }
-
-  /**
    * Make a meaningful string representation for log output.
    * @return string representation
    */
@@ -235,9 +204,8 @@ public final class ECSchema {
     sb.append("Name=" + schemaName + ", ");
     sb.append("Codec=" + codecName + ", ");
     sb.append(NUM_DATA_UNITS_KEY + "=" + numDataUnits + ", ");
-    sb.append(NUM_PARITY_UNITS_KEY + "=" + numParityUnits + ", ");
-    sb.append(CHUNK_SIZE_KEY + "=" + chunkSize +
-        (extraOptions.isEmpty() ? "" : ", "));
+    sb.append(NUM_PARITY_UNITS_KEY + "=" + numParityUnits);
+    sb.append((extraOptions.isEmpty() ? "" : ", "));
 
     int i = 0;
     for (String opt : extraOptions.keySet()) {
@@ -267,9 +235,6 @@ public final class ECSchema {
     if (numParityUnits != ecSchema.numParityUnits) {
       return false;
     }
-    if (chunkSize != ecSchema.chunkSize) {
-      return false;
-    }
     if (!schemaName.equals(ecSchema.schemaName)) {
       return false;
     }
@@ -286,7 +251,6 @@ public final class ECSchema {
     result = 31 * result + extraOptions.hashCode();
     result = 31 * result + numDataUnits;
     result = 31 * result + numParityUnits;
-    result = 31 * result + chunkSize;
     return result;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a391e1d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
index 15e672f..c362b96 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
@@ -29,7 +29,6 @@ public class TestECSchema {
     String schemaName = "goodSchema";
     int numDataUnits = 6;
     int numParityUnits = 3;
-    int chunkSize = 64 * 1024 * 1024;
     String codec = "rs";
     String extraOption = "extraOption";
     String extraOptionValue = "extraOptionValue";
@@ -38,7 +37,6 @@ public class TestECSchema {
     options.put(ECSchema.NUM_DATA_UNITS_KEY, String.valueOf(numDataUnits));
     options.put(ECSchema.NUM_PARITY_UNITS_KEY, String.valueOf(numParityUnits));
    options.put(ECSchema.CODEC_NAME_KEY, codec);
-    options.put(ECSchema.CHUNK_SIZE_KEY, String.valueOf(chunkSize));
     options.put(extraOption, extraOptionValue);
 
     ECSchema schema = new ECSchema(schemaName, options);
@@ -47,7 +45,6 @@ public class TestECSchema {
     assertEquals(schemaName, schema.getSchemaName());
     assertEquals(numDataUnits, schema.getNumDataUnits());
     assertEquals(numParityUnits, schema.getNumParityUnits());
-    assertEquals(chunkSize, schema.getChunkSize());
     assertEquals(codec, schema.getCodecName());
     assertEquals(extraOptionValue, schema.getExtraOptions().get(extraOption));
   }
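
For reference, the updated TestECSchema above shows the intended usage after
this change: only k, m, and the codec name are special-cased by the
constructor, and every other entry lands in the schema's immutable
extra-options map. Below is a minimal sketch of that post-change API; the
schema name and option values are illustrative, not taken from the commit.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.io.erasurecode.ECSchema;

public class ECSchemaUsageSketch {
  public static void main(String[] args) {
    Map<String, String> options = new HashMap<>();
    options.put(ECSchema.NUM_DATA_UNITS_KEY, "6");   // k: data units per group
    options.put(ECSchema.NUM_PARITY_UNITS_KEY, "3"); // m: parity units per group
    options.put(ECSchema.CODEC_NAME_KEY, "rs");      // codec name, e.g. Reed-Solomon
    // "chunkSize" is no longer recognized by ECSchema; if supplied, it
    // simply survives verbatim as an ordinary extra option.
    options.put("chunkSize", "65536");

    ECSchema schema = new ECSchema("rs-6-3", options);

    System.out.println(schema.getNumDataUnits());    // 6
    System.out.println(schema.getNumParityUnits());  // 3
    System.out.println(schema.getExtraOptions().get("chunkSize")); // "65536"
    // schema.getChunkSize() no longer compiles after this commit; chunk and
    // buffer sizing is decided by the coder's caller, not by the schema.
  }
}

Since ECSchema no longer validates a chunk size or falls back to
DEFAULT_CHUNK_SIZE (256K), any component that previously read the chunk size
from the schema must now obtain its buffer size from the caller or from its
own configuration.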
