MAPREDUCE-6746. Replace org.apache.commons.io.Charsets with java.nio.charset.StandardCharsets. Contributed by Vincent Poon.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f0bffea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f0bffea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f0bffea

Branch: refs/heads/HADOOP-12756
Commit: 3f0bffea2d35083c45acee79338e10b608374d8e
Parents: 95f2b98
Author: Akira Ajisaka <aajis...@apache.org>
Authored: Sat Jul 30 11:45:12 2016 +0900
Committer: Akira Ajisaka <aajis...@apache.org>
Committed: Sat Jul 30 11:45:12 2016 +0900

----------------------------------------------------------------------
 .../org/apache/hadoop/mapred/TestLineRecordReader.java    | 10 +++++-----
 .../hadoop/mapreduce/lib/input/TestLineRecordReader.java  |  8 ++++----
 2 files changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
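
For context on the change itself: commons-io's org.apache.commons.io.Charsets
was deprecated once Java 7 shipped java.nio.charset.StandardCharsets, which
exposes the same Charset constants (UTF_8 and friends) from the JDK, so the
swap drops a third-party dependency without changing behavior. A minimal,
self-contained sketch of the pattern used throughout both test files follows;
the class name and delimiter value are illustrative, not taken from the patch:

  import java.nio.charset.StandardCharsets;

  public class DelimiterBytesExample {
    public static void main(String[] args) {
      String delimiter = "++";
      // getBytes(Charset) never throws, unlike getBytes(String charsetName),
      // which declares UnsupportedEncodingException and forces a try/catch.
      byte[] recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
      System.out.println(recordDelimiterBytes.length); // prints 2
    }
  }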


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f0bffea/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
index 844250b..a84f6cc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
@@ -34,7 +34,7 @@ import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
-import org.apache.commons.io.Charsets;
+import java.nio.charset.StandardCharsets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -72,7 +72,7 @@ public class TestLineRecordReader {
     String delimiter = conf.get("textinputformat.record.delimiter");
     byte[] recordDelimiterBytes = null;
     if (null != delimiter) {
-      recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+      recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     }
     // read the data without splitting to count the records
     FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
@@ -120,7 +120,7 @@ public class TestLineRecordReader {
     String delimiter = conf.get("textinputformat.record.delimiter");
     byte[] recordDelimiterBytes = null;
     if (null != delimiter) {
-      recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+      recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     }
     // read the data without splitting to count the records
     FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
@@ -482,7 +482,7 @@ public class TestLineRecordReader {
     String inputData = "abcdefghij++kl++mno";
     Path inputFile = createInputFile(conf, inputData);
     String delimiter = "++";
-    byte[] recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+    byte[] recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     // the first split must contain two records to make sure that it also pulls
     // in the record from the 2nd split
     int splitLength = 15;
@@ -555,7 +555,7 @@ public class TestLineRecordReader {
     inputData = "abcd|efgh|+|ij|kl|+|mno|pqr";
     inputFile = createInputFile(conf, inputData);
     delimiter = "|+|";
-    recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+    recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     // walking over the buffer and split sizes checks for proper processing
     // of the ambiguous bytes of the delimiter
     for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f0bffea/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
index 716c4a6..f6fbbd5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
@@ -35,7 +35,7 @@ import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
-import org.apache.commons.io.Charsets;
+import java.nio.charset.StandardCharsets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -76,7 +76,7 @@ public class TestLineRecordReader {
     String delimiter = conf.get("textinputformat.record.delimiter");
     byte[] recordDelimiterBytes = null;
     if (null != delimiter) {
-      recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+      recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     }
     TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
 
@@ -416,7 +416,7 @@ public class TestLineRecordReader {
     String inputData = "abcdefghij++kl++mno";
     Path inputFile = createInputFile(conf, inputData);
     String delimiter = "++";
-    byte[] recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+    byte[] recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     int splitLength = 15;
     FileSplit split = new FileSplit(inputFile, 0, splitLength, (String[])null);
     TaskAttemptContext context = new TaskAttemptContextImpl(conf,
@@ -500,7 +500,7 @@ public class TestLineRecordReader {
     inputData = "abcd|efgh|+|ij|kl|+|mno|pqr";
     inputFile = createInputFile(conf, inputData);
     delimiter = "|+|";
-    recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+    recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
     // walking over the buffer and split sizes checks for proper processing
     // of the ambiguous bytes of the delimiter
     for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {

