This is an automated email from the ASF dual-hosted git repository.

mwalch pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo-examples.git

The following commit(s) were added to refs/heads/master by this push:
     new f89f33d  #35 - Factor out TestUtil API (#36)
f89f33d is described below

commit f89f33d8605b3d7602e38bb9259bb25003770109
Author: Jeffrey Manno <jeffreymann...@gmail.com>
AuthorDate: Mon Mar 11 17:38:49 2019 -0400

    #35 - Factor out TestUtil API (#36)
---
 contrib/import-control.xml                                             | 1 -
 .../org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java | 3 +--
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/contrib/import-control.xml b/contrib/import-control.xml
index 22da90b..0c0f647 100644
--- a/contrib/import-control.xml
+++ b/contrib/import-control.xml
@@ -37,7 +37,6 @@
     <allow class="org.apache.accumulo.tracer.TraceDump"/>
     <allow class="org.apache.accumulo.core.trace.DistributedTrace"/>
     <allow class="org.apache.accumulo.core.util.format.DefaultFormatter"/>
-    <allow class="org.apache.accumulo.core.util.TextUtil"/>
     <!-- End TODO section -->

     <!-- disallow everything else coming from accumulo -->

diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
index 68ff994..f5d2dd6 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
@@ -25,7 +25,6 @@ import java.util.Collection;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.examples.cli.ClientOpts;
 import org.apache.accumulo.hadoop.mapreduce.AccumuloFileOutputFormat;
 import org.apache.accumulo.hadoop.mapreduce.partition.RangePartitioner;
@@ -122,7 +121,7 @@ public class BulkIngestExample {
         new BufferedOutputStream(fs.create(new Path(workDir + "/splits.txt"))))) {
       Collection<Text> splits =
           client.tableOperations().listSplits(SetupTable.tableName, 100);
       for (Text split : splits)
-        out.println(Base64.getEncoder().encodeToString(TextUtil.getBytes(split)));
+        out.println(Base64.getEncoder().encodeToString(split.copyBytes()));
       job.setNumReduceTasks(splits.size() + 1);
     }