This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new 6696c072a1 Remove unnecessary uses of ClientInfo in ITs (#2625)
6696c072a1 is described below

commit 6696c072a1cdeb35636d39cf5404d00333f6b6f2
Author: Christopher Tubbs <ctubb...@apache.org>
AuthorDate: Tue Apr 12 00:45:39 2022 -0400

    Remove unnecessary uses of ClientInfo in ITs (#2625)
    
    * Remove conversions of client Properties into ClientInfo, only to
      convert them back to Properties again; just use Properties directly
    * Remove unnecessary ClientInfo parameter from test utilities where
      client would suffice (client.properties() was also available)
    * Replace uses of clusterHarness.getClientInfo().getProperties() with
      just-as-good clusterHarness.getClientProps()
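
In essence, each call site changes as in this minimal sketch (the "table" and
"job" variables here are illustrative stand-ins, not code from any one test):

    // Before: client Properties wrapped into ClientInfo, then unwrapped again
    AccumuloInputFormat.configure()
        .clientProperties(getClientInfo().getProperties())
        .table(table).auths(Authorizations.EMPTY).store(job);

    // After: the harness's client Properties are passed through directly
    AccumuloInputFormat.configure()
        .clientProperties(getClientProps())
        .table(table).auths(Authorizations.EMPTY).store(job);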
---
 .../its/mapred/AccumuloFileOutputFormatIT.java     |  2 +-
 .../hadoop/its/mapred/AccumuloInputFormatIT.java   | 10 ++--
 .../its/mapred/AccumuloRowInputFormatIT.java       |  4 +-
 .../hadoop/its/mapred/MultiTableInputFormatIT.java |  4 +-
 .../accumulo/hadoop/its/mapred/TokenFileIT.java    |  2 +-
 .../its/mapreduce/AccumuloFileOutputFormatIT.java  |  2 +-
 .../its/mapreduce/AccumuloInputFormatIT.java       | 27 +++++-----
 .../its/mapreduce/AccumuloOutputFormatIT.java      |  8 +--
 .../its/mapreduce/AccumuloRowInputFormatIT.java    |  4 +-
 .../its/mapreduce/MultiTableInputFormatIT.java     |  4 +-
 .../accumulo/hadoop/its/mapreduce/TokenFileIT.java |  2 +-
 .../org/apache/accumulo/test/CloseScannerIT.java   |  2 +-
 .../accumulo/test/functional/AccumuloClientIT.java |  4 +-
 .../apache/accumulo/test/functional/BulkIT.java    | 19 +++----
 .../accumulo/test/functional/CredentialsIT.java    |  2 +-
 .../accumulo/test/functional/ReadWriteIT.java      | 59 +++++++++++-----------
 .../test/functional/RecoveryWithEmptyRFileIT.java  |  6 +--
 .../org/apache/accumulo/test/functional/SslIT.java |  3 +-
 18 files changed, 77 insertions(+), 87 deletions(-)

diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java
index 99e38d4c40..6c05924201 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java
@@ -155,7 +155,7 @@ public class AccumuloFileOutputFormatIT extends AccumuloClusterHarness {
 
       job.setInputFormat(AccumuloInputFormat.class);
 
-      AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+      AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
           .auths(Authorizations.EMPTY).store(job);
       AccumuloFileOutputFormat.configure().outputPath(new Path(args[1])).sampler(SAMPLER_CONFIG)
           .store(job);
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloInputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloInputFormatIT.java
index 633558bcfe..e524cc5680 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloInputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloInputFormatIT.java
@@ -124,9 +124,8 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
 
       job.setInputFormat(AccumuloInputFormat.class);
 
-      InputFormatBuilder.InputFormatOptions<JobConf> opts =
-          AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties())
-              .table(table).auths(Authorizations.EMPTY);
+      InputFormatBuilder.InputFormatOptions<JobConf> opts = AccumuloInputFormat.configure()
+          .clientProperties(getClientProps()).table(table).auths(Authorizations.EMPTY);
 
       if (sample) {
         opts.samplerConfiguration(SAMPLER_CONFIG);
@@ -221,9 +220,8 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
     try (AccumuloClient accumuloClient = Accumulo.newClient().from(getClientProps()).build()) {
       accumuloClient.tableOperations().create(table);
 
-      AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
-          .auths(auths).fetchColumns(fetchColumns).scanIsolation(true).localIterators(true)
-          .store(job);
+      AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table).auths(auths)
+          .fetchColumns(fetchColumns).scanIsolation(true).localIterators(true).store(job);
 
       AccumuloInputFormat aif = new AccumuloInputFormat();
 
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloRowInputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloRowInputFormatIT.java
index 17da216e8b..5c4116806f 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloRowInputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloRowInputFormatIT.java
@@ -166,8 +166,8 @@ public class AccumuloRowInputFormatIT extends AccumuloClusterHarness {
 
       job.setInputFormat(AccumuloRowInputFormat.class);
 
-      AccumuloRowInputFormat.configure().clientProperties(getClientInfo().getProperties())
-          .table(table).auths(Authorizations.EMPTY).store(job);
+      AccumuloRowInputFormat.configure().clientProperties(getClientProps()).table(table)
+          .auths(Authorizations.EMPTY).store(job);
 
       job.setMapperClass(TestMapper.class);
       job.setMapOutputKeyClass(Key.class);
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/MultiTableInputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/MultiTableInputFormatIT.java
index 32656f4f81..6dbc204fb0 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/MultiTableInputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/MultiTableInputFormatIT.java
@@ -102,8 +102,8 @@ public class MultiTableInputFormatIT extends AccumuloClusterHarness {
 
       job.setInputFormat(AccumuloInputFormat.class);
 
-      AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties())
-          .table(table1).table(table2).store(job);
+      AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table1).table(table2)
+          .store(job);
 
       job.setMapperClass(TestMapper.class);
       job.setMapOutputKeyClass(Key.class);
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/TokenFileIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/TokenFileIT.java
index 51f58377c8..0599d6d615 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/TokenFileIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/TokenFileIT.java
@@ -163,7 +163,7 @@ public class TokenFileIT extends AccumuloClusterHarness {
       File tf = new File(tempDir, "client.properties");
       assertTrue(tf.createNewFile(), "Failed to create file: " + tf);
       try (PrintStream out = new PrintStream(tf)) {
-        getClientInfo().getProperties().store(out, "Credentials for " + getClass().getName());
+        getClientProps().store(out, "Credentials for " + getClass().getName());
       }
 
       MRTokenFileTester.main(new String[] {tf.getAbsolutePath(), table1, table2});
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java
index c2af432a6f..a636be2e94 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java
@@ -168,7 +168,7 @@ public class AccumuloFileOutputFormatIT extends AccumuloClusterHarness {
 
       job.setInputFormatClass(AccumuloInputFormat.class);
 
-      AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+      AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
          .auths(Authorizations.EMPTY).store(job);
       AccumuloFileOutputFormat.configure().outputPath(new Path(args[1])).sampler(SAMPLER_CONFIG)
           .store(job);
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloInputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloInputFormatIT.java
index f4ef37075b..c852180bc0 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloInputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloInputFormatIT.java
@@ -116,7 +116,7 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
     insertData(table, currentTimeMillis());
 
     Job job = Job.getInstance();
-    AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+    AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
         .auths(Authorizations.EMPTY).scanIsolation(true).store(job);
 
     // split table
@@ -136,13 +136,13 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
     List<Range> ranges = new ArrayList<>();
     for (Text text : actualSplits)
       ranges.add(new Range(text));
-    AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+    AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
        .auths(Authorizations.EMPTY).ranges(ranges).store(job);
     splits = inputFormat.getSplits(job);
     assertEquals(actualSplits.size(), splits.size());
 
     // offline mode
-    AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+    AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
        .auths(Authorizations.EMPTY).offlineScan(true).store(job);
     assertThrows(IOException.class, () -> inputFormat.getSplits(job));
 
@@ -156,19 +156,19 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
       // overlapping ranges
       ranges.add(new Range(String.format("%09d", i), String.format("%09d", i + 2)));
 
-    AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+    AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
        .auths(Authorizations.EMPTY).ranges(ranges).offlineScan(true).store(job);
     splits = inputFormat.getSplits(job);
     assertEquals(2, splits.size());
 
-    AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+    AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
        .auths(Authorizations.EMPTY).ranges(ranges).autoAdjustRanges(false).offlineScan(true)
         .store(job);
     splits = inputFormat.getSplits(job);
     assertEquals(ranges.size(), splits.size());
 
     // BatchScan not available for offline scans
-    AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+    AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
        .auths(Authorizations.EMPTY).batchScan(true).offlineScan(true).store(job);
 
     assertThrows(IllegalArgumentException.class, () -> inputFormat.getSplits(job),
@@ -176,27 +176,27 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
 
     // table online tests
     client.tableOperations().online(table, true);
-    AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+    AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
        .auths(Authorizations.EMPTY).store(job);
     // test for resumption of success
     splits = inputFormat.getSplits(job);
     assertEquals(2, splits.size());
 
     // BatchScan not available with isolated iterators
-    AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+    AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
        .auths(Authorizations.EMPTY).batchScan(true).scanIsolation(true).store(job);
 
     assertThrows(IllegalArgumentException.class, () -> inputFormat.getSplits(job),
         "IllegalArgumentException should have been thrown trying to batch scan with isolation");
 
     // BatchScan not available with local iterators
-    AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+    AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
        .auths(Authorizations.EMPTY).batchScan(true).localIterators(true).store(job);
 
     assertThrows(IllegalArgumentException.class, () -> inputFormat.getSplits(job),
         "IllegalArgumentException should have been thrown trying to batch scan locally");
 
-    AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties()).table(table)
+    AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table)
        .auths(Authorizations.EMPTY).batchScan(true).store(job);
 
     // Check we are getting back correct type pf split
@@ -292,9 +292,8 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
 
       job.setInputFormatClass(inputFormatClass);
 
-      InputFormatOptions<Job> opts =
-          AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties())
-              .table(table).auths(Authorizations.EMPTY);
+      InputFormatOptions<Job> opts = AccumuloInputFormat.configure()
+          .clientProperties(getClientProps()).table(table).auths(Authorizations.EMPTY);
       if (sample)
         opts = opts.samplerConfiguration(SAMPLER_CONFIG);
 
@@ -409,7 +408,7 @@ public class AccumuloInputFormatIT extends AccumuloClusterHarness {
     client.tableOperations().create(table);
 
     InputFormatOptions<Job> opts = AccumuloInputFormat.configure()
-        .clientProperties(getClientInfo().getProperties()).table(table).auths(auths);
+        .clientProperties(getClientProps()).table(table).auths(auths);
     opts.fetchColumns(fetchColumns).scanIsolation(true).localIterators(true).store(job);
 
     AccumuloInputFormat aif = new AccumuloInputFormat();
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloOutputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloOutputFormatIT.java
index b2452b9031..5a70558d43 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloOutputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloOutputFormatIT.java
@@ -95,8 +95,8 @@ public class AccumuloOutputFormatIT extends AccumuloClusterHarness {
 
       job.setInputFormatClass(AccumuloInputFormat.class);
 
-      AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties())
-          .table(table1).auths(Authorizations.EMPTY).store(job);
+      AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table1)
+          .auths(Authorizations.EMPTY).store(job);
 
       job.setMapperClass(TestMapper.class);
       job.setMapOutputKeyClass(Key.class);
@@ -105,8 +105,8 @@ public class AccumuloOutputFormatIT extends AccumuloClusterHarness {
       job.setOutputKeyClass(Text.class);
       job.setOutputValueClass(Mutation.class);
 
-      AccumuloOutputFormat.configure().clientProperties(getClientInfo().getProperties())
-          .defaultTable(table2).store(job);
+      AccumuloOutputFormat.configure().clientProperties(getClientProps()).defaultTable(table2)
+          .store(job);
 
       job.setNumReduceTasks(0);
 
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloRowInputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloRowInputFormatIT.java
index b3f7346c4b..0c62290bdb 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloRowInputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloRowInputFormatIT.java
@@ -159,8 +159,8 @@ public class AccumuloRowInputFormatIT extends AccumuloClusterHarness {
 
       job.setInputFormatClass(AccumuloRowInputFormat.class);
 
-      AccumuloRowInputFormat.configure().clientProperties(getClientInfo().getProperties())
-          .table(table).auths(Authorizations.EMPTY).store(job);
+      AccumuloRowInputFormat.configure().clientProperties(getClientProps()).table(table)
+          .auths(Authorizations.EMPTY).store(job);
 
       job.setMapperClass(TestMapper.class);
       job.setMapOutputKeyClass(Key.class);
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/MultiTableInputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/MultiTableInputFormatIT.java
index cf158a5c68..104cd6f3c0 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/MultiTableInputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/MultiTableInputFormatIT.java
@@ -96,8 +96,8 @@ public class MultiTableInputFormatIT extends AccumuloClusterHarness {
 
       job.setInputFormatClass(AccumuloInputFormat.class);
 
-      AccumuloInputFormat.configure().clientProperties(getClientInfo().getProperties())
-          .table(table1).table(table2).store(job);
+      AccumuloInputFormat.configure().clientProperties(getClientProps()).table(table1).table(table2)
+          .store(job);
 
       job.setMapperClass(TestMapper.class);
       job.setMapOutputKeyClass(Key.class);
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/TokenFileIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/TokenFileIT.java
index 43aee531a4..cf2cf6b9d9 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/TokenFileIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/TokenFileIT.java
@@ -157,7 +157,7 @@ public class TokenFileIT extends AccumuloClusterHarness {
       File tf = new File(tempDir, "client.properties");
       assertTrue(tf.createNewFile(), "Failed to create file: " + tf);
       try (PrintStream out = new PrintStream(tf)) {
-        getClientInfo().getProperties().store(out, "Credentials for " + getClass().getName());
+        getClientProps().store(out, "Credentials for " + getClass().getName());
       }
 
       MRTokenFileTester.main(new String[] {tf.getAbsolutePath(), table1, table2});
diff --git a/test/src/main/java/org/apache/accumulo/test/CloseScannerIT.java b/test/src/main/java/org/apache/accumulo/test/CloseScannerIT.java
index 88e1829c55..6a48d18c6c 100644
--- a/test/src/main/java/org/apache/accumulo/test/CloseScannerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CloseScannerIT.java
@@ -46,7 +46,7 @@ public class CloseScannerIT extends AccumuloClusterHarness {
 
       client.tableOperations().create(tableName);
 
-      ReadWriteIT.ingest(client, getClientInfo(), ROWS, COLS, 50, 0, tableName);
+      ReadWriteIT.ingest(client, ROWS, COLS, 50, 0, tableName);
 
       client.tableOperations().flush(tableName, null, null, true);
 
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java b/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java
index ba5670da9e..3cdac26fcb 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/AccumuloClientIT.java
@@ -186,7 +186,7 @@ public class AccumuloClientIT extends AccumuloClusterHarness {
     assertEquals(0, SingletonManager.getReservationCount());
     assertEquals(Mode.CLIENT, SingletonManager.getMode());
 
-    try (AccumuloClient c = Accumulo.newClient().from(getClientInfo().getProperties()).build()) {
+    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       assertEquals(1, SingletonManager.getReservationCount());
 
       c.tableOperations().create(tableName);
@@ -205,7 +205,7 @@ public class AccumuloClientIT extends AccumuloClusterHarness {
 
     assertEquals(0, SingletonManager.getReservationCount());
 
-    AccumuloClient c = Accumulo.newClient().from(getClientInfo().getProperties()).build();
+    AccumuloClient c = Accumulo.newClient().from(getClientProps()).build();
     assertEquals(1, SingletonManager.getReservationCount());
 
     // ensure client created after everything was closed works
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BulkIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BulkIT.java
index 2669237428..276a98a9c1 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BulkIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkIT.java
@@ -28,7 +28,6 @@ import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.clientImpl.ClientInfo;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.accumulo.test.TestIngest;
 import org.apache.accumulo.test.TestIngest.IngestParams;
@@ -56,23 +55,21 @@ public class BulkIT extends AccumuloClusterHarness {
   @Test
   public void test() throws Exception {
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
-      runTest(client, getClientInfo(), getCluster().getFileSystem(),
-          getCluster().getTemporaryPath(), getUniqueNames(1)[0], this.getClass().getName(),
-          testName(), false);
+      runTest(client, getCluster().getFileSystem(), getCluster().getTemporaryPath(),
+          getUniqueNames(1)[0], this.getClass().getName(), testName(), false);
     }
   }
 
   @Test
   public void testOld() throws Exception {
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
-      runTest(client, getClientInfo(), getCluster().getFileSystem(),
-          getCluster().getTemporaryPath(), getUniqueNames(1)[0], this.getClass().getName(),
-          testName(), true);
+      runTest(client, getCluster().getFileSystem(), getCluster().getTemporaryPath(),
+          getUniqueNames(1)[0], this.getClass().getName(), testName(), true);
     }
   }
 
-  static void runTest(AccumuloClient c, ClientInfo info, FileSystem fs, Path basePath,
-      String tableName, String filePrefix, String dirSuffix, boolean useOld) throws Exception {
+  static void runTest(AccumuloClient c, FileSystem fs, Path basePath, String tableName,
+      String filePrefix, String dirSuffix, boolean useOld) throws Exception {
     c.tableOperations().create(tableName);
     Path base = new Path(basePath, "testBulkFail_" + dirSuffix);
     fs.delete(base, true);
@@ -85,7 +82,7 @@ public class BulkIT extends AccumuloClusterHarness {
     fs.mkdirs(bulkFailures);
     fs.mkdirs(files);
 
-    IngestParams params = new IngestParams(info.getProperties(), tableName, N);
+    IngestParams params = new IngestParams(c.properties(), tableName, N);
     params.timestamp = 1;
     params.random = 56;
     params.cols = 1;
@@ -102,7 +99,7 @@ public class BulkIT extends AccumuloClusterHarness {
     TestIngest.ingest(c, fs, params);
 
     bulkLoad(c, tableName, bulkFailures, files, useOld);
-    VerifyParams verifyParams = new VerifyParams(info.getProperties(), tableName, N);
+    VerifyParams verifyParams = new VerifyParams(c.properties(), tableName, N);
     verifyParams.random = 56;
     for (int i = 0; i < COUNT; i++) {
       verifyParams.startRow = i * N;
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java
index e377a97278..f728241bd0 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java
@@ -96,7 +96,7 @@ public class CredentialsIT extends AccumuloClusterHarness {
     assertFalse(token.isDestroyed());
     token.destroy();
     assertTrue(token.isDestroyed());
-    Properties props = getClientInfo().getProperties();
+    Properties props = getClientProps();
     var e = assertThrows(IllegalArgumentException.class,
         () -> Accumulo.newClient().from(props).as("non_existent_user", token).build().close());
     assertEquals(e.getMessage(), "AuthenticationToken has been destroyed");
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index a06394a225..aa577506dc 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -65,7 +65,6 @@ import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.client.admin.TableOperations;
 import org.apache.accumulo.core.clientImpl.ClientContext;
-import org.apache.accumulo.core.clientImpl.ClientInfo;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
@@ -135,8 +134,8 @@ public class ReadWriteIT extends AccumuloClusterHarness {
     cluster.getClusterControl().startAllServers(ServerType.MONITOR);
     try (AccumuloClient accumuloClient = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
-      ingest(accumuloClient, getClientInfo(), ROWS, COLS, 50, 0, tableName);
-      verify(accumuloClient, getClientInfo(), ROWS, COLS, 50, 0, tableName);
+      ingest(accumuloClient, ROWS, COLS, 50, 0, tableName);
+      verify(accumuloClient, ROWS, COLS, 50, 0, tableName);
       String monitorLocation = null;
       while (monitorLocation == null) {
         monitorLocation = MonitorUtil.getLocation((ClientContext) accumuloClient);
@@ -188,14 +187,14 @@ public class ReadWriteIT extends AccumuloClusterHarness {
     }
   }
 
-  public static void ingest(AccumuloClient accumuloClient, ClientInfo info, int rows, int cols,
-      int width, int offset, String tableName) throws Exception {
-    ingest(accumuloClient, info, rows, cols, width, offset, COLF, tableName);
+  public static void ingest(AccumuloClient accumuloClient, int rows, int cols, int width,
+      int offset, String tableName) throws Exception {
+    ingest(accumuloClient, rows, cols, width, offset, COLF, tableName);
   }
 
-  public static void ingest(AccumuloClient accumuloClient, ClientInfo info, int rows, int cols,
-      int width, int offset, String colf, String tableName) throws Exception {
-    IngestParams params = new IngestParams(info.getProperties(), tableName, rows);
+  public static void ingest(AccumuloClient accumuloClient, int rows, int cols, int width,
+      int offset, String colf, String tableName) throws Exception {
+    IngestParams params = new IngestParams(accumuloClient.properties(), tableName, rows);
     params.cols = cols;
     params.dataSize = width;
     params.startRow = offset;
@@ -204,14 +203,14 @@ public class ReadWriteIT extends AccumuloClusterHarness {
     TestIngest.ingest(accumuloClient, params);
   }
 
-  public static void verify(AccumuloClient accumuloClient, ClientInfo info, int rows, int cols,
-      int width, int offset, String tableName) throws Exception {
-    verify(accumuloClient, info, rows, cols, width, offset, COLF, tableName);
+  public static void verify(AccumuloClient accumuloClient, int rows, int cols, int width,
+      int offset, String tableName) throws Exception {
+    verify(accumuloClient, rows, cols, width, offset, COLF, tableName);
   }
 
-  private static void verify(AccumuloClient accumuloClient, ClientInfo info, int rows, int cols,
-      int width, int offset, String colf, String tableName) throws Exception {
-    VerifyParams params = new VerifyParams(info.getProperties(), tableName, rows);
+  private static void verify(AccumuloClient accumuloClient, int rows, int cols, int width,
+      int offset, String colf, String tableName) throws Exception {
+    VerifyParams params = new VerifyParams(accumuloClient.properties(), tableName, rows);
     params.rows = rows;
     params.dataSize = width;
     params.startRow = offset;
@@ -261,8 +260,8 @@ public class ReadWriteIT extends AccumuloClusterHarness {
     // write a few large values
     try (AccumuloClient accumuloClient = Accumulo.newClient().from(getClientProps()).build()) {
       String table = getUniqueNames(1)[0];
-      ingest(accumuloClient, getClientInfo(), 2, 1, 500000, 0, table);
-      verify(accumuloClient, getClientInfo(), 2, 1, 500000, 0, table);
+      ingest(accumuloClient, 2, 1, 500000, 0, table);
+      verify(accumuloClient, 2, 1, 500000, 0, table);
     }
   }
 
@@ -279,23 +278,23 @@ public class ReadWriteIT extends AccumuloClusterHarness {
       throws Exception {
     final AtomicBoolean fail = new AtomicBoolean(false);
     final int CHUNKSIZE = ROWS / 10;
-    ingest(accumuloClient, getClientInfo(), CHUNKSIZE, 1, 50, 0, tableName);
+    ingest(accumuloClient, CHUNKSIZE, 1, 50, 0, tableName);
     int i;
     for (i = 0; i < ROWS; i += CHUNKSIZE) {
       final int start = i;
       Thread verify = new Thread(() -> {
         try {
-          verify(accumuloClient, getClientInfo(), CHUNKSIZE, 1, 50, start, tableName);
+          verify(accumuloClient, CHUNKSIZE, 1, 50, start, tableName);
         } catch (Exception ex) {
           fail.set(true);
         }
       });
       verify.start();
-      ingest(accumuloClient, getClientInfo(), CHUNKSIZE, 1, 50, i + CHUNKSIZE, tableName);
+      ingest(accumuloClient, CHUNKSIZE, 1, 50, i + CHUNKSIZE, tableName);
       verify.join();
       assertFalse(fail.get());
     }
-    verify(accumuloClient, getClientInfo(), CHUNKSIZE, 1, 50, i, tableName);
+    verify(accumuloClient, CHUNKSIZE, 1, 50, i, tableName);
   }
 
   public static Text t(String s) {
@@ -316,7 +315,7 @@ public class ReadWriteIT extends AccumuloClusterHarness {
       accumuloClient.tableOperations().create(tableName);
       accumuloClient.tableOperations().setProperty(tableName, "table.group.g1", "colf");
       accumuloClient.tableOperations().setProperty(tableName, "table.groups.enabled", "g1");
-      ingest(accumuloClient, getClientInfo(), 2000, 1, 50, 0, tableName);
+      ingest(accumuloClient, 2000, 1, 50, 0, tableName);
       accumuloClient.tableOperations().compact(tableName, null, null, true, true);
       try (BatchWriter bw = accumuloClient.createBatchWriter(tableName)) {
         bw.addMutation(m("zzzzzzzzzzz", "colf2", "cq", "value"));
@@ -373,8 +372,8 @@ public class ReadWriteIT extends AccumuloClusterHarness {
 
   private void verifyLocalityGroupsInRFile(final AccumuloClient accumuloClient,
       final String tableName) throws Exception {
-    ingest(accumuloClient, getClientInfo(), 2000, 1, 50, 0, tableName);
-    verify(accumuloClient, getClientInfo(), 2000, 1, 50, 0, tableName);
+    ingest(accumuloClient, 2000, 1, 50, 0, tableName);
+    verify(accumuloClient, 2000, 1, 50, 0, tableName);
     accumuloClient.tableOperations().flush(tableName, null, null, true);
     try (BatchScanner bscanner =
         accumuloClient.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1)) {
@@ -425,9 +424,9 @@ public class ReadWriteIT extends AccumuloClusterHarness {
       int i = 0;
       for (String cfg : config) {
         to.setLocalityGroups(table, getGroups(cfg));
-        ingest(accumuloClient, getClientInfo(), ROWS * (i + 1), 1, 50, ROWS * i, table);
+        ingest(accumuloClient, ROWS * (i + 1), 1, 50, ROWS * i, table);
         to.flush(table, null, null, true);
-        verify(accumuloClient, getClientInfo(), 0, 1, 50, ROWS * (i + 1), table);
+        verify(accumuloClient, 0, 1, 50, ROWS * (i + 1), table);
         i++;
       }
       to.delete(table);
@@ -435,12 +434,12 @@ public class ReadWriteIT extends AccumuloClusterHarness {
       config = new String[] {"lg1:colf", null, "lg1:colf,xyz", "lg1:colf;lg2:colf",};
       i = 1;
       for (String cfg : config) {
-        ingest(accumuloClient, getClientInfo(), ROWS * i, 1, 50, 0, table);
-        ingest(accumuloClient, getClientInfo(), ROWS * i, 1, 50, 0, "xyz", table);
+        ingest(accumuloClient, ROWS * i, 1, 50, 0, table);
+        ingest(accumuloClient, ROWS * i, 1, 50, 0, "xyz", table);
         to.setLocalityGroups(table, getGroups(cfg));
         to.flush(table, null, null, true);
-        verify(accumuloClient, getClientInfo(), ROWS * i, 1, 50, 0, table);
-        verify(accumuloClient, getClientInfo(), ROWS * i, 1, 50, 0, "xyz", table);
+        verify(accumuloClient, ROWS * i, 1, 50, 0, table);
+        verify(accumuloClient, ROWS * i, 1, 50, 0, "xyz", table);
         i++;
       }
     }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java b/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
index d3b1951c82..69e8d1f505 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
@@ -28,7 +28,6 @@ import java.util.Properties;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.clientImpl.ClientInfo;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
@@ -75,11 +74,10 @@ public class RecoveryWithEmptyRFileIT extends ConfigurableMacBase {
     log.info("Ingest some data, verify it was stored properly, replace an"
         + " underlying rfile with an empty one and verify we can scan.");
     Properties props = getClientProperties();
-    ClientInfo info = ClientInfo.from(props);
     try (AccumuloClient client = Accumulo.newClient().from(props).build()) {
       String tableName = getUniqueNames(1)[0];
-      ReadWriteIT.ingest(client, info, ROWS, COLS, 50, 0, tableName);
-      ReadWriteIT.verify(client, info, ROWS, COLS, 50, 0, tableName);
+      ReadWriteIT.ingest(client, ROWS, COLS, 50, 0, tableName);
+      ReadWriteIT.verify(client, ROWS, COLS, 50, 0, tableName);
 
       client.tableOperations().flush(tableName, null, null, true);
       client.tableOperations().offline(tableName, true);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SslIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SslIT.java
index f03ae1686e..bf85a71672 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SslIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SslIT.java
@@ -23,7 +23,6 @@ import java.util.Properties;
 
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.clientImpl.ClientInfo;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -75,7 +74,7 @@ public class SslIT extends ConfigurableMacBase {
   public void bulk() throws Exception {
     Properties props = getClientProperties();
     try (AccumuloClient client = Accumulo.newClient().from(props).build()) {
-      BulkIT.runTest(client, ClientInfo.from(props), cluster.getFileSystem(),
+      BulkIT.runTest(client, cluster.getFileSystem(),
           new Path(getCluster().getConfig().getDir().getAbsolutePath(), "tmp"),
           getUniqueNames(1)[0], this.getClass().getName(), testName(), true);
     }
