keith-turner commented on code in PR #5474:
URL: https://github.com/apache/accumulo/pull/5474#discussion_r2054322830


##########
test/src/main/java/org/apache/accumulo/test/ComprehensiveTableOperationsIT.java:
##########
@@ -0,0 +1,957 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.lang.reflect.Method;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.admin.TabletAvailability;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
+import org.apache.accumulo.core.clientImpl.ClientContext;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Filter;
+import org.apache.accumulo.core.iterators.IteratorUtil;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.user.VersioningIterator;
+import org.apache.accumulo.core.metadata.AccumuloTable;
+import org.apache.accumulo.core.metadata.ScanServerRefTabletFile;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
+import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.test.functional.BasicSummarizer;
+import org.apache.accumulo.test.functional.BulkNewIT;
+import org.apache.accumulo.test.functional.CloneTestIT;
+import org.apache.accumulo.test.functional.CompactionIT;
+import org.apache.accumulo.test.functional.ConstraintIT;
+import org.apache.accumulo.test.functional.DeleteRowsIT;
+import org.apache.accumulo.test.functional.LocalityGroupIT;
+import org.apache.accumulo.test.functional.ManagerAssignmentIT;
+import org.apache.accumulo.test.functional.MergeTabletsIT;
+import org.apache.accumulo.test.functional.ReadWriteIT;
+import org.apache.accumulo.test.functional.RenameIT;
+import org.apache.accumulo.test.functional.SlowIterator;
+import org.apache.accumulo.test.functional.SummaryIT;
+import org.apache.accumulo.test.functional.TabletAvailabilityIT;
+import org.apache.accumulo.test.util.Wait;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+
+/**
+ * A comprehensive IT of all table operations against user tables and all system tables while
+ * avoiding duplicating existing testing. This does not test for edge cases, but rather tests for
+ * basic expected functionality of all table operations against user tables and all system tables.
+ */
+public class ComprehensiveTableOperationsIT extends SharedMiniClusterBase {
+  private static final Logger log = LoggerFactory.getLogger(ComprehensiveTableOperationsIT.class);
+  private static final String SLOW_ITER_NAME = "CustomSlowIter";
+  private AccumuloClient client;
+  private TableOperations ops;
+  private String userTable;
+
+  @Override
+  protected Duration defaultTimeout() {
+    return Duration.ofMinutes(1);
+  }
+
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniCluster();
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
+  }
+
+  @BeforeEach
+  public void beforeEach() {
+    client = Accumulo.newClient().from(getClientProps()).build();
+    ops = client.tableOperations();
+  }
+
+  @AfterEach
+  public void afterEach() throws Exception {
+    // ensure none of the FATE or SCAN_REF data we created persists between tests. Also ensure the
+    // user table does not persist between tests
+    if (userTable != null && ops.exists(userTable)) {
+      cleanupFateTable();
+      ops.delete(userTable);
+    }
+    cleanupScanRefTable();
+    client.close();
+  }
+
+  @Test
+  public void testAllTested() {
+    var allTableOps = Arrays.stream(TableOperations.class.getDeclaredMethods()).map(Method::getName)
+        .collect(Collectors.toSet());
+    var testMethodNames = Arrays.stream(ComprehensiveTableOperationsIT.class.getDeclaredMethods())
+        .map(Method::getName).collect(Collectors.toSet());
+    var allTableOpsIter = allTableOps.iterator();
+    while (allTableOpsIter.hasNext()) {
+      var tableOp = allTableOpsIter.next();
+      for (var testMethodName : testMethodNames) {
+        if (testMethodName.contains(tableOp)) {
+          allTableOpsIter.remove();
+          break;
+        }
+      }
+    }
+    assertTrue(allTableOps.isEmpty(), "The table operations " + allTableOps + " are untested");
+  }
+
+  @Test
+  public void testExpectedSystemTables() {
+    var testedSystemTableIds =
+        Set.of(AccumuloTable.ROOT.tableId(), AccumuloTable.METADATA.tableId(),
+            AccumuloTable.FATE.tableId(), AccumuloTable.SCAN_REF.tableId());
+    assertEquals(testedSystemTableIds, AccumuloTable.allTableIds(),
+        "There are newly added system tables that are untested in this IT. 
Ensure each "
+            + "test has testing for the new system table");
+  }
+
+  @Test
+  public void test_list_tableIdMap() throws Exception {
+    // Thoroughly tested elsewhere, but simple enough to test here. Test that all system tables
+    // and user tables are returned
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    var expected = Arrays.stream(AccumuloTable.values()).map(AccumuloTable::tableName)
+        .collect(Collectors.toSet());
+    expected.add(userTable);
+    assertEquals(expected, ops.list());
+    assertEquals(expected, ops.tableIdMap().keySet());
+  }
+
+  @Test
+  public void test_exists() throws Exception {
+    // Thoroughly tested elsewhere, but simple enough to test here. Test that all system tables
+    // and user tables exist
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    var expected = Arrays.stream(AccumuloTable.values()).map(AccumuloTable::tableName)
+        .collect(Collectors.toSet());
+    expected.add(userTable);
+    for (String table : expected) {
+      assertTrue(ops.exists(table));
+    }
+  }
+
+  @Test
+  public void test_create() throws Exception {
+    // Creating user tables is thoroughly tested. Make sure we can't create any of the already
+    // existing system tables, though.
+    for (var systemTable : AccumuloTable.values()) {
+      assertThrows(AccumuloException.class, () -> ops.create(systemTable.tableName()));
+    }
+  }
+
+  @Test
+  public void test_exportTable_importTable() throws Exception {
+    // exportTable, importTable for user tables is tested in ImportExportIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(ImportExportIT.class.getName()));
+    // exportTable, importTable untested for system tables. Test basic functionality here
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    Path baseDir = ImportExportIT.createBaseDir(getCluster(), getClass());
+    var fs = getCluster().getFileSystem();
+
+    // export the user table, so we can test importing into the system tables
+    ops.offline(userTable, true);
+    Path exportUserDir;
+    exportUserDir = new Path(baseDir, "export_userdir");
+    fs.deleteOnExit(exportUserDir);
+    ops.exportTable(userTable, exportUserDir.toString());
+
+    var sysTables = AccumuloTable.values();
+    for (int i = 0; i < sysTables.length; i++) {
+      var sysTable = sysTables[i];
+      Path exportDir = new Path(baseDir, "export" + i);
+      fs.deleteOnExit(exportDir);
+
+      // not offline, can't export
+      assertThrows(IllegalStateException.class,
+          () -> ops.exportTable(sysTable.tableName(), exportDir.toString()));
+
+      switch (sysTable) {
+        case ROOT:
+        case METADATA:
+          // can't offline, so will never be able to export
+          assertThrows(AccumuloException.class, () -> ops.offline(sysTable.tableName(), true));
+          assertThrows(AccumuloException.class,
+              () -> ops.importTable(sysTable.tableName(), exportUserDir.toString()));
+          break;
+        case FATE:
+        case SCAN_REF:
+          ops.offline(sysTable.tableName(), true);
+          try {
+            assertThrows(AccumuloException.class,
+                () -> ops.exportTable(sysTable.tableName(), exportDir.toString()));
+            assertThrows(AccumuloException.class,
+                () -> ops.importTable(sysTable.tableName(), exportUserDir.toString()));
+          } finally {
+            ops.online(sysTable.tableName(), true);
+          }
+          break;
+        default:
+          throw new IllegalStateException("Unrecognized table: " + sysTable);
+      }
+    }
+  }
+
+  @Test
+  public void test_addSplits_putSplits_listSplits_splitRangeByTablets() throws Exception {
+    // note that addSplits and putSplits are implemented the same, just take different args. No
+    // need to test both
+
+    // addSplits, listSplits tested elsewhere for METADATA, ROOT, and user tables, but testing here
+    // as well since this setup is needed to test for splitRangeByTablets anyway, which is untested
+    // elsewhere
+
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    // system and user tables
+    var allTables = new ArrayList<>(ops.list());
+
+    for (int i = 0; i < allTables.size(); i++) {
+      SortedSet<Text> splits = new TreeSet<>();
+      splits.add(new Text(i + ""));
+      String table = allTables.get(i);
+
+      if (table.equals(AccumuloTable.ROOT.tableName())) {
+        // cannot add splits to ROOT
+        assertThrows(AccumuloException.class, () -> ops.addSplits(table, splits));
+        assertEquals(0, ops.listSplits(table).size());
+      } else {
+        ops.addSplits(table, splits);
+        assertTrue(ops.listSplits(table).containsAll(splits));
+      }
+
+      assertEquals(ops.splitRangeByTablets(table, new Range(), 99).size(),
+          ops.listSplits(table).size() + 1);
+    }
+  }
+
+  @Test
+  public void test_locate() throws Exception {
+    // locate for user tables is tested in LocatorIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(LocatorIT.class.getName()));
+    // locate for METADATA and ROOT tables is tested in ManagerAssignmentIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(ManagerAssignmentIT.class.getName()));
+
+    // basic functionality check for locate on FATE and SCAN_REF tables
+    var fateLocations =
+        ops.locate(AccumuloTable.FATE.tableName(), Collections.singletonList(new Range()));
+    var fateGroupByTablet = fateLocations.groupByTablet().keySet();
+    assertFalse(fateGroupByTablet.isEmpty());
+    fateGroupByTablet.forEach(tid -> {
+      var tabletLoc = fateLocations.getTabletLocation(tid);
+      assertNotNull(tabletLoc);
+      assertTrue(tabletLoc.contains(":"));
+    });
+    var scanRefLocations =
+        ops.locate(AccumuloTable.SCAN_REF.tableName(), Collections.singletonList(new Range()));
+    var scanRefGroupByTablet = scanRefLocations.groupByTablet().keySet();
+    assertFalse(scanRefGroupByTablet.isEmpty());
+    scanRefGroupByTablet.forEach(tid -> {
+      var tabletLoc = scanRefLocations.getTabletLocation(tid);
+      assertNotNull(tabletLoc);
+      assertTrue(tabletLoc.contains(":"));
+    });
+  }
+
+  @Test
+  public void test_getMaxRow_deleteRows() throws Exception {
+    // getMaxRow for user tables is tested in FindMaxIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(FindMaxIT.class.getName()));
+    // getMaxRow not tested for system tables. Test basic functionality here
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    createFateTableRow(userTable);
+    createScanRefTableRow();
+    for (var sysTable : AccumuloTable.values()) {
+      var maxRow =
+          ops.getMaxRow(sysTable.tableName(), Authorizations.EMPTY, null, true, null, true);
+      log.info("Max row of {} : {}", sysTable, maxRow);
+      assertNotNull(maxRow);
+    }
+
+    // deleteRows for user tables is tested in DeleteRowsIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(DeleteRowsIT.class.getName()));
+    // deleteRows not tested for system tables. Test basic functionality here
+    for (var sysTable : AccumuloTable.values()) {
+      switch (sysTable) {
+        case METADATA:
+        case ROOT:
+          // should not be able to delete rows of METADATA or ROOT tables
+          assertThrows(AccumuloException.class,
+              () -> ops.deleteRows(sysTable.tableName(), null, null));
+          break;
+        case FATE:
+          break;
+        case SCAN_REF:
+          cleanupScanRefTable();
+          // check max row again
+          assertNull(
+              ops.getMaxRow(sysTable.tableName(), Authorizations.EMPTY, null, true, null, true));
+          break;
+        default:
+          throw new IllegalStateException("Unrecognized table: " + sysTable);
+      }
+    }
+  }
+
+  @Test
+  public void test_merge() throws Exception {
+    // merge for user tables is tested in various ITs. One example is MergeTabletsIT. Ensure
+    // test exists
+    assertDoesNotThrow(() -> Class.forName(MergeTabletsIT.class.getName()));
+    // merge for METADATA and ROOT system tables tested in MetaSplitIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(MetaSplitIT.class.getName()));
+    // merge for FATE and SCAN_REF tables not tested. Test basic functionality here
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    var fateRow1 = createFateTableRow(userTable);
+    createFateTableRow(userTable); // fate row 2
+    var scanRefRow1 = createScanRefTableRow();
+    createScanRefTableRow(); // scan ref row 2
+
+    SortedSet<Text> fateSplits = new TreeSet<>();
+    fateSplits.add(new Text(fateRow1));
+    ops.addSplits(AccumuloTable.FATE.tableName(), fateSplits);
+    // there may be preexisting splits on the FATE table from a previous test
+    assertTrue(ops.listSplits(AccumuloTable.FATE.tableName()).contains(fateRow1));
+    ops.merge(AccumuloTable.FATE.tableName(), null, null);
+    assertTrue(ops.listSplits(AccumuloTable.FATE.tableName()).isEmpty());
+
+    SortedSet<Text> scanRefSplits = new TreeSet<>();
+    scanRefSplits.add(new Text(scanRefRow1));
+    ops.addSplits(AccumuloTable.SCAN_REF.tableName(), scanRefSplits);
+    // there may be preexisting splits on the SCAN_REF table from a previous test
+    assertTrue(ops.listSplits(AccumuloTable.SCAN_REF.tableName()).contains(scanRefRow1));
+    ops.merge(AccumuloTable.SCAN_REF.tableName(), null, null);
+    assertTrue(ops.listSplits(AccumuloTable.SCAN_REF.tableName()).isEmpty());
+  }
+
+  @Test
+  public void test_compact() throws Exception {
+    // compact for user tables is tested in various ITs. One example is CompactionIT. Ensure
+    // test exists
+    assertDoesNotThrow(() -> Class.forName(CompactionIT.class.getName()));
+    // disable the GC to prevent automatic compactions on METADATA and ROOT tables
+    getCluster().getClusterControl().stopAllServers(ServerType.GARBAGE_COLLECTOR);
+    try {
+      // test basic functionality for system tables
+      userTable = getUniqueNames(1)[0];
+      ops.create(userTable);
+
+      // create some RFiles for the METADATA and ROOT tables by creating some data in the user
+      // table, flushing that table, then the METADATA table, then the ROOT table
+      for (int i = 0; i < 3; i++) {
+        try (var bw = client.createBatchWriter(userTable)) {
+          var mut = new Mutation("r" + i);
+          mut.put("cf", "cq", "v");
+          bw.addMutation(mut);
+        }
+        ops.flush(userTable, null, null, true);
+        ops.flush(AccumuloTable.METADATA.tableName(), null, null, true);
+        ops.flush(AccumuloTable.ROOT.tableName(), null, null, true);
+      }
+
+      for (var sysTable : List.of(AccumuloTable.ROOT, AccumuloTable.METADATA,
+          AccumuloTable.SCAN_REF, AccumuloTable.FATE)) {
+        // create some RFiles for FATE and SCAN_REF tables
+        if (sysTable == AccumuloTable.SCAN_REF) {
+          createScanRefTableRow();
+          ops.flush(AccumuloTable.SCAN_REF.tableName(), null, null, true);
+        } else if (sysTable == AccumuloTable.FATE) {
+          createFateTableRow(userTable);
+          ops.flush(AccumuloTable.FATE.tableName(), null, null, true);
+        }
+
+        Set<StoredTabletFile> stfsBeforeCompact = getStoredTabFiles(sysTable);
+
+        log.info("Compacting " + sysTable);
+        ops.compact(sysTable.tableName(), null, null, true, true);
+        log.info("Finished compacting " + sysTable);
+
+        // RFiles resulting from a compaction begin with 'A'. Wait until we see an RFile beginning
+        // with 'A' that was not present before the compaction.
+        Wait.waitFor(() -> {
+          var stfsAfterCompact = getStoredTabFiles(sysTable);
+          String regex = "^A.*\\.rf$";
+          var A_stfsBeforeCompaction = stfsBeforeCompact.stream()
+              .filter(stf -> stf.getFileName().matches(regex)).collect(Collectors.toSet());
+          var A_stfsAfterCompaction = stfsAfterCompact.stream()
+              .filter(stf -> stf.getFileName().matches(regex)).collect(Collectors.toSet());
+          return !Sets.difference(A_stfsAfterCompaction, A_stfsBeforeCompaction).isEmpty();
+        });
+      }
+    } finally {
+      getCluster().getClusterControl().startAllServers(ServerType.GARBAGE_COLLECTOR);
+    }
+  }
+
+  @Test
+  public void test_cancelCompaction() throws Exception {
+    // cancelCompaction for user tables is tested in various ITs. One example is TableOperationsIT.
+    // Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(TableOperationsIT.class.getName()));
+    // test basic functionality for system tables
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+
+    // Need some data in all the system tables. This allows the slow iterator we attach to the
+    // system table to work when we compact. ROOT and METADATA will already have data, so just need
+    // to create some data for the other system tables
+    createFateTableRow(userTable);
+    createScanRefTableRow();
+
+    for (var sysTable : AccumuloTable.values()) {
+      try {
+        var zrw = getCluster().getServerContext().getZooSession().asReaderWriter();
+        attachSlowMajcIterator(sysTable.tableName());
+
+        var metaFatesBeforeCompact = new HashSet<>(zrw.getChildren(Constants.ZFATE));
+
+        log.info("Compacting " + sysTable);
+        ops.compact(sysTable.tableName(), null, null, true, false);
+        log.info("Initiated compaction for " + sysTable);
+
+        // Wait for the compaction to be started
+        Wait.waitFor(() -> {
+          var metaFatesAfterCompact = new HashSet<>(zrw.getChildren(Constants.ZFATE));
+          return !Sets.difference(metaFatesAfterCompact, metaFatesBeforeCompact).isEmpty();
+        });
+
+        log.info("Cancelling compaction " + sysTable);
+        ops.cancelCompaction(sysTable.tableName());
+
+        // We can be sure that the compaction has been cancelled once we see no FATE operations
+        Wait.waitFor(() -> {
+          Set<String> metaFatesAfterCancelCompact = new 
HashSet<>(zrw.getChildren(Constants.ZFATE));
+          return metaFatesAfterCancelCompact.isEmpty();
+        });
+      } finally {
+        removeSlowMajcIterator(sysTable.tableName());
+      }
+    }
+  }
+
+  @Test
+  public void test_delete() throws Exception {
+    // delete for user tables is tested in TableOperationsIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(TableOperationsIT.class.getName()));
+    // delete not tested for system tables. Test basic functionality here
+    for (var sysTable : AccumuloTable.values()) {
+      assertThrows(AccumuloException.class, () -> ops.delete(sysTable.tableName()));
+    }
+  }
+
+  @Test
+  public void test_clone() throws Exception {
+    // cloning user and system tables is tested in CloneTestIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(CloneTestIT.class.getName()));
+  }
+
+  @Test
+  public void test_rename() throws Exception {
+    // rename for user tables is tested in RenameIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(RenameIT.class.getName()));
+    // rename not tested for system tables. Test basic functionality here
+    var sysTables = AccumuloTable.values();
+    var tableNames = getUniqueNames(sysTables.length);
+
+    for (int i = 0; i < sysTables.length; i++) {
+      var sysTable = sysTables[i];
+      var tableName = tableNames[i];
+      assertThrows(AccumuloException.class,
+          () -> client.tableOperations().rename(sysTable.tableName(), tableName));
+    }
+  }
+
+  @Test
+  public void test_flush() throws Exception {
+    // flush for user tables and the METADATA and ROOT tables is tested in CompactionIT. Ensure
+    // test exists
+    assertDoesNotThrow(() -> Class.forName(CompactionIT.class.getName()));
+    // flush for FATE and SCAN_REF is not tested elsewhere. Test basic functionality here
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    createFateTableRow(userTable);
+    createScanRefTableRow();
+
+    for (var sysTable : Set.of(AccumuloTable.FATE, AccumuloTable.SCAN_REF)) {
+      int numFilesBeforeFlush = 0;
+
+      try (TabletsMetadata tabletsMetadata = getCluster().getServerContext().getAmple()
+          .readTablets().forTable(sysTable.tableId()).build()) {
+        for (var tm : tabletsMetadata) {
+          numFilesBeforeFlush += tm.getFiles().size();
+        }
+      }
+      ops.flush(sysTable.tableName(), null, null, true);
+      // Wait until we see the new file
+      final int finalNumFilesBeforeFlush = numFilesBeforeFlush;
+      Wait.waitFor(() -> {
+        try (TabletsMetadata tabletsMetadata = getCluster().getServerContext().getAmple()
+            .readTablets().forTable(sysTable.tableId()).build()) {
+          int numFilesAfterFlush = 0;
+          for (var tm : tabletsMetadata) {
+            numFilesAfterFlush += tm.getFiles().size();
+          }
+          return numFilesAfterFlush > finalNumFilesBeforeFlush;
+        }
+      });
+    }
+  }
+
+  @Test
+  public void
+      test_setProperty_modifyProperties_removeProperty_getProperties_getTableProperties_getConfiguration()
+          throws Exception {
+    // These may be tested elsewhere across several tests, but simpler to test all here
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    // system and user tables
+    Set<String> allTables = ops.list();
+    for (String tableName : allTables) {
+      // setProperty
+      String propKey = Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "prop";
+      String propVal = "val";
+      String newPropVal = "newval";
+      ops.setProperty(tableName, propKey, propVal);
+      // getProperties, getTableProperties, and getConfiguration
+      assertTrue(propFound(tableName, propKey, propVal));
+      assertEquals(propVal, ops.getTableProperties(tableName).get(propKey));
+      assertEquals(propVal, ops.getConfiguration(tableName).get(propKey));
+      // modifyProperties
+      ops.modifyProperties(tableName, properties -> properties.put(propKey, newPropVal));
+      // getProperties, getTableProperties, and getConfiguration
+      assertTrue(propFound(tableName, propKey, newPropVal));
+      assertEquals(newPropVal, ops.getTableProperties(tableName).get(propKey));
+      assertEquals(newPropVal, ops.getConfiguration(tableName).get(propKey));
+      // removeProperty
+      ops.removeProperty(tableName, propKey);
+      // getProperties, getTableProperties, and getConfiguration
+      assertFalse(propFound(tableName, propKey, newPropVal));
+      assertNull(ops.getTableProperties(tableName).get(propKey));
+      assertNull(ops.getConfiguration(tableName).get(propKey));
+    }
+  }
+
+  @Test
+  public void test_setLocalityGroups_getLocalityGroups() throws Exception {
+    // setLocalityGroups, getLocalityGroups for user tables is tested in LocalityGroupIT. Ensure
+    // test exists
+    assertDoesNotThrow(() -> Class.forName(LocalityGroupIT.class.getName()));
+    // setLocalityGroups, getLocalityGroups for system tables are not tested elsewhere. Test basic
+    // functionality here
+    for (var sysTable : AccumuloTable.values()) {
+      LocalityGroupIT.createAndSetLocalityGroups(client, sysTable.tableName());
+      LocalityGroupIT.verifyLocalityGroupSet(client, sysTable.tableName());
+    }
+  }
+
+  @Test
+  public void test_importDirectory() throws Exception {
+    // importDirectory for user tables is tested in BulkNewIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(BulkNewIT.class.getName()));
+    // importDirectory for system tables is not tested elsewhere. Test basic functionality here
+    var rootPath = getCluster().getTemporaryPath().toString();
+    var dir = rootPath + "/" + getUniqueNames(1)[0];
+    BulkNewIT.writeData(dir + "/f1.", getCluster().getServerContext().getConfiguration(), 0, 5, 0);
+
+    for (var sysTable : AccumuloTable.values()) {
+      assertThrows(Exception.class, () -> ops.importDirectory(dir).to(sysTable.tableName()).load());

Review Comment:
   For these different exceptions you are seeing, do they seem correct? Like, is the AccumuloException related to a non-security issue, and is the AccumuloSecurityException related to a security issue?
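   
   If they do line up that way, it might also be worth having the test pin down the expected exception type per table instead of the broad Exception.class. A rough sketch of what that could look like in test_importDirectory, reusing the existing ops/dir/log from the test; the per-table mapping below is only a guess at the behavior you are observing, not something I have verified:
   
       for (var sysTable : AccumuloTable.values()) {
         // Hypothetical mapping - adjust if the observed behavior is different
         Class<? extends Exception> expected =
             (sysTable == AccumuloTable.ROOT || sysTable == AccumuloTable.METADATA)
                 ? AccumuloSecurityException.class : AccumuloException.class;
         // assertThrows fails if a different exception type (or none) is thrown
         var e = assertThrows(expected,
             () -> ops.importDirectory(dir).to(sysTable.tableName()).load());
         log.info("importDirectory into {} failed as expected", sysTable, e);
       }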



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscr...@accumulo.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
