kevinrr888 commented on code in PR #5474:
URL: https://github.com/apache/accumulo/pull/5474#discussion_r2052576349


##########
test/src/main/java/org/apache/accumulo/test/ComprehensiveTableOperationsIT.java:
##########
@@ -0,0 +1,957 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.lang.reflect.Method;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Accumulo;
+import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.admin.TabletAvailability;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.client.summary.SummarizerConfiguration;
+import org.apache.accumulo.core.clientImpl.ClientContext;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Filter;
+import org.apache.accumulo.core.iterators.IteratorUtil;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.user.VersioningIterator;
+import org.apache.accumulo.core.metadata.AccumuloTable;
+import org.apache.accumulo.core.metadata.ScanServerRefTabletFile;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
+import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.test.functional.BasicSummarizer;
+import org.apache.accumulo.test.functional.BulkNewIT;
+import org.apache.accumulo.test.functional.CloneTestIT;
+import org.apache.accumulo.test.functional.CompactionIT;
+import org.apache.accumulo.test.functional.ConstraintIT;
+import org.apache.accumulo.test.functional.DeleteRowsIT;
+import org.apache.accumulo.test.functional.LocalityGroupIT;
+import org.apache.accumulo.test.functional.ManagerAssignmentIT;
+import org.apache.accumulo.test.functional.MergeTabletsIT;
+import org.apache.accumulo.test.functional.ReadWriteIT;
+import org.apache.accumulo.test.functional.RenameIT;
+import org.apache.accumulo.test.functional.SlowIterator;
+import org.apache.accumulo.test.functional.SummaryIT;
+import org.apache.accumulo.test.functional.TabletAvailabilityIT;
+import org.apache.accumulo.test.util.Wait;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+
+/**
+ * A comprehensive IT of all table operations against user tables and all system tables while
+ * avoiding duplicating existing testing. This does not test for edge cases, but rather tests for
+ * basic expected functionality of all table operations against user tables and all system tables.
+ */
+public class ComprehensiveTableOperationsIT extends SharedMiniClusterBase {
+  private static final Logger log = LoggerFactory.getLogger(ComprehensiveTableOperationsIT.class);
+  private static final String SLOW_ITER_NAME = "CustomSlowIter";
+  private AccumuloClient client;
+  private TableOperations ops;
+  private String userTable;
+
+  @Override
+  protected Duration defaultTimeout() {
+    return Duration.ofMinutes(1);
+  }
+
+  @BeforeAll
+  public static void setup() throws Exception {
+    SharedMiniClusterBase.startMiniCluster();
+  }
+
+  @AfterAll
+  public static void teardown() {
+    SharedMiniClusterBase.stopMiniCluster();
+  }
+
+  @BeforeEach
+  public void beforeEach() {
+    client = Accumulo.newClient().from(getClientProps()).build();
+    ops = client.tableOperations();
+  }
+
+  @AfterEach
+  public void afterEach() throws Exception {
+    // ensure none of the FATE or SCAN_REF data we created persists between tests. Also ensure the
+    // user table does not persist between tests
+    if (userTable != null && ops.exists(userTable)) {
+      cleanupFateTable();
+      ops.delete(userTable);
+    }
+    cleanupScanRefTable();
+    client.close();
+  }
+
+  @Test
+  public void testAllTested() {
+    var allTableOps = Arrays.stream(TableOperations.class.getDeclaredMethods()).map(Method::getName)
+        .collect(Collectors.toSet());
+    var testMethodNames = Arrays.stream(ComprehensiveTableOperationsIT.class.getDeclaredMethods())
+        .map(Method::getName).collect(Collectors.toSet());
+    var allTableOpsIter = allTableOps.iterator();
+    while (allTableOpsIter.hasNext()) {
+      var tableOp = allTableOpsIter.next();
+      for (var testMethodName : testMethodNames) {
+        if (testMethodName.contains(tableOp)) {
+          allTableOpsIter.remove();
+          break;
+        }
+      }
+    }
+    assertTrue(allTableOps.isEmpty(), "The table operations " + allTableOps + " are untested");
+  }
+
+  @Test
+  public void testExpectedSystemTables() {
+    var testedSystemTableIds =
+        Set.of(AccumuloTable.ROOT.tableId(), AccumuloTable.METADATA.tableId(),
+            AccumuloTable.FATE.tableId(), AccumuloTable.SCAN_REF.tableId());
+    assertEquals(testedSystemTableIds, AccumuloTable.allTableIds(),
+        "There are newly added system tables that are untested in this IT. 
Ensure each "
+            + "test has testing for the new system table");
+  }
+
+  @Test
+  public void test_list_tableIdMap() throws Exception {
+    // Thoroughly tested elsewhere, but simple enough to test here. Test that all system tables
+    // and user tables are returned
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    var expected = Arrays.stream(AccumuloTable.values()).map(AccumuloTable::tableName)
+        .collect(Collectors.toSet());
+    expected.add(userTable);
+    assertEquals(expected, ops.list());
+    assertEquals(expected, ops.tableIdMap().keySet());
+  }
+
+  @Test
+  public void test_exists() throws Exception {
+    // Thoroughly tested elsewhere, but simple enough to test here. Test that all system tables
+    // and user tables exist
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    var expected = Arrays.stream(AccumuloTable.values()).map(AccumuloTable::tableName)
+        .collect(Collectors.toSet());
+    expected.add(userTable);
+    for (String table : expected) {
+      assertTrue(ops.exists(table));
+    }
+  }
+
+  @Test
+  public void test_create() throws Exception {
+    // Creating user tables is thoroughly tested. Make sure we can't create any of the already
+    // existing system tables, though.
+    for (var systemTable : AccumuloTable.values()) {
+      assertThrows(AccumuloException.class, () -> ops.create(systemTable.tableName()));
+    }
+  }
+
+  @Test
+  public void test_exportTable_importTable() throws Exception {
+    // exportTable, importTable for user tables is tested in ImportExportIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(ImportExportIT.class.getName()));
+    // exportTable, importTable untested for system tables. Test basic functionality here
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    Path baseDir = ImportExportIT.createBaseDir(getCluster(), getClass());
+    var fs = getCluster().getFileSystem();
+
+    // export the user table, so we can test importing into the system tables
+    ops.offline(userTable, true);
+    Path exportUserDir;
+    exportUserDir = new Path(baseDir, "export_userdir");
+    fs.deleteOnExit(exportUserDir);
+    ops.exportTable(userTable, exportUserDir.toString());
+
+    var sysTables = AccumuloTable.values();
+    for (int i = 0; i < sysTables.length; i++) {
+      var sysTable = sysTables[i];
+      Path exportDir = new Path(baseDir, "export" + i);
+      fs.deleteOnExit(exportDir);
+
+      // not offline, can't export
+      assertThrows(IllegalStateException.class,
+          () -> ops.exportTable(sysTable.tableName(), exportDir.toString()));
+
+      switch (sysTable) {
+        case ROOT:
+        case METADATA:
+          // can't offline, so will never be able to export
+          assertThrows(AccumuloException.class, () -> ops.offline(sysTable.tableName(), true));
+          assertThrows(AccumuloException.class,
+              () -> ops.importTable(sysTable.tableName(), exportUserDir.toString()));
+          break;
+        case FATE:
+        case SCAN_REF:
+          ops.offline(sysTable.tableName(), true);
+          try {
+            assertThrows(AccumuloException.class,
+                () -> ops.exportTable(sysTable.tableName(), exportDir.toString()));
+            assertThrows(AccumuloException.class,
+                () -> ops.importTable(sysTable.tableName(), exportUserDir.toString()));
+          } finally {
+            ops.online(sysTable.tableName(), true);
+          }
+          break;
+        default:
+          throw new IllegalStateException("Unrecognized table: " + sysTable);
+      }
+    }
+  }
+
+  @Test
+  public void test_addSplits_putSplits_listSplits_splitRangeByTablets() throws Exception {
+    // note that addSplits and putSplits are implemented the same, just take different args. No
+    // need to test both
+
+    // addSplits, listSplits tested elsewhere for METADATA, ROOT, and user tables, but testing here
+    // as well since this setup is needed to test for splitRangeByTablets anyway, which is untested
+    // elsewhere
+
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    // system and user tables
+    var allTables = new ArrayList<>(ops.list());
+
+    for (int i = 0; i < allTables.size(); i++) {
+      SortedSet<Text> splits = new TreeSet<>();
+      splits.add(new Text(i + ""));
+      String table = allTables.get(i);
+
+      if (table.equals(AccumuloTable.ROOT.tableName())) {
+        // cannot add splits to ROOT
+        assertThrows(AccumuloException.class, () -> ops.addSplits(table, splits));
+        assertEquals(0, ops.listSplits(table).size());
+      } else {
+        ops.addSplits(table, splits);
+        assertTrue(ops.listSplits(table).containsAll(splits));
+      }
+
+      assertEquals(ops.splitRangeByTablets(table, new Range(), 99).size(),
+          ops.listSplits(table).size() + 1);
+    }
+  }
+
+  @Test
+  public void test_locate() throws Exception {
+    // locate for user tables is tested in LocatorIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(LocatorIT.class.getName()));
+    // locate for METADATA and ROOT tables is tested in ManagerAssignmentIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(ManagerAssignmentIT.class.getName()));
+
+    // basic functionality check for locate on FATE and SCAN_REF tables
+    var fateLocations =
+        ops.locate(AccumuloTable.FATE.tableName(), Collections.singletonList(new Range()));
+    var fateGroupByTablet = fateLocations.groupByTablet().keySet();
+    assertFalse(fateGroupByTablet.isEmpty());
+    fateGroupByTablet.forEach(tid -> {
+      var tabletLoc = fateLocations.getTabletLocation(tid);
+      assertNotNull(tabletLoc);
+      assertTrue(tabletLoc.contains(":"));
+    });
+    var scanRefLocations =
+        ops.locate(AccumuloTable.SCAN_REF.tableName(), Collections.singletonList(new Range()));
+    var scanRefGroupByTablet = scanRefLocations.groupByTablet().keySet();
+    assertFalse(scanRefGroupByTablet.isEmpty());
+    scanRefGroupByTablet.forEach(tid -> {
+      var tabletLoc = scanRefLocations.getTabletLocation(tid);
+      assertNotNull(tabletLoc);
+      assertTrue(tabletLoc.contains(":"));
+    });
+  }
+
+  @Test
+  public void test_getMaxRow_deleteRows() throws Exception {
+    // getMaxRow for user tables is tested in FindMaxIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(FindMaxIT.class.getName()));
+    // getMaxRow not tested for system tables. Test basic functionality here
+    userTable = getUniqueNames(1)[0];
+    ops.create(userTable);
+    createFateTableRow(userTable);
+    createScanRefTableRow();
+    for (var sysTable : AccumuloTable.values()) {
+      var maxRow =
+          ops.getMaxRow(sysTable.tableName(), Authorizations.EMPTY, null, true, null, true);
+      log.info("Max row of {} : {}", sysTable, maxRow);
+      assertNotNull(maxRow);
+    }
+
+    // deleteRows for user tables is tested in DeleteRowsIT. Ensure test exists
+    assertDoesNotThrow(() -> Class.forName(DeleteRowsIT.class.getName()));
+    // deleteRows not tested for system tables. Test basic functionality here
+    for (var sysTable : AccumuloTable.values()) {
+      switch (sysTable) {
+        case METADATA:
+        case ROOT:
+          // should not be able to delete rows of METADATA or ROOT tables
+          assertThrows(AccumuloException.class,
+              () -> ops.deleteRows(sysTable.tableName(), null, null));
+          break;
+        case FATE:
+          break;

Review Comment:
   Created https://github.com/apache/accumulo/pull/5491
   
   When testing this, I noticed you can call `client.tableOperations.deleteRows` with no problems for FATE and SCAN_REF, but when trying to use a batch writer (`BatchWriter bw = client.createBatchWriter(AccumuloTable.SCAN_REF.tableName())`, for example) you cannot write mutations that way:
   `org.apache.accumulo.core.client.MutationsRejectedException: constraint violation codes : []  security codes: {accumulo.scanref(ID:+scanref)=[PERMISSION_DENIED]}  # server errors 0 # exceptions 0`
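   
   For reference, a minimal sketch of that batch writer attempt (illustrative only, not part of this PR; the class name, helper name, and `clientProps` parameter are hypothetical, and the write is expected to fail as described above):
   
   ```java
   import java.util.Properties;
   
   import org.apache.accumulo.core.client.Accumulo;
   import org.apache.accumulo.core.client.AccumuloClient;
   import org.apache.accumulo.core.client.BatchWriter;
   import org.apache.accumulo.core.data.Mutation;
   import org.apache.accumulo.core.metadata.AccumuloTable;
   
   class ScanRefWriteSketch {
     // clientProps is assumed to hold valid Accumulo client connection properties
     static void writeToScanRef(Properties clientProps) throws Exception {
       try (AccumuloClient client = Accumulo.newClient().from(clientProps).build();
           BatchWriter bw = client.createBatchWriter(AccumuloTable.SCAN_REF.tableName())) {
         Mutation m = new Mutation("row");
         m.put("cf", "cq", "val");
         bw.addMutation(m);
       } // close() flushes the mutation; this is where MutationsRejectedException is thrown,
         // with security codes showing PERMISSION_DENIED for accumulo.scanref
     }
   }
   ```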


