ddanielr commented on code in PR #5863:
URL: https://github.com/apache/accumulo/pull/5863#discussion_r2330935037


##########
test/src/main/java/org/apache/accumulo/test/functional/SplitIT.java:
##########
@@ -281,4 +288,132 @@ public void bulkImportThatCantSplitHangsCompaction() throws Exception {
       assertEquals(1000, c.createScanner(tableName).stream().count());
     }
   }
+
+  /**
+   * The following test will pass successfully but result in a FileNotFoundException error message
+   * being thrown in the TabletServer logs. Additional changes to this test should check the tablet
+   * server log file for the error message.
+   *
+   * @throws Exception throws a general exception
+   */
+  @Test
+  public void testFileNotFoundExceptionDuringSplitComputations() throws Exception {
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
+      String tableName = getUniqueNames(1)[0];
+
+      // Set small split threshold and disable compression to ensure splitting
+      client.tableOperations().create(tableName,
+          new NewTableConfiguration().setProperties(Map.of(Property.TABLE_SPLIT_THRESHOLD.getKey(),
+              "10K", Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K",
+              Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none")));
+
+      try (BatchWriter bw = client.createBatchWriter(tableName)) {
+        for (int i = 0; i < 10_000; i++) {
+          Mutation m = new Mutation(String.format("%08d", i));
+          m.put("cf", "cq", "value" + i);
+          bw.addMutation(m);
+        }
+      }
+
+      // flush and wait
+      client.tableOperations().flush(tableName, null, null, true);
+
+      TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(tableName));
+
+      StoredTabletFile fileToDelete = null;
+      try (TabletsMetadata tabletsMetadata =
+          getServerContext().getAmple().readTablets().forTable(tableId).build()) {
+        for (TabletMetadata tm : tabletsMetadata) {
+          log.info("Tablet {} directory {} has {} files in it", tm.getExtent(), tm.getDirName(),
+              tm.getFiles().size());
+          if (tm.getEndRow() == null) {
+            log.info("found default {} tablet extent of {}", tm.getExtent(), tm.getPrevEndRow());
+          } else {
+            fileToDelete = tm.getFiles().iterator().next();
+            break;
+          }
+        }
+      }
+
+      Objects.requireNonNull(fileToDelete);
+
+      final Path deletePath = fileToDelete.getPath();
+
+      Thread deleterThread = new Thread(() -> {
+        try {
+          Thread.sleep(1_000);
+          FileSystem fs = getFileSystem();
+
+          if (fs.exists(deletePath)) {
+            log.info("Deleting file {} to trigger FileNotFound exception", deletePath);
+            fs.delete(deletePath, false);
+          }
+        } catch (Exception e) {
+          log.error("Error deleting file", e);
+        }
+      });
+      deleterThread.start();
+
+      try (BatchWriter bw = client.createBatchWriter(tableName)) {
+        for (int i = 10_000; i < 20_000; i++) {
+          Mutation m = new Mutation(String.format("%08d", i));
+          m.put("cf", "cq", "value" + i);
+          bw.addMutation(m);
+        }
+      }
+
+      deleterThread.join(5_000);
+      client.tableOperations().flush(tableName, null, null, true);
+    }
+  }
+
+  /**
+   * Attempts to trigger a compaction while a table split point calculation is ongoing.
+   *
+   * @throws Exception throws a general exception
+   */
+
+  @Test
+  public void testCompactionDuringSplitComputations() throws Exception {

Review Comment:
   This test is currently not working as expected. I can't get the timing right to have a compaction complete while a getSplitComputations call is still in progress.

   This might need to be refactored into a mock test under a test tserver so the file counts can be modified.
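
   A rough, untested sketch of one direction (not part of the PR): start a non-waiting user compaction through the public API and keep writing while it runs, so that split computations happen while the compaction is changing the tablet's file set. `compactWhileWriting` below is a hypothetical helper, and the overlap is still best-effort rather than guaranteed:

   ```java
   import org.apache.accumulo.core.client.AccumuloClient;
   import org.apache.accumulo.core.client.BatchWriter;
   import org.apache.accumulo.core.client.admin.CompactionConfig;
   import org.apache.accumulo.core.data.Mutation;

   public class SplitCompactionSketch {
     /**
      * Best-effort attempt to overlap a compaction with split point computations: kick off a user
      * compaction without waiting on it, then write another batch so the tablet keeps re-evaluating
      * its split threshold while the compaction changes the file set.
      */
     static void compactWhileWriting(AccumuloClient client, String tableName) throws Exception {
       // Flush in-memory data and start the compaction, but do not block until it finishes
       client.tableOperations().compact(tableName,
           new CompactionConfig().setFlush(true).setWait(false));

       // Keep writing so split computations run while the compaction is (hopefully) still active
       try (BatchWriter bw = client.createBatchWriter(tableName)) {
         for (int i = 20_000; i < 30_000; i++) {
           Mutation m = new Mutation(String.format("%08d", i));
           m.put("cf", "cq", "value" + i);
           bw.addMutation(m);
         }
       }
     }
   }
   ```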
   
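   On the FileNotFoundException note in the Javadoc of the first test, a minimal sketch of what checking the tablet server logs could look like, assuming the test can get at the mini cluster's log directory; the `logDir` parameter and the `TabletServer` file-name filter are assumptions, not existing SplitIT code:

   ```java
   import java.io.IOException;
   import java.nio.file.Files;
   import java.nio.file.Path;
   import java.util.stream.Stream;

   public class TserverLogCheckSketch {
     /**
      * Returns true if any tablet server log file under logDir mentions FileNotFoundException.
      * How logDir is obtained, and how the log files are named, depends on the test harness.
      */
     static boolean tserverLogsContainFileNotFound(Path logDir) throws IOException {
       try (Stream<Path> files = Files.walk(logDir)) {
         return files.filter(Files::isRegularFile)
             // assumed naming convention; adjust to match the actual tserver log file names
             .filter(p -> p.getFileName().toString().contains("TabletServer"))
             .anyMatch(p -> {
               try (Stream<String> lines = Files.lines(p)) {
                 return lines.anyMatch(line -> line.contains("FileNotFoundException"));
               } catch (IOException e) {
                 return false;
               }
             });
       }
     }
   }
   ```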


