kevinrr888 commented on code in PR #4452:
URL: https://github.com/apache/accumulo/pull/4452#discussion_r1570870229
##########
test/src/main/java/org/apache/accumulo/test/compaction/ExternalCompactionProgressIT.java:
##########
@@ -97,14 +104,90 @@ public void testProgress() throws Exception {
client.tableOperations().attachIterator(table1, setting,
EnumSet.of(IteratorUtil.IteratorScope.majc));
log.info("Compacting table");
- compact(client, table1, 2, "DCQ1", true);
+ compact(client, table1, 2, QUEUE1, true);
verify(client, table1, 2, ROWS);
log.info("Done Compacting table");
compactionFinished.set(true);
checkerThread.join();
verifyProgress();
+ } finally {
+ getCluster().getClusterControl().stopAllServers(ServerType.COMPACTOR);
+
getCluster().getClusterControl().stopAllServers(ServerType.COMPACTION_COORDINATOR);
+ }
+ }
+
+ @Test
+ public void testProgressWithBulkImport() throws Exception {
+    /*
+ * Tests the progress of an external compaction done on a table with bulk imported files.
+ * Progress should stay 0-100. There was previously a bug with the Compactor showing a >100%
+ * progress for compactions with bulk import files.
+ */
+ String[] tableNames = getUniqueNames(2);
+ String tableName1 = tableNames[0];
+ String tableName2 = tableNames[1];
+
+    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
+ log.info("Creating table " + tableName1);
+ createTable(client, tableName1, "cs1");
+ log.info("Creating table " + tableName2);
+ createTable(client, tableName2, "cs1");
+ log.info("Writing " + ROWS + " rows to table " + tableName1);
+ writeData(client, tableName1, ROWS);
+ log.info("Writing " + ROWS + " rows to table " + tableName2);
+ writeData(client, tableName2, ROWS);
+ // This is done to avoid system compactions
+      client.tableOperations().setProperty(tableName1, Property.TABLE_MAJC_RATIO.getKey(), "1000");
+      client.tableOperations().setProperty(tableName2, Property.TABLE_MAJC_RATIO.getKey(), "1000");
+
+
+      getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
+      getCluster().getClusterControl().startCompactors(Compactor.class, 1, QUEUE1);
+
+ String dir = getDir(client, tableName1);
+
+      log.info("Bulk importing files in dir " + dir + " to table " + tableName2);
+ client.tableOperations().importDirectory(dir).to(tableName2).load();
+ log.info("Finished bulk import");
+
+ log.info("Starting a compaction progress checker thread");
+ Thread checkerThread = startChecker();
+ checkerThread.start();
+
+ log.info("Attaching a slow iterator to table " + tableName2);
+      IteratorSetting setting = new IteratorSetting(50, "Slow", SlowIterator.class);
+ SlowIterator.setSleepTime(setting, 1);
+ client.tableOperations().attachIterator(tableName2, setting,
Review Comment:
I see. There are other places where this is done as well — setting it on the
table and then compacting. I can fix these in another PR or in this PR,
whichever you think would be better.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]