cshannon commented on code in PR #3640:
URL: https://github.com/apache/accumulo/pull/3640#discussion_r1277748275
##########
test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java:
##########
@@ -102,6 +119,146 @@ public void mergeSize() throws Exception {
    }
  }
+  @Test
+  public void noChopMergeTest() throws Exception {
+    noChopMergeTest(false);
+  }
+
+  @Test
+  public void noChopMergeTestCompactTrue() throws Exception {
+    noChopMergeTest(true);
+  }
+
+  private void noChopMergeTest(boolean compact) throws Exception {
+    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
+      String tableName = getUniqueNames(1)[0];
+      c.tableOperations().create(tableName);
+      final TableId tableId = TableId.of(c.tableOperations().tableIdMap().get(tableName));
+
+      // First write 1000 rows to a file in the default tablet
+      ingest(c, 1000, 1, tableName);
+      c.tableOperations().flush(tableName, null, null, true);
+
+      System.out.println("\nMetadata after Ingest");
+      printAndVerifyFileMetadata(c, tableId, 1);
+
+      // Add splits so we end up with 4 tablets
+      final SortedSet<Text> splits = new TreeSet<>();
+      for (int i = 250; i <= 750; i += 250) {
+        splits.add(new Text("row_" + String.format("%010d", i)));
+      }
+      c.tableOperations().addSplits(tableName, splits);
+
+      System.out.println("Metadata after Split");
+      verify(c, 1000, 1, tableName);
+      printAndVerifyFileMetadata(c, tableId, 4);
+
+      // Go through and delete two blocks of rows, 101 - 200 and also
+      // 301 - 400, so we can test that the data doesn't come back on merge
+      try (BatchWriter bw = c.createBatchWriter(tableName)) {
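+        // Build the column qualifier to match what ingest() wrote
+        // (col_0000000), so the deletes target the existing entries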
+        byte[] COL_PREFIX = "col_".getBytes(UTF_8);
+        Text colq = new Text(FastFormat.toZeroPaddedString(0, 7, 10, COL_PREFIX));
+
+        for (int i = 101; i <= 200; i++) {
+          Mutation m = new Mutation(new Text("row_" + String.format("%010d", i)));
+          m.putDelete(new Text("colf"), colq);
+          bw.addMutation(m);
+        }
+        for (int i = 301; i <= 400; i++) {
+          Mutation m = new Mutation(new Text("row_" + String.format("%010d", i)));
+          m.putDelete(new Text("colf"), colq);
+          bw.addMutation(m);
+        }
+      }
+
+      System.out.println("Metadata after deleting rows 101 - 200 and 301 - 400");
+      c.tableOperations().compact(tableName,
+          new CompactionConfig().setEndRow(splits.first()).setWait(true));
+      c.tableOperations().flush(tableName, null, null, true);
+      printAndVerifyFileMetadata(c, tableId, 5);
+
+      // Optionally compact before merge
+      if (compact) {
+        c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
+      }
Review Comment:
I was just trying to make sure the test passes both with and without
compacting the table, since compacting changes the rfiles on the system. I was
thinking there could be something weird where merge works if we don't compact
but fails if we do, or vice versa.
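
As an aside, if this IT is on JUnit 5, the two delegating `@Test` methods
could be collapsed into one parameterized test. A minimal sketch, assuming the
`junit-jupiter-params` artifact is on the test classpath (method name and body
mirror the PR's helper):

```java
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

// Runs the same body twice, once with compact=false and once with compact=true
@ParameterizedTest
@ValueSource(booleans = {false, true})
public void noChopMergeTest(boolean compact) throws Exception {
  // ... same body as the private noChopMergeTest(boolean) helper in the diff
}
```

One caveat worth checking: `getUniqueNames()` derives names from the test
method name, so the two invocations of a parameterized test may need to pick
distinct table names to avoid colliding.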