Apache9 commented on a change in pull request #1322: HBASE-24033 Add ut for
loading the corrupt recovered hfiles
URL: https://github.com/apache/hbase/pull/1322#discussion_r396084244
##########
File path:
hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java
##########
@@ -163,24 +175,88 @@ private WAL createWAL(Configuration c, Path hbaseRootDir, String logName) throws
return wal;
}
- /**
- * Test writing edits into an HRegion, closing it, splitting logs, opening
- * Region again. Verify seqids.
- */
- @Test
- public void testReplayEditsWrittenViaHRegion()
- throws IOException, SecurityException, IllegalArgumentException, InterruptedException {
+ private Pair<TableDescriptor, RegionInfo> setupTableAndRegion() throws IOException {
final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
final TableDescriptor td = createBasic3FamilyTD(tableName);
final RegionInfo ri = RegionInfoBuilder.newBuilder(tableName).build();
final Path tableDir = FSUtils.getTableDir(this.rootDir, tableName);
deleteDir(tableDir);
FSTableDescriptors.createTableDescriptorForTableDirectory(fs, tableDir, td, false);
- final byte[] rowName = tableName.getName();
- final int countPerFamily = 10;
+ HRegion region = HBaseTestingUtility.createRegionAndWAL(ri, rootDir, this.conf, td);
+ HBaseTestingUtility.closeRegionAndWAL(region);
+ return new Pair<>(td, ri);
+ }
+
+ @Test
+ public void testCorruptRecoveredHFile() throws Exception {
+ Pair<TableDescriptor, RegionInfo> pair = setupTableAndRegion();
+ TableDescriptor td = pair.getFirst();
+ RegionInfo ri = pair.getSecond();
+
+ WAL wal = createWAL(this.conf, rootDir, logName);
+ HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal);
+ final long timestamp = this.ee.currentTime();
+ // Write data and flush
+ for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
+ region.put(new Put(ROW).addColumn(cfd.getName(), Bytes.toBytes("x"), timestamp, VALUE1));
+ }
+ region.flush(true);
+
+ // Now assert edits made it in.
+ Result result1 = region.get(new Get(ROW));
+ assertEquals(td.getColumnFamilies().length, result1.size());
+ for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
+ assertTrue(Bytes.equals(VALUE1, result1.getValue(cfd.getName(), Bytes.toBytes("x"))));
+ }
+
+ // Now close the region
+ region.close(true);
+ wal.shutdown();
+ // split the log
+ WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals);
+
+ // Write a corrupt recovered hfile
+ Path regionDir =
+ new Path(CommonFSUtils.getTableDir(rootDir, td.getTableName()), ri.getEncodedName());
+ for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
+ FileStatus[] files =
+ WALSplitUtil.getRecoveredHFiles(this.fs, regionDir, cfd.getNameAsString());
+ assertNotNull(files);
+ assertTrue(files.length > 0);
+ writeCorruptRecoveredHFile(files[0].getPath());
+ }
+
+ // Failed to reopen the region
+ WAL wal2 = createWAL(this.conf, rootDir, logName);
+ try {
+ HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal2);
+ fail("Should fail to open region");
+ } catch (CorruptHFileException che) {
+ // Expected
+ }
+
+ // Set skip errors to true and reopen the region
+ this.conf.setBoolean(HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS, true);
+ HRegion region2 = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal2);
+ Result result2 = region2.get(new Get(ROW));
+ assertEquals(td.getColumnFamilies().length, result2.size());
+ for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
+ assertTrue(Bytes.equals(VALUE1, result2.getValue(cfd.getName(), Bytes.toBytes("x"))));
+ }
+ this.conf.setBoolean(HConstants.HREGION_EDITS_REPLAY_SKIP_ERRORS, false);
+ }
Review comment:
Add an assert to confirm that we move the broken HFile to the expected place?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services