Author: stack
Date: Mon Mar 31 12:26:47 2008
New Revision: 643110
URL: http://svn.apache.org/viewvc?rev=643110&view=rev
Log:
HBASE-550 EOF trying to read reconstruction log stops region deployment
M src/java/org/apache/hadoop/hbase/regionserver/HStore.java
(Constructor) If an exception comes out of the doReconstructionLog method,
log it and keep going. The presumption is that it is the result of a lack of
HADOOP-1700.
(doReconstructionLog): Check for an empty log file.
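For illustration only -- a minimal, self-contained sketch of the
catch-and-continue pattern this commit adds around reconstruction-log
replay. The class, the replayReconstructionLog helper, and the use of
java.util.logging are stand-ins, not the actual HStore code:

  import java.io.IOException;
  import java.util.logging.Level;
  import java.util.logging.Logger;

  class ReconstructionSketch {
    private static final Logger LOG =
      Logger.getLogger(ReconstructionSketch.class.getName());

    // Mirrors the constructor change: a failed replay is logged loudly
    // but no longer aborts deployment of the store.
    void open(String storeName) {
      try {
        replayReconstructionLog(storeName);
      } catch (IOException e) {
        LOG.log(Level.WARNING, "Exception processing reconstruction log" +
          " opening " + storeName + " -- continuing. Probably DATA LOSS!", e);
      }
    }

    // Hypothetical stand-in for doReconstructionLog; throws to exercise
    // the recovery path.
    private void replayReconstructionLog(String storeName) throws IOException {
      throw new IOException("EOF reading reconstruction log (simulated)");
    }

    public static void main(String[] args) {
      new ReconstructionSketch().open("exampleStore");
    }
  }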
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=643110&r1=643109&r2=643110&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Mar 31 12:26:47 2008
@@ -105,6 +105,7 @@
    HBASE-529 RegionServer needs to recover if datanode goes down
    HBASE-456 Clearly state which ports need to be opened in order to run
              HBase
    HBASE-536 Remove MiniDFS startup from MiniHBaseCluster
+   HBASE-550 EOF trying to read reconstruction log stops region deployment
 
 Branch 0.1
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=643110&r1=643109&r2=643110&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java Mon Mar 31 12:26:47 2008
@@ -44,6 +44,7 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -206,10 +207,7 @@
     }
     if(LOG.isDebugEnabled()) {
-      LOG.debug("starting " + storeName +
-        ((reconstructionLog == null || !fs.exists(reconstructionLog)) ?
-        " (no reconstruction log)" :
-        " with reconstruction log: " + reconstructionLog.toString()));
+      LOG.debug("starting " + storeName);
     }
     // Go through the 'mapdir' and 'infodir' together, make sure that all
@@ -236,7 +234,16 @@
         this.maxSeqId);
     }
-    doReconstructionLog(reconstructionLog, maxSeqId);
+    try {
+      doReconstructionLog(reconstructionLog, maxSeqId);
+    } catch (IOException e) {
+      // Presume we got here because of some HDFS issue or because of a lack
+      // of HADOOP-1700; for now keep going, but this is probably not what we
+      // want long term. If we got here, there has been data loss.
+      LOG.warn("Exception processing reconstruction log " + reconstructionLog +
+        " opening " + this.storeName +
+        " -- continuing. Probably DATA LOSS!", e);
+    }
     // By default, we compact if an HStore has more than
     // MIN_COMMITS_FOR_COMPACTION map files
@@ -303,11 +310,16 @@
   private void doReconstructionLog(final Path reconstructionLog,
     final long maxSeqID)
   throws UnsupportedEncodingException, IOException {
-
     if (reconstructionLog == null || !fs.exists(reconstructionLog)) {
       // Nothing to do.
       return;
     }
+    // Check it's not empty.
+    FileStatus[] stats = fs.listStatus(reconstructionLog);
+    if (stats == null || stats.length == 0) {
+      LOG.warn("Passed reconstruction log " + reconstructionLog + " is zero-length");
+      return;
+    }
     long maxSeqIdInLog = -1;
     TreeMap<HStoreKey, byte []> reconstructedCache =
       new TreeMap<HStoreKey, byte []>();
@@ -1691,4 +1703,4 @@
       }
     }
   }
-}
\ No newline at end of file
+}
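For reference, a hedged sketch of the empty-log guard in isolation,
assuming only the stock Hadoop FileSystem API (FileSystem.get,
listStatus, FileStatus.getLen). The path and the use of the default
filesystem are illustrative, and the extra getLen() check is an
assumption beyond what the commit itself does:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class EmptyLogCheck {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      Path log = new Path(args.length > 0 ? args[0] : "/tmp/reconstruction.log");
      if (!fs.exists(log)) {
        System.out.println("no reconstruction log -- nothing to do");
        return;
      }
      // listStatus on a plain file returns a one-element array holding that
      // file's status; guard against null/empty as the commit does, then
      // also skip files that report zero bytes.
      FileStatus[] stats = fs.listStatus(log);
      if (stats == null || stats.length == 0 || stats[0].getLen() == 0) {
        System.out.println(log + " is zero-length -- skipping replay");
        return;
      }
      System.out.println(log + " has " + stats[0].getLen() + " bytes to replay");
    }
  }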