Author: jdcryans
Date: Fri May 7 20:56:24 2010
New Revision: 942219

URL: http://svn.apache.org/viewvc?rev=942219&view=rev
Log:
HBASE-2503  PriorityQueue isn't thread safe, KeyValueHeap uses it that way
Modified:
    hadoop/hbase/branches/0.20/CHANGES.txt
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/branches/0.20/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java

Modified: hadoop/hbase/branches/0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/CHANGES.txt?rev=942219&r1=942218&r2=942219&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.20/CHANGES.txt Fri May 7 20:56:24 2010
@@ -120,6 +120,7 @@ Release X.X.X - Unreleased
               (Ryan Rawson via Stack)
    HBASE-2513 hbase-2414 added bug where we'd tight-loop if no root available
+   HBASE-2503 PriorityQueue isn't thread safe, KeyValueHeap uses it that way
 
   IMPROVEMENTS
    HBASE-2180 Bad read performance from synchronizing hfile.fddatainputstream

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=942219&r1=942218&r2=942219&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Fri May 7 20:56:24 2010
@@ -33,6 +33,7 @@ package org.apache.hadoop.hbase.regionse
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -1823,6 +1824,7 @@ public class HRegion implements HConstan
    * @return Row that goes with <code>lockid</code>
    */
   byte [] getRowFromLock(final Integer lockid) {
+    // Doesn't need to be volatile, always accessed under a sync'ed method
     synchronized (lockedRows) {
       return lockIds.get(lockid);
     }
@@ -1925,6 +1927,7 @@ public class HRegion implements HConstan
     private int batch;
     private Scan theScan = null;
     private List<KeyValueScanner> extraScanners = null;
+    private boolean filterClosed = false;
 
     RegionScanner(Scan scan, List<KeyValueScanner> additionalScanners) {
       //DebugPrint.println("HRegionScanner.<init>");
@@ -1975,7 +1978,13 @@ public class HRegion implements HConstan
       ReadWriteConsistencyControl.resetThreadReadPoint(rwcc);
     }
 
-    public boolean next(List<KeyValue> outResults, int limit) throws IOException {
+    public synchronized boolean next(List<KeyValue> outResults, int limit)
+        throws IOException {
+      if (this.filterClosed) {
+        throw new UnknownScannerException("Scanner was closed (timed out?) " +
+            "after we renewed it. Could be caused by a very slow scanner " +
+            "or a lengthy garbage collection");
+      }
       if (closing.get() || closed.get()) {
         close();
         throw new NotServingRegionException(regionInfo.getRegionNameAsString() +
@@ -2002,7 +2011,8 @@ public class HRegion implements HConstan
       return returnResult;
     }
 
-    public boolean next(List<KeyValue> outResults) throws IOException {
+    public synchronized boolean next(List<KeyValue> outResults)
+        throws IOException {
       // apply the batching limit by default
       return next(outResults, batch);
     }
@@ -2010,7 +2020,7 @@ public class HRegion implements HConstan
     /*
      * @return True if a filter rules the scanner is over, done.
      */
-    boolean isFilterDone() {
+    synchronized boolean isFilterDone() {
       return (this.filter != null && this.filter.filterAllRemaining())
           || (this.oldFilter != null && oldFilter.filterAllRemaining());
@@ -2088,22 +2098,13 @@ public class HRegion implements HConstan
           (oldFilter != null && this.oldFilter.filterRowKey(row, 0, row.length));
     }
 
-    public void close() {
+    public synchronized void close() {
+      this.filterClosed = true;
       if (storeHeap != null) {
         storeHeap.close();
         storeHeap = null;
       }
     }
-
-    /**
-     *
-     * @param scanner to be closed
-     */
-    public void close(KeyValueScanner scanner) {
-      try {
-        scanner.close();
-      } catch(NullPointerException npe) {}
-    }
   }
 
   // Utility methods

Modified: hadoop/hbase/branches/0.20/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java?rev=942219&r1=942218&r2=942219&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java (original)
+++ hadoop/hbase/branches/0.20/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java Fri May 7 20:56:24 2010
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -212,6 +213,34 @@ public class TestScanner extends HBaseTe
     }
   }
 
+  /**
+   * Test that closing a scanner while a client is using it doesn't throw
+   * NPEs but instead an UnknownScannerException. HBASE-2503
+   * @throws Exception
+   */
+  public void testRaceBetweenClientAndTimeout() throws Exception {
+    try {
+      this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
+      addContent(this.r, HConstants.CATALOG_FAMILY);
+      Scan scan = new Scan();
+      InternalScanner s = r.getScanner(scan);
+      List<KeyValue> results = new ArrayList<KeyValue>();
+      try {
+        s.next(results);
+        s.close();
+        s.next(results);
+        fail("We don't want anything more, we should be failing");
+      } catch (UnknownScannerException ex) {
+        // ok!
+        return;
+      }
+    } finally {
+      this.r.close();
+      this.r.getLog().closeAndDelete();
+      shutdownDfs(this.cluster);
+    }
+  }
+
   /** The test!
    * @throws IOException
    */
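Editor's note: for readers unfamiliar with the bug, here is a minimal, self-contained
sketch (not part of this patch; the class name SynchronizedHeapScanner and the use
of IllegalStateException in place of UnknownScannerException are illustrative
assumptions) of the pattern the commit applies to RegionScanner. java.util.PriorityQueue
provides no internal synchronization, so every method of the owning object that touches
the queue is made synchronized, and a closed flag turns use-after-close into a clean
exception instead of an NPE or a corrupted heap.

// Sketch only: mirrors the patch's pattern, not HBase's actual classes.
import java.util.PriorityQueue;

public class SynchronizedHeapScanner {
  // PriorityQueue is not thread safe; all access goes through synchronized methods.
  private PriorityQueue<Integer> heap = new PriorityQueue<Integer>();
  private boolean closed = false;

  public SynchronizedHeapScanner(int... values) {
    for (int v : values) {
      heap.add(v);
    }
  }

  /** Poll the smallest element; serialized against close() by the monitor. */
  public synchronized Integer next() {
    if (closed) {
      // Mirrors the patch: fail loudly instead of dereferencing a null heap.
      throw new IllegalStateException("Scanner was closed (timed out?)");
    }
    return heap.poll();
  }

  /** Close and release the heap; serialized against next() by the monitor. */
  public synchronized void close() {
    closed = true;
    heap = null;
  }

  public static void main(String[] args) {
    SynchronizedHeapScanner s = new SynchronizedHeapScanner(3, 1, 2);
    System.out.println(s.next()); // prints 1
    s.close();
    try {
      s.next(); // the race the new test exercises: next() after close()
    } catch (IllegalStateException expected) {
      System.out.println("caught: " + expected.getMessage());
    }
  }
}

Because next() and close() share the object's monitor, a lease-expiry thread closing
the scanner and a client thread iterating it can never mutate the underlying
PriorityQueue concurrently; the loser of the race gets a descriptive exception.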