Author: jimk
Date: Tue Dec 4 14:17:21 2007
New Revision: 601111

URL: http://svn.apache.org/viewvc?rev=601111&view=rev
Log:
HADOOP-2333 Client side retries happen at the wrong level
Modified:
    lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet2.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeMeta.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java

Modified: lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt?rev=601111&r1=601110&r2=601111&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt Tue Dec 4 14:17:21 2007
@@ -88,6 +88,7 @@
    HADOOP-2339 Delete command with no WHERE clause (Edward Yoon via Stack)
    HADOOP-2299 Support inclusive scans (Bryan Duxbury via Stack)
+   HADOOP-2333 Client side retries happen at the wrong level
 
 Release 0.15.1

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java?rev=601111&r1=601110&r2=601111&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HBaseAdmin.java Tue Dec 4 14:17:21 2007
@@ -114,8 +114,24 @@
   throws IOException {
     createTableAsync(desc);
 
-    // Wait for new table to come on-line
-    connection.getTableServers(desc.getName());
+    for (int tries = 0; tries < numRetries; tries++) {
+      try {
+        // Wait for new table to come on-line
+        connection.getTableServers(desc.getName());
+        break;
+
+      } catch (TableNotFoundException e) {
+        if (tries == numRetries - 1) {
+          // Ran out of tries
+          throw e;
+        }
+      }
+      try {
+        Thread.sleep(pause);
+      } catch (InterruptedException e) {
+        // continue
+      }
+    }
   }
 
   /**
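For readers following the patch, the createTable() hunk above boils down to a simple wait-for-table loop: the admin client now retries at its own level, where it knows what success means (the new table has regions on-line), instead of relying on lower layers to retry. Below is a minimal, self-contained sketch of that pattern; the class name, the locateTable() stub, and the literal retry/pause values are illustrative stand-ins, not the actual HBaseAdmin members.

    // Hypothetical sketch of the retry-until-available pattern used in
    // HBaseAdmin.createTable() above; names and values are illustrative only.
    public class RetryUntilAvailableSketch {
      public static void main(String[] args) throws InterruptedException {
        int numRetries = 5;   // cf. hbase.client.retries.number used elsewhere in this patch
        long pause = 1000L;   // cf. hbase.client.pause, in milliseconds

        for (int tries = 0; tries < numRetries; tries++) {
          try {
            locateTable("mytable"); // stand-in for connection.getTableServers(name)
            break;                  // the table is on-line; stop retrying
          } catch (IllegalStateException e) { // stand-in for TableNotFoundException
            if (tries == numRetries - 1) {
              throw e;              // out of tries; surface the failure to the caller
            }
          }
          Thread.sleep(pause);      // back off before asking again
        }
      }

      private static void locateTable(String name) {
        // Illustrative stub: a real implementation would ask the connection for the
        // table's regions and throw if the table is not yet being served.
      }
    }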
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java?rev=601111&r1=601110&r2=601111&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java Tue Dec 4 14:17:21 2007
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -56,8 +57,8 @@
   // Note that although the Map is synchronized, the objects it contains
   // are mutable and hence require synchronized access to them
-  private static final Map<String, HConnection> HBASE_INSTANCES =
-    Collections.synchronizedMap(new HashMap<String, HConnection>());
+  private static final Map<String, TableServers> HBASE_INSTANCES =
+    Collections.synchronizedMap(new HashMap<String, TableServers>());
 
   /**
    * Get the connection object for the instance specified by the configuration
   * @return HConnection object for the instance specified by the configuration
   */
  public static HConnection getConnection(HBaseConfiguration conf) {
-    HConnection connection;
+    TableServers connection;
     synchronized (HBASE_INSTANCES) {
       String instanceName = conf.get(HBASE_DIR, DEFAULT_HBASE_DIR);
@@ -86,20 +87,24 @@
    */
   public static void deleteConnection(HBaseConfiguration conf) {
     synchronized (HBASE_INSTANCES) {
-      HBASE_INSTANCES.remove(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
+      TableServers instance =
+        HBASE_INSTANCES.remove(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
+      if (instance != null) {
+        instance.closeAll();
+      }
     }
   }
 
   /* encapsulates finding the servers for an HBase instance */
   private static class TableServers implements HConnection, HConstants {
-    private static final Log LOG = LogFactory.getLog(TableServers.class.
-      getName());
+    private static final Log LOG = LogFactory.getLog(TableServers.class);
     private final Class<? extends HRegionInterface> serverInterfaceClass;
     private final long threadWakeFrequency;
     private final long pause;
     private final int numRetries;
     private final Integer masterLock = new Integer(0);
+    private volatile boolean closed;
     private volatile HMasterInterface master;
     private volatile boolean masterChecked;
 
@@ -131,6 +136,8 @@
     String serverClassName =
       conf.get(REGION_SERVER_CLASS, DEFAULT_REGION_SERVER_CLASS);
 
+    this.closed = false;
+
     try {
       this.serverInterfaceClass =
         (Class<? extends HRegionInterface>) Class.forName(serverClassName);
@@ -161,7 +168,9 @@
     public HMasterInterface getMaster() throws MasterNotRunningException {
       synchronized (this.masterLock) {
         for (int tries = 0;
-          !this.masterChecked && this.master == null && tries < numRetries;
+          !this.closed &&
+          !this.masterChecked && this.master == null &&
+          tries < numRetries;
           tries++) {
 
           HServerAddress masterLocation = new HServerAddress(this.conf.get(
@@ -387,14 +396,16 @@
       }
 
       if (closedTables.contains(tableName)) {
-        throw new IllegalStateException("table already closed: " + tableName);
+        // Table already closed. Ignore it.
+        return;
       }
 
       SortedMap<Text, HRegionLocation> tableServers =
         tablesToServers.remove(tableName);
 
       if (tableServers == null) {
-        throw new IllegalArgumentException("table not open: " + tableName);
+        // Table not open. Ignore it.
+        return;
       }
 
       closedTables.add(tableName);
@@ -408,6 +419,14 @@
       }
     }
 
+    void closeAll() {
+      this.closed = true;
+      ArrayList<Text> tables = new ArrayList<Text>(tablesToServers.keySet());
+      for (Text tableName: tables) {
+        close(tableName);
+      }
+    }
+
     /*
      * Clears the cache of all known information about the specified table and
      * locates a table by searching the META or ROOT region (as appropriate) or
@@ -437,13 +456,12 @@
         // region at the same time. One will go do the find while the
         // second waits. The second thread will not do find.
-        SortedMap<Text, HRegionLocation> tableServers =
-          this.tablesToServers.get(ROOT_TABLE_NAME);
+        srvrs = this.tablesToServers.get(ROOT_TABLE_NAME);
 
-        if (tableServers == null) {
-          tableServers = locateRootRegion();
+        if (srvrs == null) {
+          srvrs = locateRootRegion();
         }
-        srvrs.putAll(tableServers);
+        this.tablesToServers.put(tableName, srvrs);
       }
 
     } else if (tableName.equals(META_TABLE_NAME)) {
@@ -452,29 +470,30 @@
         // region at the same time. The first will load the meta region and
        // the second will use the value that the first one found.
 
-        if (tablesToServers.get(ROOT_TABLE_NAME) == null) {
-          findServersForTable(ROOT_TABLE_NAME);
-        }
-
-        SortedMap<Text, HRegionLocation> tableServers =
-          this.tablesToServers.get(META_TABLE_NAME);
+        SortedMap<Text, HRegionLocation> rootServers =
+          tablesToServers.get(ROOT_TABLE_NAME);
 
-        if (tableServers == null) {
-          for (int tries = 0; tries < numRetries; tries++) {
-            try {
-              tableServers = loadMetaFromRoot();
-              break;
-
-            } catch (IOException e) {
-              if (tries < numRetries - 1) {
-                findServersForTable(ROOT_TABLE_NAME);
-                continue;
-              }
+        for (boolean refindRoot = true; refindRoot; ) {
+          if (rootServers == null || rootServers.size() == 0) {
+            // (re)find the root region
+            rootServers = findServersForTable(ROOT_TABLE_NAME);
+            // but don't try again
+            refindRoot = false;
+          }
+          try {
+            srvrs = getTableServers(rootServers, META_TABLE_NAME);
+            break;
+
+          } catch (NotServingRegionException e) {
+            if (!refindRoot) {
+              // Already found root once. Give up.
               throw e;
             }
+            // The root region must have moved - refind it
+            rootServers.clear();
           }
         }
-        srvrs.putAll(tableServers);
+        this.tablesToServers.put(tableName, srvrs);
       }
 
     } else {
       boolean waited = false;
@@ -506,18 +525,30 @@
       }
       if (!waited) {
         try {
-          SortedMap<Text, HRegionLocation> metaServers =
-            this.tablesToServers.get(META_TABLE_NAME);
-          if (metaServers == null) {
+          SortedMap<Text, HRegionLocation> metaServers =
+            this.tablesToServers.get(META_TABLE_NAME);
+
+          for (boolean refindMeta = true; refindMeta; ) {
+            if (metaServers == null || metaServers.size() == 0) {
+              // (re)find the meta table
              metaServers = findServersForTable(META_TABLE_NAME);
+              // but don't try again
+              refindMeta = false;
            }
-          Text firstMetaRegion = metaServers.headMap(tableName).lastKey();
-          metaServers = metaServers.tailMap(firstMetaRegion);
+            try {
+              srvrs = getTableServers(metaServers, tableName);
+              break;
 
-          for (HRegionLocation t: metaServers.values()) {
-            srvrs.putAll(scanOneMetaRegion(t, tableName));
+            } catch (NotServingRegionException e) {
+              if (!refindMeta) {
+                // Already refound meta once. Give up.
+                throw e;
+              }
+              // The meta table must have moved - refind it
+              metaServers.clear();
            }
-
+          }
+          this.tablesToServers.put(tableName, srvrs);
        } finally {
          synchronized (this.tablesBeingLocated) {
            // Wake up the threads waiting for us to find the table
@@ -532,22 +563,6 @@
     }
 
     /*
-     * Load the meta table from the root table.
-     *
-     * @return map of first row to TableInfo for all meta regions
-     * @throws IOException
-     */
-    private SortedMap<Text, HRegionLocation> loadMetaFromRoot()
-      throws IOException {
-
-      SortedMap<Text, HRegionLocation> rootRegion =
-        this.tablesToServers.get(ROOT_TABLE_NAME);
-
-      return scanOneMetaRegion(
-        rootRegion.get(rootRegion.firstKey()), META_TABLE_NAME);
-    }
-
-    /*
     * Repeatedly try to find the root region by asking the master for where it is
     * @return TreeMap<Text, TableInfo> for root regin if found
     * @throws NoServerForRegionException - if the root region can not be located
@@ -629,6 +644,58 @@
       return rootServer;
     }
+
+    /*
+     * @param metaServers the meta servers that would know where the table is
+     * @param tableName name of the table
+     * @return map of region start key -> server location
+     * @throws IOException
+     */
+    private SortedMap<Text, HRegionLocation> getTableServers(
+      final SortedMap<Text, HRegionLocation> metaServers,
+      final Text tableName) throws IOException {
+
+      // If there is more than one meta server, find the first one that should
+      // know about the table we are looking for, and reduce the number of
+      // servers we need to query.
+
+      SortedMap<Text, HRegionLocation> metaServersForTable = metaServers;
+      if (metaServersForTable.size() > 1) {
+        Text firstMetaRegion = metaServersForTable.headMap(tableName).lastKey();
+        metaServersForTable = metaServersForTable.tailMap(firstMetaRegion);
+      }
+
+      SortedMap<Text, HRegionLocation> tableServers =
+        new TreeMap<Text, HRegionLocation>();
+
+      int tries = 0;
+      do {
+        if (tries >= numRetries - 1) {
+          throw new NoServerForRegionException(
+            "failed to find server for " + tableName + " after " +
+            numRetries + " retries");
+
+        } else if (tries > 0) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Sleeping. Table " + tableName +
+              " not currently being served.");
+          }
+          try {
+            Thread.sleep(pause);
+          } catch (InterruptedException ie) {
+            // continue
+          }
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Wake. Retry finding table " + tableName);
+          }
+        }
+        for (HRegionLocation t: metaServersForTable.values()) {
+          tableServers.putAll(scanOneMetaRegion(t, tableName));
+        }
+        tries += 1;
+      } while (tableServers.size() == 0);
+      return tableServers;
+    }
 
     /*
      * Scans a single meta region
@@ -637,7 +704,6 @@
      * @return returns a map of startingRow to TableInfo
      * @throws TableNotFoundException - if table does not exist
      * @throws IllegalStateException - if table is offline
-     * @throws NoServerForRegionException - if table can not be found after retrying
      * @throws IOException
      */
     private SortedMap<Text, HRegionLocation> scanOneMetaRegion(
@@ -647,119 +713,94 @@
       TreeMap<Text, HRegionLocation> servers =
         new TreeMap<Text, HRegionLocation>();
 
-      for (int tries = 0; servers.size() == 0 && tries < numRetries; tries++) {
-        long scannerId = -1L;
-        try {
-          scannerId = server.openScanner(t.getRegionInfo().getRegionName(),
+      long scannerId = -1L;
+      try {
+        scannerId = server.openScanner(t.getRegionInfo().getRegionName(),
           COLUMN_FAMILY_ARRAY, tableName, System.currentTimeMillis(), null);
 
-          while (true) {
-            MapWritable values = server.next(scannerId);
-            if (values == null || values.size() == 0) {
-              if (servers.size() == 0) {
-                // If we didn't find any servers then the table does not exist
-                throw new TableNotFoundException("table '" + tableName +
+        while (true) {
+          MapWritable values = server.next(scannerId);
+          if (values == null || values.size() == 0) {
+            if (servers.size() == 0) {
+              // If we didn't find any servers then the table does not exist
+              throw new TableNotFoundException("table '" + tableName +
                 "' does not exist in " + t);
-              }
+            }
 
-              // We found at least one server for the table and now we're done.
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Found " + servers.size() + " region(s) for " +
+            // We found at least one server for the table and now we're done.
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Found " + servers.size() + " region(s) for " +
                 tableName + " at " + t);
-              }
-              break;
             }
+            break;
+          }
 
-            SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-            for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-              HStoreKey key = (HStoreKey) e.getKey();
-              results.put(key.getColumn(),
-                ((ImmutableBytesWritable) e.getValue()).get());
-            }
-
-            byte[] bytes = results.get(COL_REGIONINFO);
-            if (bytes == null || bytes.length == 0) {
-              // This can be null. Looks like an info:splitA or info:splitB
-              // is only item in the row.
-              if (LOG.isDebugEnabled()) {
-                LOG.debug(COL_REGIONINFO.toString() + " came back empty: " +
+          SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+          for (Map.Entry<Writable, Writable> e: values.entrySet()) {
+            HStoreKey key = (HStoreKey) e.getKey();
+            results.put(key.getColumn(),
+              ((ImmutableBytesWritable) e.getValue()).get());
+          }
+
+          byte[] bytes = results.get(COL_REGIONINFO);
+          if (bytes == null || bytes.length == 0) {
+            // This can be null. Looks like an info:splitA or info:splitB
+            // is only item in the row.
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(COL_REGIONINFO.toString() + " came back empty: " +
                results.toString());
-              }
-              servers.clear();
-              break;
            }
-
-            HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable(
+            servers.clear();
+            break;
+          }
+
+          HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable(
            results.get(COL_REGIONINFO), new HRegionInfo());
 
-            if (!regionInfo.getTableDesc().getName().equals(tableName)) {
-              // We're done
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Found " + servers.size() + " servers for table " +
+          if (!regionInfo.getTableDesc().getName().equals(tableName)) {
+            // We're done
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Found " + servers.size() + " servers for table " +
                tableName);
-              }
-              break;
-            }
-
-            if (regionInfo.isSplit()) {
-              // Region is a split parent. Skip it.
-              continue;
-            }
-
-            if (regionInfo.isOffline()) {
-              throw new IllegalStateException("table offline: " + tableName);
            }
+            break;
+          }
 
-            bytes = results.get(COL_SERVER);
-            if (bytes == null || bytes.length == 0) {
-              // We need to rescan because the table we want is unassigned.
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("no server address for " + regionInfo.toString());
-              }
-              servers.clear();
-              break;
-            }
-
-            String serverAddress = Writables.bytesToString(bytes);
-            servers.put(regionInfo.getStartKey(), new HRegionLocation(
-              regionInfo, new HServerAddress(serverAddress)));
+          if (regionInfo.isSplit()) {
+            // Region is a split parent. Skip it.
+            continue;
          }
-        } catch (IOException e) {
-          if (tries == numRetries - 1) { // no retries left
-            if (e instanceof RemoteException) {
-              e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
-            }
-            throw e;
+
+          if (regionInfo.isOffline()) {
+            throw new IllegalStateException("table offline: " + tableName);
          }
-
-        } finally {
-          if (scannerId != -1L) {
-            try {
-              server.close(scannerId);
-            } catch (Exception ex) {
-              LOG.warn(ex);
+
+          bytes = results.get(COL_SERVER);
+          if (bytes == null || bytes.length == 0) {
+            // We need to rescan because the table we want is unassigned.
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("no server address for " + regionInfo.toString());
            }
+            servers.clear();
+            break;
          }
+
+          String serverAddress = Writables.bytesToString(bytes);
+          servers.put(regionInfo.getStartKey(), new HRegionLocation(
+            regionInfo, new HServerAddress(serverAddress)));
        }
-
-        if (servers.size() == 0 && tries == numRetries - 1) {
-          throw new NoServerForRegionException("failed to find server for " +
-            tableName + " after " + numRetries + " retries");
+      } catch (IOException e) {
+        if (e instanceof RemoteException) {
+          e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
        }
+        throw e;
 
-        if (servers.size() <= 0) {
-          // The table is not yet being served. Sleep and retry.
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Sleeping. Table " + tableName +
-              " not currently being served.");
-          }
+      } finally {
+        if (scannerId != -1L) {
          try {
-            Thread.sleep(pause);
-          } catch (InterruptedException ie) {
-            // continue
-          }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Wake. Retry finding table " + tableName);
+            server.close(scannerId);
+          } catch (Exception ex) {
+            LOG.warn(ex);
          }
        }
      }
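Taken together, the HConnectionManager changes above do three things: cache one TableServers instance per HBase instance, close every cached table when deleteConnection() drops that instance, and move the retry loop up out of scanOneMetaRegion() into the new getTableServers(). The following is a condensed, hypothetical sketch of just the cache-plus-closeAll shape, with plain String/Object stand-ins for the HBase types (Text, HRegionLocation, the real TableServers); it is not the library API.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Hypothetical stand-in for the HConnectionManager cache: one connection per
    // instance name, and closeAll() invoked when the connection is deleted.
    final class ConnectionCacheSketch {
      private static final Map<String, Connection> INSTANCES =
        Collections.synchronizedMap(new HashMap<String, Connection>());

      static Connection getConnection(String instanceName) {
        synchronized (INSTANCES) {
          Connection connection = INSTANCES.get(instanceName);
          if (connection == null) {
            connection = new Connection();
            INSTANCES.put(instanceName, connection);
          }
          return connection;
        }
      }

      static void deleteConnection(String instanceName) {
        synchronized (INSTANCES) {
          Connection connection = INSTANCES.remove(instanceName);
          if (connection != null) {
            connection.closeAll();  // mirrors TableServers.closeAll() in the patch
          }
        }
      }

      static final class Connection {
        // In the patch, retry loops also check this flag so a closed connection stops retrying.
        private volatile boolean closed = false;
        private final Map<String, Object> tablesToServers = new HashMap<String, Object>();

        void closeAll() {
          this.closed = true;
          // Copy the keys first so close() can remove entries while we iterate.
          List<String> tables = new ArrayList<String>(tablesToServers.keySet());
          for (String tableName : tables) {
            close(tableName);
          }
        }

        void close(String tableName) {
          // A closed or unknown table is simply ignored, as in the patched close().
          tablesToServers.remove(tableName);
        }
      }
    }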
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java?rev=601111&r1=601110&r2=601111&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMerge.java Tue Dec 4 14:17:21 2007
@@ -63,17 +63,19 @@
    * @param tableName - Table to be compacted
    * @throws IOException
    */
-  public static void merge(HBaseConfiguration conf, FileSystem fs, Text tableName)
-    throws IOException {
+  public static void merge(HBaseConfiguration conf, FileSystem fs,
+      Text tableName) throws IOException {
+
     HConnection connection = HConnectionManager.getConnection(conf);
     boolean masterIsRunning = connection.isMasterRunning();
+    HConnectionManager.deleteConnection(conf);
     if(tableName.equals(META_TABLE_NAME)) {
-      if(masterIsRunning) {
-        throw new IllegalStateException(
-          "Can not compact META table if instance is on-line");
-      }
-      new OfflineMerger(conf, fs, META_TABLE_NAME).process();
-
+      if(masterIsRunning) {
+        throw new IllegalStateException(
+          "Can not compact META table if instance is on-line");
+      }
+      new OfflineMerger(conf, fs, META_TABLE_NAME).process();
+
     } else {
       if(!masterIsRunning) {
         throw new IllegalStateException(

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java?rev=601111&r1=601110&r2=601111&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java Tue Dec 4 14:17:21 2007
@@ -63,13 +63,7 @@
    */
   public HBaseTestCase() {
     super();
-    conf = new HBaseConfiguration();
-    try {
-      START_KEY =
-        new String(START_KEY_BYTES, HConstants.UTF8_ENCODING) + PUNCTUATION;
-    } catch (UnsupportedEncodingException e) {
-      fail();
-    }
+    init();
   }
 
   /**
@@ -77,6 +71,10 @@
    */
   public HBaseTestCase(String name) {
     super(name);
+    init();
+  }
+
+  private void init() {
     conf = new HBaseConfiguration();
     try {
       START_KEY =

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java?rev=601111&r1=601110&r2=601111&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java Tue Dec 4 14:17:21 2007
@@ -71,7 +71,7 @@
 
     Logger rootLogger = Logger.getRootLogger();
     rootLogger.setLevel(Level.WARN);
-    
+
     Level logLevel = Level.DEBUG;
     value = System.getenv("LOGGING_LEVEL");
     if(value != null && value.length() != 0) {
@@ -112,6 +112,12 @@
       }
     }
     LOG.setLevel(logLevel);
+
+    if (!debugging) {
+      // Turn off all the filter logging unless debug is set.
+      // It is way too noisy.
+      Logger.getLogger("org.apache.hadoop.hbase.filter").setLevel(Level.INFO);
+    }
   }
 
   /**

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet2.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet2.java?rev=601111&r1=601110&r2=601111&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet2.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet2.java Tue Dec 4 14:17:21 2007
@@ -23,8 +23,6 @@
 import java.util.Map;
 import java.util.TreeMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.hbase.filter.StopRowFilter;
 import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
@@ -36,9 +34,9 @@
  * This class
  */
 public class TestGet2 extends HBaseTestCase {
-  private final Log LOG = LogFactory.getLog(this.getClass().getName());
   private MiniDFSCluster miniHdfs;
 
+  @Override
   protected void setUp() throws Exception {
     super.setUp();
     this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeMeta.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeMeta.java?rev=601111&r1=601110&r2=601111&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeMeta.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeMeta.java Tue Dec 4 14:17:21 2007
@@ -23,6 +23,13 @@
 
 /** Tests region merging */
 public class TestMergeMeta extends AbstractMergeTestBase {
+
+  /** constructor */
+  public TestMergeMeta() {
+    super();
+    conf.setLong("hbase.client.pause", 1 * 1000);
+    conf.setInt("hbase.client.retries.number", 2);
+  }
 
   /**
    * test case

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java?rev=601111&r1=601110&r2=601111&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java Tue Dec 4 14:17:21 2007
@@ -85,8 +85,10 @@
         table.obtainScanner(new Text [] {new Text(FIRST_COLKEY + ":")},
           HConstants.EMPTY_START_ROW, new Text(lastKey));
       for (Map.Entry<HStoreKey, SortedMap<Text, byte []>> e: scanner) {
-        LOG.info(e.getKey());
-        assertTrue(e.getKey().getRow().toString().compareTo(lastKey) < 0);
+        if(e.getKey().getRow().toString().compareTo(lastKey) >= 0) {
+          LOG.info(e.getKey());
+          fail();
+        }
       }
     } finally {
       table.close();

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java?rev=601111&r1=601110&r2=601111&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java Tue Dec 4 14:17:21 2007
@@ -142,9 +142,11 @@
    */
   @SuppressWarnings("static-access")
   public void testTableIndex() throws IOException {
-    long firstK = 32;
-    LOG.info("Print table contents before map/reduce");
-    scanTable(conf, firstK);
+    boolean printResults = false;
+    if (printResults) {
+      LOG.info("Print table contents before map/reduce");
+    }
+    scanTable(conf, printResults);
 
     @SuppressWarnings("deprecation")
     MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);
@@ -174,8 +176,10 @@
       mrCluster.shutdown();
     }
 
-    LOG.info("Print table contents after map/reduce");
-    scanTable(conf, firstK);
+    if (printResults) {
+      LOG.info("Print table contents after map/reduce");
+    }
+    scanTable(conf, printResults);
 
     // verify index results
     verify(conf);
@@ -214,25 +218,25 @@
     return c.toString();
   }
 
-  private void scanTable(HBaseConfiguration c, long firstK)
+  private void scanTable(HBaseConfiguration c, boolean printResults)
   throws IOException {
     HTable table = new HTable(c, new Text(TABLE_NAME));
     Text[] columns = {
       TEXT_INPUT_COLUMN,
      TEXT_OUTPUT_COLUMN
    };
     HScannerInterface scanner =
       table.obtainScanner(columns, HConstants.EMPTY_START_ROW);
-    long count = 0;
     try {
       HStoreKey key = new HStoreKey();
       TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
       while (scanner.next(key, results)) {
-        if (count < firstK)
+        if (printResults) {
           LOG.info("row: " + key.getRow());
+        }
         for (Map.Entry<Text, byte[]> e : results.entrySet()) {
-          if (count < firstK)
+          if (printResults) {
             LOG.info(" column: " + e.getKey() + " value: "
               + new String(e.getValue(), HConstants.UTF8_ENCODING));
+          }
         }
-        count++;
       }
     } finally {
       scanner.close();

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java?rev=601111&r1=601110&r2=601111&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java Tue Dec 4 14:17:21 2007
@@ -21,7 +21,6 @@
 
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
-import java.util.Arrays;
 import java.util.Map;
 import java.util.TreeMap;
 
@@ -240,7 +239,7 @@
     }
 
     LOG.info("Print table contents before map/reduce");
-    scanTable(conf, SINGLE_REGION_TABLE_NAME);
+    scanTable(conf, SINGLE_REGION_TABLE_NAME, true);
 
     @SuppressWarnings("deprecation")
     MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);
@@ -264,7 +263,7 @@
     }
 
     LOG.info("Print table contents after map/reduce");
-    scanTable(conf, SINGLE_REGION_TABLE_NAME);
+    scanTable(conf, SINGLE_REGION_TABLE_NAME, true);
 
     // verify map-reduce results
     verify(conf, SINGLE_REGION_TABLE_NAME);
@@ -326,8 +325,8 @@
     }
   }
 
-  private void scanTable(HBaseConfiguration conf, String tableName)
-  throws IOException {
+  private void scanTable(HBaseConfiguration conf, String tableName,
+      boolean printValues) throws IOException {
     HTable table = new HTable(conf, new Text(tableName));
 
     Text[] columns = {
@@ -342,11 +341,13 @@
       TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
 
       while(scanner.next(key, results)) {
-        LOG.info("row: " + key.getRow());
-
-        for(Map.Entry<Text, byte[]> e: results.entrySet()) {
-          LOG.info(" column: " + e.getKey() + " value: "
-            + new String(e.getValue(), HConstants.UTF8_ENCODING));
+        if (printValues) {
+          LOG.info("row: " + key.getRow());
+
+          for(Map.Entry<Text, byte[]> e: results.entrySet()) {
+            LOG.info(" column: " + e.getKey() + " value: "
+              + new String(e.getValue(), HConstants.UTF8_ENCODING));
+          }
         }
       }
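A closing note on the test changes: TestMergeMeta shortens the client retry settings so that a failed lookup surfaces quickly instead of burning through the default number of retries. Any test built on this client could do the same. A small, hypothetical helper using the two configuration keys that appear in the diff (the class itself is not part of the patch, and the one-second pause and two retries are just the values used in that test, not recommended production settings):

    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical helper that builds a fast-failing client configuration the
    // way TestMergeMeta does above.
    public class FastFailClientConfig {
      public static HBaseConfiguration create() {
        HBaseConfiguration conf = new HBaseConfiguration();
        conf.setLong("hbase.client.pause", 1 * 1000);    // millis to sleep between retries
        conf.setInt("hbase.client.retries.number", 2);   // attempts before giving up
        return conf;
      }
    }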