Author: jimk
Date: Thu Dec 13 13:28:03 2007
New Revision: 604034

URL: http://svn.apache.org/viewvc?rev=604034&view=rev
Log:
HADOOP-2418 Fix assertion failures in TestTableMapReduce, TestTableIndex,
and TestTableJoinMapReduce
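All three failures trace to the same race: the tests scan the table to verify map/reduce output before the region server's memcache has flushed, so the scanner's snapshot can miss recent updates. Below is a minimal sketch of the pattern each patched verify() method now uses; the helper class and method names are hypothetical, while the configuration key and 60-second default come straight from the diff.

    import org.apache.hadoop.conf.Configuration;

    // Hypothetical helper illustrating the commit's fix: wait out the region
    // server's optional cache flush interval before verifying, so a scanner's
    // snapshot sees every update written by the map/reduce job.
    public class CacheFlushWait {
      public static void sleepBeforeVerify(Configuration conf) {
        try {
          // Key and default match the patched tests below.
          Thread.sleep(conf.getLong(
              "hbase.regionserver.optionalcacheflushinterval", 60L * 1000L));
        } catch (InterruptedException e) {
          // Ignore, as the tests do: a shortened wait at worst reintroduces
          // the original flaky assertion, never a wrong result.
        }
      }
    }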
Modified:
    lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/shell/algebra/TestTableJoinMapReduce.java

Modified: lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt?rev=604034&r1=604033&r2=604034&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt Thu Dec 13 13:28:03 2007
@@ -78,6 +78,8 @@
    HADOOP-2397 The only time that a meta scanner should try to recover a log
                is when the master is starting
    HADOOP-2417 Fix critical shutdown problem introduced by HADOOP-2338
+   HADOOP-2418 Fix assertion failures in TestTableMapReduce, TestTableIndex,
+               and TestTableJoinMapReduce
 
 IMPROVEMENTS
    HADOOP-2401 Add convenience put method that takes writable

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java?rev=604034&r1=604033&r2=604034&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java Thu Dec 13 13:28:03 2007
@@ -34,7 +34,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseAdmin;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
@@ -68,6 +67,11 @@
   static final Text TEXT_OUTPUT_COLUMN = new Text(OUTPUT_COLUMN);
   static final String ROWKEY_NAME = "key";
   static final String INDEX_DIR = "testindex";
+  private static final Text[] columns = {
+    TEXT_INPUT_COLUMN,
+    TEXT_OUTPUT_COLUMN
+  };
+
 
   private HTableDescriptor desc;
 
@@ -146,7 +150,7 @@
     if (printResults) {
       LOG.info("Print table contents before map/reduce");
     }
-    scanTable(conf, printResults);
+    scanTable(printResults);
 
     @SuppressWarnings("deprecation")
     MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);
@@ -179,10 +183,10 @@
     if (printResults) {
       LOG.info("Print table contents after map/reduce");
     }
-    scanTable(conf, printResults);
+    scanTable(printResults);
 
     // verify index results
-    verify(conf);
+    verify();
   }
 
   private String createIndexConfContent() {
@@ -218,10 +222,9 @@
     return c.toString();
   }
 
-  private void scanTable(HBaseConfiguration c, boolean printResults)
+  private void scanTable(boolean printResults)
   throws IOException {
-    HTable table = new HTable(c, new Text(TABLE_NAME));
-    Text[] columns = { TEXT_INPUT_COLUMN, TEXT_OUTPUT_COLUMN };
+    HTable table = new HTable(conf, new Text(TABLE_NAME));
     HScannerInterface scanner =
       table.obtainScanner(columns, HConstants.EMPTY_START_ROW);
     try {
@@ -243,7 +246,16 @@
     }
   }
 
-  private void verify(HBaseConfiguration c) throws IOException {
+  private void verify() throws IOException {
+    // Sleep before we start the verify to ensure that when the scanner takes
+    // its snapshot, all the updates have made it into the cache.
+    try {
+      Thread.sleep(conf.getLong("hbase.regionserver.optionalcacheflushinterval",
+        60L * 1000L));
+    } catch (InterruptedException e) {
+      // ignore
+    }
+
     Path localDir = new Path(this.testDir, "index_" +
       Integer.toString(new Random().nextInt()));
     this.fs.copyToLocalFile(new Path(INDEX_DIR), localDir);
@@ -265,15 +277,14 @@
       throw new IOException("no index directory found");
     }
 
-    HTable table = new HTable(c, new Text(TABLE_NAME));
-    Text[] columns = { TEXT_INPUT_COLUMN, TEXT_OUTPUT_COLUMN };
+    HTable table = new HTable(conf, new Text(TABLE_NAME));
     scanner = table.obtainScanner(columns, HConstants.EMPTY_START_ROW);
 
     HStoreKey key = new HStoreKey();
     TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
 
     IndexConfiguration indexConf = new IndexConfiguration();
-    String content = c.get("hbase.index.conf");
+    String content = conf.get("hbase.index.conf");
     if (content != null) {
       indexConf.addFromXML(content);
     }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java?rev=604034&r1=604033&r2=604034&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java Thu Dec 13 13:28:03 2007
@@ -30,7 +30,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseAdmin;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
@@ -55,7 +54,7 @@
   @SuppressWarnings("hiding")
   private static final Log LOG =
     LogFactory.getLog(TestTableMapReduce.class.getName());
-  
+
   static final String SINGLE_REGION_TABLE_NAME = "srtest";
   static final String MULTI_REGION_TABLE_NAME = "mrtest";
   static final String INPUT_COLUMN = "contents:";
@@ -63,6 +62,11 @@
   static final String OUTPUT_COLUMN = "text:";
   static final Text TEXT_OUTPUT_COLUMN = new Text(OUTPUT_COLUMN);
 
+  private static final Text[] columns = {
+    TEXT_INPUT_COLUMN,
+    TEXT_OUTPUT_COLUMN
+  };
+
   private MiniDFSCluster dfsCluster = null;
   private FileSystem fs;
   private Path dir;
@@ -232,7 +236,7 @@
       }
 
       LOG.info("Print table contents before map/reduce");
-      scanTable(conf, SINGLE_REGION_TABLE_NAME, true);
+      scanTable(SINGLE_REGION_TABLE_NAME, true);
 
       @SuppressWarnings("deprecation")
       MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);
@@ -256,10 +260,10 @@
       }
 
       LOG.info("Print table contents after map/reduce");
-      scanTable(conf, SINGLE_REGION_TABLE_NAME, true);
+      scanTable(SINGLE_REGION_TABLE_NAME, true);
 
       // verify map-reduce results
-      verify(conf, SINGLE_REGION_TABLE_NAME);
+      verify(SINGLE_REGION_TABLE_NAME);
 
     } finally {
       table.close();
@@ -311,21 +315,17 @@
       }
 
       // verify map-reduce results
-      verify(conf, MULTI_REGION_TABLE_NAME);
+      verify(MULTI_REGION_TABLE_NAME);
 
     } finally {
       table.close();
     }
   }
 
-  private void scanTable(HBaseConfiguration conf, String tableName,
-    boolean printValues) throws IOException {
+  private void scanTable(String tableName, boolean printValues)
+  throws IOException {
     HTable table = new HTable(conf, new Text(tableName));
-    Text[] columns = {
-      TEXT_INPUT_COLUMN,
-      TEXT_OUTPUT_COLUMN
-    };
     HScannerInterface scanner =
       table.obtainScanner(columns, HConstants.EMPTY_START_ROW);
 
@@ -350,14 +350,17 @@
   }
 
   @SuppressWarnings("null")
-  private void verify(HBaseConfiguration conf, String tableName)
-  throws IOException {
+  private void verify(String tableName) throws IOException {
+    // Sleep before we start the verify to ensure that when the scanner takes
+    // its snapshot, all the updates have made it into the cache.
+    try {
+      Thread.sleep(conf.getLong("hbase.regionserver.optionalcacheflushinterval",
+        60L * 1000L));
+    } catch (InterruptedException e) {
+      // ignore
+    }
     HTable table = new HTable(conf, new Text(tableName));
-    Text[] columns = {
-      TEXT_INPUT_COLUMN,
-      TEXT_OUTPUT_COLUMN
-    };
     HScannerInterface scanner =
       table.obtainScanner(columns, HConstants.EMPTY_START_ROW);

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/shell/algebra/TestTableJoinMapReduce.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/shell/algebra/TestTableJoinMapReduce.java?rev=604034&r1=604033&r2=604034&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/shell/algebra/TestTableJoinMapReduce.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/shell/algebra/TestTableJoinMapReduce.java Thu Dec 13 13:28:03 2007
@@ -28,7 +28,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseAdmin;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
@@ -94,7 +93,10 @@
     StaticTestEnvironment.shutdownDfs(dfsCluster);
   }
 
-  public void testTableJoinMapReduce() {
+  /**
+   * @throws Exception
+   */
+  public void testTableJoinMapReduce() throws Exception {
     HTable table = null;
     try {
       HTableDescriptor desc = new HTableDescriptor(FIRST_RELATION);
@@ -188,16 +190,20 @@
 
       JobClient.runJob(jobConf);
 
-    } catch (IOException e) {
+    } catch (Exception e) {
       e.printStackTrace();
+      throw e;
     } finally {
-      mrCluster.shutdown();
+      if (mrCluster != null) {
+        mrCluster.shutdown();
+      }
     }
 
     try {
-      verify(conf, OUTPUT_TABLE);
-    } catch (IOException e) {
+      verify(OUTPUT_TABLE);
+    } catch (Exception e) {
       e.printStackTrace();
+      throw e;
     }
   }
 
@@ -208,8 +214,16 @@
    * @param outputTable
    * @throws IOException
    */
-  private void verify(HBaseConfiguration conf, String outputTable)
-    throws IOException {
+  private void verify(String outputTable) throws IOException {
+    // Sleep before we start the verify to ensure that when the scanner takes
+    // its snapshot, all the updates have made it into the cache.
+    try {
+      Thread.sleep(conf.getLong("hbase.regionserver.optionalcacheflushinterval",
+        60L * 1000L));
+    } catch (InterruptedException e) {
+      // ignore
+    }
+
     HTable table = new HTable(conf, new Text(outputTable));
     Text[] columns = { new Text("a:"), new Text("b:"), new Text("c:"),
       new Text("d:"), new Text("e:") };
@@ -222,8 +236,8 @@
 
     int i = 0;
     while (scanner.next(key, results)) {
-      assertTrue(results.keySet().size() == 5);
       LOG.info("result_table.column.size: " + results.keySet().size());
+      assertEquals(5, results.keySet().size());
       i++;
     }
     assertTrue(i == 3);
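
A side note on the final hunk: besides moving the log statement ahead of the assertion (so the observed column count is logged even when the check fails), it swaps assertTrue(results.keySet().size() == 5) for assertEquals(5, results.keySet().size()). Below is a small JUnit 3 sketch, in the style these tests use, of why that matters; the class name and values are hypothetical.

    import junit.framework.TestCase;

    // Hypothetical JUnit 3 test showing the diagnostic difference between
    // the two assertion styles used in the hunk above.
    public class AssertStyleExample extends TestCase {
      public void testColumnCount() {
        int observed = 5; // stands in for results.keySet().size()
        // On failure, assertEquals reports "expected:<5> but was:<N>" ...
        assertEquals(5, observed);
        // ... whereas assertTrue(observed == 5) fails with a bare
        // AssertionFailedError and no values, which is what made the
        // original failures hard to diagnose.
        assertTrue(observed == 5);
      }
    }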