Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?view=diff&rev=539243&r1=539242&r2=539243
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Thu May 17 20:22:54 2007
@@ -35,9 +35,9 @@
   private MiniDFSCluster cluster;
   private FileSystem fs;
   private Path parentdir;
-  private HMasterRunner masterRunner;
-  private Thread masterRunnerThread;
-  private HRegionServerRunner[] regionServers;
+  private HMaster master;
+  private Thread masterThread;
+  private HRegionServer[] regionServers;
   private Thread[] regionThreads;
   
   public MiniHBaseCluster(Configuration conf, int nRegionNodes) {
@@ -58,13 +58,13 @@
 
     try {
       try {
-        if(System.getProperty("test.build.data") == null) {
+        if(System.getProperty(StaticTestEnvironment.TEST_DIRECTORY_KEY) == null) {
           File testDir = new File(new File("").getAbsolutePath(),
               "build/contrib/hbase/test");
 
           String dir = testDir.getAbsolutePath();
           LOG.info("Setting test.build.data to " + dir);
-          System.setProperty("test.build.data", dir);
+          System.setProperty(StaticTestEnvironment.TEST_DIRECTORY_KEY, dir);
         }
 
         if (miniHdfsFilesystem) {
@@ -85,26 +85,15 @@
       }
       
       // Create the master
-      this.masterRunner = new HMasterRunner();
-      this.masterRunnerThread = new Thread(masterRunner, "masterRunner");
+      this.master = new HMaster(conf);
+      this.masterThread = new Thread(this.master, "HMaster");
 
       // Start up the master
       LOG.info("Starting HMaster");
-      masterRunnerThread.start();
-      while(! masterRunner.isCrashed() && ! masterRunner.isInitialized()) {
-        try {
-          LOG.info("...waiting for HMaster to initialize...");
-          Thread.sleep(1000);
-        } catch(InterruptedException e) {
-        }
-        if(masterRunner.isCrashed()) {
-          throw new RuntimeException("HMaster crashed");
-        }
-      }
-      LOG.info("HMaster started.");
+      masterThread.start();
       
       // Set the master's port for the HRegionServers
-      String address = masterRunner.getHMasterAddress().toString();
+      String address = master.getMasterAddress().toString();
       this.conf.set(MASTER_ADDRESS, address);
 
       // Start the HRegionServers
@@ -115,34 +104,20 @@
       
       LOG.info("Starting HRegionServers");
       startRegionServers(this.conf, nRegionNodes);
-      LOG.info("HRegionServers running");
-
-      // Wait for things to get started
-
-      while(! masterRunner.isCrashed() && ! masterRunner.isUp()) {
-        try {
-          LOG.info("Waiting for Mini HBase cluster to start...");
-          Thread.sleep(1000);
-        } catch(InterruptedException e) {
-        }
-        if(masterRunner.isCrashed()) {
-          throw new RuntimeException("HMaster crashed");
-        }
-      }
       
     } catch(Throwable e) {
-      // Delete all DFS files
-      deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
-      throw new RuntimeException("Mini HBase cluster did not start");
+      e.printStackTrace();
+      shutdown();
     }
   }
   
-  private void startRegionServers(Configuration conf, int nRegionNodes) {
-    this.regionServers = new HRegionServerRunner[nRegionNodes];
+  private void startRegionServers(Configuration conf, int nRegionNodes)
+      throws IOException {
+    this.regionServers = new HRegionServer[nRegionNodes];
     this.regionThreads = new Thread[nRegionNodes];
     
     for(int i = 0; i < nRegionNodes; i++) {
-      regionServers[i] = new HRegionServerRunner(conf);
+      regionServers[i] = new HRegionServer(conf);
       regionThreads[i] = new Thread(regionServers[i], "HRegionServer-" + i);
       regionThreads[i].start();
     }
@@ -153,35 +128,48 @@
    * supplied port is not necessarily the actual port used.
    */
   public HServerAddress getHMasterAddress() {
-    return masterRunner.getHMasterAddress();
+    return master.getMasterAddress();
   }
   
   /** Shut down the HBase cluster */
   public void shutdown() {
     LOG.info("Shutting down the HBase Cluster");
     for(int i = 0; i < regionServers.length; i++) {
-      regionServers[i].shutdown();
+      try {
+        regionServers[i].stop();
+        
+      } catch(IOException e) {
+        e.printStackTrace();
+      }
+    }
+    try {
+      master.shutdown();
+      
+    } catch(IOException e) {
+      e.printStackTrace();
     }
-    masterRunner.shutdown();
     for(int i = 0; i < regionServers.length; i++) {
       try {
         regionThreads[i].join();
-      } catch (InterruptedException e) {
-        e.printStackTrace();
+        
+      } catch(InterruptedException e) {
       }
     }
     try {
-      masterRunnerThread.join();
-    } catch (InterruptedException e) {
-      e.printStackTrace();
+      masterThread.join();
+      
+    } catch(InterruptedException e) {
     }
-    if (cluster != null) {
+    LOG.info("HBase Cluster shutdown complete");
+
+    if(cluster != null) {
       LOG.info("Shutting down Mini DFS cluster");
       cluster.shutdown();
     }
     
     // Delete all DFS files
-    deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
+    deleteFile(new File(System.getProperty(
+        StaticTestEnvironment.TEST_DIRECTORY_KEY), "dfs"));
   }
   
   private void deleteFile(File f) {
@@ -192,127 +180,5 @@
       }
     }
     f.delete();
-  }
-  
-  private class HMasterRunner implements Runnable {
-    private HMaster master = null;
-    private Thread masterThread = null;
-    private volatile boolean isInitialized = false;
-    private boolean isCrashed = false;
-    private boolean isRunning = true;
-    private long threadSleepTime = conf.getLong(THREAD_WAKE_FREQUENCY, 10 * 1000);
-    
-    public HServerAddress getHMasterAddress() {
-      return this.master.getMasterAddress();
-    }
-    
-    public synchronized boolean isInitialized() {
-      return isInitialized;
-    }
-    
-    public synchronized boolean isCrashed() {
-      return isCrashed;
-    }
-    
-    public boolean isUp() {
-      if(master == null) {
-        return false;
-      }
-      synchronized(this) {
-        return isInitialized;
-      }
-    }
-    
-    /** Create the HMaster and run it */
-    public void run() {
-      try {
-        synchronized(this) {
-          if(isRunning) {
-            this.master = new HMaster(conf);
-            masterThread = new Thread(this.master);
-            masterThread.start();
-          }
-          isInitialized = true;
-        }
-      } catch(Throwable e) {
-        shutdown();
-        LOG.error("HMaster crashed:", e);
-        synchronized(this) {
-          isCrashed = true;
-        }
-      }
-
-      while(this.master != null && this.master.isMasterRunning()) {
-        try {
-          Thread.sleep(threadSleepTime);
-          
-        } catch(InterruptedException e) {
-        }
-      }
-      synchronized(this) {
-        isCrashed = true;
-      }
-      shutdown();
-    }
-    
-    /** Shut down the HMaster and wait for it to finish */
-    public synchronized void shutdown() {
-      isRunning = false;
-      if (this.master != null) {
-        try {
-          this.master.shutdown();
-        } catch(IOException e) {
-          LOG.error("Master crashed during stop", e);
-        } finally {
-          try {
-            masterThread.join();
-          } catch(InterruptedException e) {
-          }
-          master = null;
-        }
-      }
-    }
-  }
-  
-  private class HRegionServerRunner implements Runnable {
-    private HRegionServer server = null;
-    private boolean isRunning = true;
-    private Configuration conf;
-    
-    public HRegionServerRunner(Configuration conf) {
-      this.conf = conf;
-    }
-    
-    /** Start up the HRegionServer */
-    public void run() {
-      try {
-        synchronized(this) {
-          if(isRunning) {
-            server = new HRegionServer(conf);
-          }
-        }
-        server.run();
-        
-      } catch(Throwable e) {
-        shutdown();
-        LOG.error("HRegionServer crashed:", e);
-      }
-    }
-    
-    /** Shut down the HRegionServer */
-    public synchronized void shutdown() {
-      isRunning = false;
-      if(server != null) {
-        try {
-          server.stop();
-          
-        } catch(IOException e) {
-          LOG.error("HRegionServer crashed during stop", e);
-        } finally {
-          server.join();
-          server = null;
-        }
-      }
-    }
   }
 }

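A minimal sketch of the cluster lifecycle after this refactor, using only calls
that appear in this commit (the table name and column family are illustrative,
and error handling is elided):

    package org.apache.hadoop.hbase;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HBaseConfiguration();
        // One region server; MiniHBaseCluster also brings up mini DFS and HMaster.
        MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 1);
        try {
          HClient client = new HClient(conf);
          HTableDescriptor desc = new HTableDescriptor("test", 3);
          desc.addFamily(new Text("contents:"));   // family names end with ':'
          client.createTable(desc);
        } finally {
          cluster.shutdown();   // stops region servers, then HMaster, then mini DFS
        }
      }
    }

With the HMasterRunner/HRegionServerRunner wrappers gone, shutdown() now calls
HRegionServer.stop() and HMaster.shutdown() directly and joins their threads.
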
Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java?view=auto&rev=539243
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/StaticTestEnvironment.java Thu May 17 20:22:54 2007
@@ -0,0 +1,92 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.File;
+import java.util.Enumeration;
+
+import org.apache.log4j.Appender;
+import org.apache.log4j.ConsoleAppender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+
+public class StaticTestEnvironment {
+  private StaticTestEnvironment() {};                   // Not instantiable
+
+  public static final String TEST_DIRECTORY_KEY = "test.build.data";
+  public static boolean debugging = false;
+
+  @SuppressWarnings("unchecked")
+  public static void initialize() {
+    String value = null;
+    
+    if (System.getProperty(TEST_DIRECTORY_KEY) == null) {
+      System.setProperty(TEST_DIRECTORY_KEY, new File(
+          "build/contrib/hbase/test").getAbsolutePath());
+    }
+    
+    value = System.getenv("DEBUGGING");
+    if(value != null && value.equalsIgnoreCase("TRUE")) {
+      debugging = true;
+      
+      Logger rootLogger = Logger.getRootLogger();
+      rootLogger.setLevel(Level.WARN);
+
+      Level logLevel = Level.INFO;
+      value = System.getenv("LOGGING_LEVEL");
+      if(value != null && value.length() != 0) {
+        if(value.equalsIgnoreCase("ALL")) {
+          logLevel = Level.ALL;
+        } else if(value.equalsIgnoreCase("DEBUG")) {
+          logLevel = Level.DEBUG;
+        } else if(value.equalsIgnoreCase("ERROR")) {
+          logLevel = Level.ERROR;
+        } else if(value.equalsIgnoreCase("FATAL")) {
+          logLevel = Level.FATAL;
+        } else if(value.equalsIgnoreCase("INFO")) {
+          logLevel = Level.INFO;
+        } else if(value.equalsIgnoreCase("OFF")) {
+          logLevel = Level.OFF;
+        } else if(value.equalsIgnoreCase("TRACE")) {
+          logLevel = Level.TRACE;
+        } else if(value.equalsIgnoreCase("WARN")) {
+          logLevel = Level.WARN;
+        }
+      }
+      ConsoleAppender consoleAppender = null;
+      for(Enumeration<Appender> e = rootLogger.getAllAppenders();
+          e.hasMoreElements();) {
+
+        Appender a = e.nextElement();
+        if(a instanceof ConsoleAppender) {
+          consoleAppender = (ConsoleAppender)a;
+          break;
+        }
+      }
+      if(consoleAppender != null) {
+        Layout layout = consoleAppender.getLayout();
+        if(layout instanceof PatternLayout) {
+          PatternLayout consoleLayout = (PatternLayout)layout;
+          consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
+        }
+      }
+      Logger.getLogger(
+          HBaseTestCase.class.getPackage().getName()).setLevel(logLevel);
+    }    
+  }
+}

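For reference, a sketch of how a test might use the new helper; the class name
ExampleTest is hypothetical, everything else is taken from the file above:

    package org.apache.hadoop.hbase;

    import junit.framework.TestCase;

    /** Hypothetical test showing StaticTestEnvironment usage. */
    public class ExampleTest extends TestCase {
      protected void setUp() throws Exception {
        super.setUp();
        // Sets test.build.data if unset; when the DEBUGGING environment
        // variable is TRUE, re-levels log4j according to LOGGING_LEVEL
        // (ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE or WARN).
        StaticTestEnvironment.initialize();
        assertNotNull(
            System.getProperty(StaticTestEnvironment.TEST_DIRECTORY_KEY));
      }
    }
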
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java?view=diff&rev=539243&r1=539242&r2=539243
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java Thu May 17 20:22:54 2007
@@ -17,29 +17,17 @@
 
 import java.io.ByteArrayOutputStream;
 import java.io.DataOutputStream;
-import java.io.File;
 import java.io.IOException;
-import java.util.Enumeration;
 import java.util.Iterator;
 import java.util.TreeMap;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
-import org.apache.log4j.Appender;
-import org.apache.log4j.ConsoleAppender;
-import org.apache.log4j.Layout;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-
-import junit.framework.TestCase;
-
-public class TestGet extends TestCase {
+public class TestGet extends HBaseTestCase {
   private static final Text CONTENTS = new Text("contents:");
  private static final Text ROW_KEY = new Text(HGlobals.rootRegionInfo.regionName);
 
@@ -71,7 +59,6 @@
     }
   }
   
-  @SuppressWarnings("unchecked")
   public void testGet() throws IOException {
     MiniDFSCluster cluster = null;
 
@@ -79,37 +66,6 @@
       
       // Initialization
       
-      if(System.getProperty("test.build.data") == null) {
-        String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
-        System.out.println(dir);
-        System.setProperty("test.build.data", dir);
-      }
-      Configuration conf = new HBaseConfiguration();
-    
-      Environment.getenv();
-      if(Environment.debugging) {
-        Logger rootLogger = Logger.getRootLogger();
-        rootLogger.setLevel(Level.WARN);
-
-        ConsoleAppender consoleAppender = null;
-        for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
-            e.hasMoreElements();) {
-        
-          Appender a = e.nextElement();
-          if(a instanceof ConsoleAppender) {
-            consoleAppender = (ConsoleAppender)a;
-            break;
-          }
-        }
-        if(consoleAppender != null) {
-          Layout layout = consoleAppender.getLayout();
-          if(layout instanceof PatternLayout) {
-            PatternLayout consoleLayout = (PatternLayout)layout;
-            consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
-          }
-        }
-        Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
-      }
       cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
       FileSystem fs = cluster.getFileSystem();
       Path dir = new Path("/hbase");

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java?view=diff&rev=539243&r1=539242&r2=539243
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java Thu May 17 20:22:54 2007
@@ -15,45 +15,52 @@
  */
 package org.apache.hadoop.hbase;
 
-import java.io.File;
 import java.io.IOException;
-import java.util.Enumeration;
 import java.util.Iterator;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
-import junit.framework.TestCase;
-import junit.framework.Test;
-import junit.framework.TestSuite;
-
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 
-import org.apache.log4j.Appender;
-import org.apache.log4j.ConsoleAppender;
-import org.apache.log4j.Layout;
-import org.apache.log4j.Logger;
-import org.apache.log4j.Level;
-import org.apache.log4j.PatternLayout;
-
 /**
  * Test HBase Master and Region servers, client API 
  */
-public class TestHBaseCluster extends TestCase {
+public class TestHBaseCluster extends HBaseClusterTestCase {
+
+  private HTableDescriptor desc;
+  private HClient client;
 
   /** constructor */
-  public TestHBaseCluster(String name) {
-    super(name);
+  public TestHBaseCluster() {
+    super(true);
+    this.desc = null;
+    this.client = null;
+  }
+
+  /**
+   * Since all the "tests" depend on the results of the previous test, they are
+   * not Junit tests that can stand alone. Consequently we have a single Junit
+   * test that runs the "sub-tests" as private methods.
+   */
+  public void testHBaseCluster() {
+    try {
+      setup();
+      basic();
+      scanner();
+      listTables();
+      cleanup();
+      
+    } catch(IOException e) {
+      e.printStackTrace();
+      fail();
+    }
   }
 
-  /** Test suite so that all tests get run */
-  public static Test suite() {
-    TestSuite suite = new TestSuite();
-    suite.addTest(new TestHBaseCluster("testSetup"));
-    suite.addTest(new TestHBaseCluster("testBasic"));
-    suite.addTest(new TestHBaseCluster("testScanner"));
-    suite.addTest(new TestHBaseCluster("testCleanup"));
-    return suite;
+  public void tearDown() throws Exception {
+    super.tearDown();
+    if(client != null) {
+      client.close();
+    }
   }
 
   private static final int FIRST_ROW = 1;
@@ -65,126 +72,61 @@
   private static final String ANCHORNUM = "anchor:anchornum-";
   private static final String ANCHORSTR = "anchorstr";
 
-  private static Configuration conf = null;
-  private static boolean failures = false;
-  private static boolean initialized = false;
-  private static MiniHBaseCluster cluster = null;
-  private static HTableDescriptor desc = null;
-  private static HClient client = null;
-
-  // Set up environment, start mini cluster, etc.
-  
-  @SuppressWarnings("unchecked")
-  public void testSetup() throws Exception {
-    try {
-      if(System.getProperty("test.build.data") == null) {
-        String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
-        System.out.println(dir);
-        System.setProperty("test.build.data", dir);
-      }
-      conf = new HBaseConfiguration();
-      
-      Environment.getenv();
-      Logger rootLogger = Logger.getRootLogger();
-      if(Environment.debugging) {
-        rootLogger.setLevel(Level.WARN);
-      }
-
-      ConsoleAppender consoleAppender = null;
-      for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
-      e.hasMoreElements();) {
-
-        Appender a = e.nextElement();
-        if(a instanceof ConsoleAppender) {
-          consoleAppender = (ConsoleAppender)a;
-          break;
-        }
-      }
-      if(consoleAppender != null) {
-        Layout layout = consoleAppender.getLayout();
-        if(layout instanceof PatternLayout) {
-          PatternLayout consoleLayout = (PatternLayout)layout;
-          consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
-        }
-      }
-      Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
-
-      cluster = new MiniHBaseCluster(conf, 1);
-      client = new HClient(conf);
-
-      desc = new HTableDescriptor("test", 3);
-      desc.addFamily(new Text(CONTENTS));
-      desc.addFamily(new Text(ANCHOR));
-      client.createTable(desc);
-      
-    } catch(Exception e) {
-      failures = true;
-      throw e;
-    }
-    initialized = true;
+  private void setup() throws IOException {
+    client = new HClient(conf);
+    desc = new HTableDescriptor("test", 3);
+    desc.addFamily(new Text(CONTENTS));
+    desc.addFamily(new Text(ANCHOR));
+    client.createTable(desc);
   }
       
   // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
 
-  public void testBasic() throws IOException {
-    if(!initialized) {
-      throw new IllegalStateException();
-    }
-
-    try {
-      long startTime = System.currentTimeMillis();
-      
-      client.openTable(desc.getName());
-      
-      // Write out a bunch of values
-      
-      for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
-        long writeid = client.startUpdate(new Text("row_" + k));
-        client.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
-        client.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
-        client.commit(writeid);
-      }
-      System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
-          + ((System.currentTimeMillis() - startTime) / 1000.0));
+  private void basic() throws IOException {
+    long startTime = System.currentTimeMillis();
 
-      // Read them back in
+    client.openTable(desc.getName());
 
-      startTime = System.currentTimeMillis();
-      
-      Text collabel = null;
-      for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
-        Text rowlabel = new Text("row_" + k);
-
-        byte bodydata[] = client.get(rowlabel, CONTENTS_BASIC);
-        assertNotNull(bodydata);
-        String bodystr = new String(bodydata).toString().trim();
-        String teststr = CONTENTSTR + k;
-        assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
-            + "), expected: '" + teststr + "' got: '" + bodystr + "'",
-            bodystr, teststr);
-        collabel = new Text(ANCHORNUM + k);
-        bodydata = client.get(rowlabel, collabel);
-        bodystr = new String(bodydata).toString().trim();
-        teststr = ANCHORSTR + k;
-        assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
-            + "), expected: '" + teststr + "' got: '" + bodystr + "'",
-            bodystr, teststr);
-      }
-      
-      System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
-          + ((System.currentTimeMillis() - startTime) / 1000.0));
+    // Write out a bunch of values
 
-    } catch(IOException e) {
-      failures = true;
-      throw e;
+    for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
+      long writeid = client.startUpdate(new Text("row_" + k));
+      client.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
+      client.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
+      client.commit(writeid);
+    }
+    System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+    // Read them back in
+
+    startTime = System.currentTimeMillis();
+
+    Text collabel = null;
+    for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
+      Text rowlabel = new Text("row_" + k);
+
+      byte bodydata[] = client.get(rowlabel, CONTENTS_BASIC);
+      assertNotNull(bodydata);
+      String bodystr = new String(bodydata).toString().trim();
+      String teststr = CONTENTSTR + k;
+      assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
+          + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+          bodystr, teststr);
+      collabel = new Text(ANCHORNUM + k);
+      bodydata = client.get(rowlabel, collabel);
+      bodystr = new String(bodydata).toString().trim();
+      teststr = ANCHORSTR + k;
+      assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
+          + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+          bodystr, teststr);
     }
+
+    System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
   }
   
-  public void testScanner() throws IOException {
-    if(!initialized || failures) {
-      throw new IllegalStateException();
-    }
-
+  private void scanner() throws IOException {
     Text[] cols = new Text[] {
         new Text(ANCHORNUM + "[0-9]+"),
         new Text(CONTENTS_BASIC)
@@ -234,57 +176,31 @@
           + " rows. Elapsed time: "
           + ((System.currentTimeMillis() - startTime) / 1000.0));
 
-    } catch(IOException e) {
-      failures = true;
-      throw e;
-      
     } finally {
       s.close();
     }
   }
 
-  public void testListTables() throws IOException {
-    if(!initialized || failures) {
-      throw new IllegalStateException();
-    }
-    
-    try {
-      HTableDescriptor[] tables = client.listTables();
-      assertEquals(1, tables.length);
-      assertEquals(desc.getName(), tables[0].getName());
-      TreeSet<Text> families = tables[0].families();
-      assertEquals(2, families.size());
-      assertTrue(families.contains(new Text(CONTENTS)));
-      assertTrue(families.contains(new Text(ANCHOR)));
-      
-    } catch(IOException e) {
-      failures = true;
-      throw e;
-    }
+  private void listTables() throws IOException {
+    HTableDescriptor[] tables = client.listTables();
+    assertEquals(1, tables.length);
+    assertEquals(desc.getName(), tables[0].getName());
+    TreeSet<Text> families = tables[0].families();
+    assertEquals(2, families.size());
+    assertTrue(families.contains(new Text(CONTENTS)));
+    assertTrue(families.contains(new Text(ANCHOR)));
   }
   
-  public void testCleanup() throws IOException {
-    if(!initialized) {
-      throw new IllegalStateException();
-    }
-    
+  private void cleanup() throws IOException {
+
+    // Delete the table we created
+
+    client.deleteTable(desc.getName());
     try {
-      if(!failures) {
-        // Delete the table we created
+      Thread.sleep(30000);                    // Wait for table to be deleted
 
-        client.deleteTable(desc.getName());
-        try {
-          Thread.sleep(60000);                  // Wait for table to be deleted
-          
-        } catch(InterruptedException e) {
-        }
-      }
-      
-    } finally {
-      // Shut down the cluster
-    
-      cluster.shutdown();
-      client.close();
+    } catch(InterruptedException e) {
     }
   }
+  
 }

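TestHBaseCluster (above) and TestHRegion (below) now share the same shape: the
old suite() methods and static initialized/failures flags are replaced by one
JUnit entry point that calls ordered private steps. As a skeleton, with
hypothetical step names:

    // One JUnit test drives sub-tests that share state and must run in order;
    // any exception fails the whole sequence rather than leaking into later steps.
    public void testEverything() {
      try {
        setup();      // creates the fixtures the later steps depend on
        stepOne();
        stepTwo();
        cleanup();
      } catch (Exception e) {
        e.printStackTrace();
        fail();
      }
    }
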
Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java?view=auto&rev=539243
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java Thu May 17 20:22:54 2007
@@ -0,0 +1,97 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.util.TreeMap;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.SequenceFile.Reader;
+
+public class TestHLog extends HBaseTestCase implements HConstants {
+
+  protected void setUp() throws Exception {
+    super.setUp();
+  }
+  
+  public void testAppend() throws Exception {
+    Path dir = getUnitTestdir(getName());
+    FileSystem fs = FileSystem.get(this.conf);
+    if (fs.exists(dir)) {
+      fs.delete(dir);
+    }
+    final int COL_COUNT = 10;
+    final Text regionName = new Text("regionname");
+    final Text tableName = new Text("tablename");
+    final Text row = new Text("row");
+    Reader reader = null;
+    HLog log = new HLog(fs, dir, this.conf);
+    try {
+      // Write columns named 1, 2, 3, etc. and then values of single byte
+      // 1, 2, 3...
+      TreeMap<Text, BytesWritable> cols = new TreeMap<Text, BytesWritable>();
+      for (int i = 0; i < COL_COUNT; i++) {
+        cols.put(new Text(Integer.toString(i)),
+          new BytesWritable(new byte[] { (byte)(i + '0') }));
+      }
+      long timestamp = System.currentTimeMillis();
+      log.append(regionName, tableName, row, cols, timestamp);
+      long logSeqId = log.startCacheFlush();
+      log.completeCacheFlush(regionName, tableName, logSeqId);
+      log.close();
+      Path filename = log.computeFilename(log.filenum - 1);
+      log = null;
+      // Now open a reader on the log and assert append worked.
+      reader = new SequenceFile.Reader(fs, filename, conf);
+      HLogKey key = new HLogKey();
+      HLogEdit val = new HLogEdit();
+      for (int i = 0; i < COL_COUNT; i++) {
+        reader.next(key, val);
+        assertEquals(key.getRegionName(), regionName);
+        assertEquals(key.getTablename(), tableName);
+        assertEquals(key.getRow(), row);
+        assertEquals(val.getVal().get()[0], (byte)(i + '0'));
+        System.out.println(key + " " + val);
+      }
+      while (reader.next(key, val)) {
+        // Assert only one more row... the meta flushed row.
+        assertEquals(key.getRegionName(), regionName);
+        assertEquals(key.getTablename(), tableName);
+        assertEquals(key.getRow(), HLog.METAROW);
+        assertEquals(val.getColumn(), HLog.METACOLUMN);
+        assertEquals(0, val.getVal().compareTo(COMPLETE_CACHEFLUSH));
+        System.out.println(key + " " + val);
+      }
+    } finally {
+      if (log != null) {
+        log.close();
+      }
+      if (reader != null) {
+        reader.close();
+      }
+      if (fs.exists(dir)) {
+        fs.delete(dir);
+      }
+    }
+  }
+
+  protected void tearDown() throws Exception {
+    super.tearDown();
+  }
+}
\ No newline at end of file

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java?view=diff&rev=539243&r1=539242&r2=539243
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java Thu May 17 20:22:54 2007
@@ -15,31 +15,20 @@
  */
 package org.apache.hadoop.hbase;
 
-import junit.framework.TestCase;
-import junit.framework.Test;
-import junit.framework.TestSuite;
-
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Enumeration;
 import java.util.Iterator;
 import java.util.List;
 import java.util.TreeMap;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
-import org.apache.log4j.Appender;
-import org.apache.log4j.ConsoleAppender;
-import org.apache.log4j.Layout;
 import org.apache.log4j.Logger;
-import org.apache.log4j.Level;
-import org.apache.log4j.PatternLayout;
 
 /**
  * Basic stand-alone testing of HRegion.
@@ -47,27 +36,35 @@
  * A lot of the meta information for an HRegion now lives inside other
  * HRegions or in the HBaseMaster, so only basic testing is possible.
  */
-public class TestHRegion extends TestCase {
+public class TestHRegion extends HBaseTestCase implements RegionUnavailableListener {
   private Logger LOG = Logger.getLogger(this.getClass().getName());
   
   /** Constructor */
-  public TestHRegion(String name) {
-    super(name);
+  public TestHRegion() {
+    super();
   }
   
-  /** Test suite so that all tests get run */
-  public static Test suite() {
-    TestSuite suite = new TestSuite();
-    suite.addTest(new TestHRegion("testSetup"));
-    suite.addTest(new TestHRegion("testLocks"));
-    suite.addTest(new TestHRegion("testBadPuts"));
-    suite.addTest(new TestHRegion("testBasic"));
-    suite.addTest(new TestHRegion("testScan"));
-    suite.addTest(new TestHRegion("testBatchWrite"));
-    suite.addTest(new TestHRegion("testSplitAndMerge"));
-    suite.addTest(new TestHRegion("testRead"));
-    suite.addTest(new TestHRegion("testCleanup"));
-    return suite;
+  /**
+   * Since all the "tests" depend on the results of the previous test, they are
+   * not Junit tests that can stand alone. Consequently we have a single Junit
+   * test that runs the "sub-tests" as private methods.
+   */
+  public void testHRegion() {
+    try {
+      setup();
+      locks();
+      badPuts();
+      basic();
+      scan();
+      batchWrite();
+      splitAndMerge();
+      read();
+      cleanup();
+      
+    } catch(Exception e) {
+      e.printStackTrace();
+      fail();
+    }
   }
   
   
@@ -82,9 +79,6 @@
   private static final Text CONTENTS_FIRSTCOL = new Text("contents:firstcol");
   private static final Text ANCHOR_SECONDCOL = new Text("anchor:secondcol");
   
-  private static boolean initialized = false;
-  private static boolean failures = false;
-  private static Configuration conf = null;
   private static MiniDFSCluster cluster = null;
   private static FileSystem fs = null;
   private static Path parentdir = null;
@@ -96,138 +90,86 @@
   
   private static int numInserted = 0;
 
-  // Set up environment, start mini cluster, etc.
+  // Create directories, start mini cluster, etc.
   
-  @SuppressWarnings("unchecked")
-  public void testSetup() throws IOException {
-    try {
-      if(System.getProperty("test.build.data") == null) {
-        String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
-        System.out.println(dir);
-        System.setProperty("test.build.data", dir);
-      }
-      conf = new HBaseConfiguration();
-      
-      Environment.getenv();
-      if(Environment.debugging) {
-        Logger rootLogger = Logger.getRootLogger();
-        rootLogger.setLevel(Level.WARN);
-
-        ConsoleAppender consoleAppender = null;
-        for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
-            e.hasMoreElements();) {
-        
-          Appender a = e.nextElement();
-          if(a instanceof ConsoleAppender) {
-            consoleAppender = (ConsoleAppender)a;
-            break;
-          }
-        }
-        if(consoleAppender != null) {
-          Layout layout = consoleAppender.getLayout();
-          if(layout instanceof PatternLayout) {
-            PatternLayout consoleLayout = (PatternLayout)layout;
-            consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
-          }
-        }
-        Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
-      }
-      
-      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
-      fs = cluster.getFileSystem();
-      parentdir = new Path("/hbase");
-      fs.mkdirs(parentdir);
-      newlogdir = new Path(parentdir, "log");
-      oldlogfile = new Path(parentdir, "oldlogfile");
-
-      log = new HLog(fs, newlogdir, conf);
-      desc = new HTableDescriptor("test", 3);
-      desc.addFamily(new Text("contents:"));
-      desc.addFamily(new Text("anchor:"));
-      region = new HRegion(parentdir, log, fs, conf, 
-          new HRegionInfo(1, desc, null, null), null, oldlogfile);
-      
-    } catch(IOException e) {
-      failures = true;
-      throw e;
-    }
-    initialized = true;
+  private void setup() throws IOException {
+
+    cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    fs = cluster.getFileSystem();
+    parentdir = new Path("/hbase");
+    fs.mkdirs(parentdir);
+    newlogdir = new Path(parentdir, "log");
+    oldlogfile = new Path(parentdir, "oldlogfile");
+
+    log = new HLog(fs, newlogdir, conf);
+    desc = new HTableDescriptor("test", 3);
+    desc.addFamily(new Text("contents:"));
+    desc.addFamily(new Text("anchor:"));
+    region = new HRegion(parentdir, log, fs, conf, 
+        new HRegionInfo(1, desc, null, null), null, oldlogfile);
   }
 
   // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
 
-  public void testBasic() throws IOException {
-    if(!initialized) {
-      throw new IllegalStateException();
+  private void basic() throws IOException {
+    long startTime = System.currentTimeMillis();
+
+    // Write out a bunch of values
+
+    for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
+      long writeid = region.startUpdate(new Text("row_" + k));
+      region.put(writeid, CONTENTS_BASIC,
+          new BytesWritable((CONTENTSTR + k).getBytes()));
+
+      region.put(writeid, new Text(ANCHORNUM + k),
+          new BytesWritable((ANCHORSTR + k).getBytes()));
+      region.commit(writeid);
     }
+    System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
 
-    try {
-      long startTime = System.currentTimeMillis();
-      
-      // Write out a bunch of values
-      
-      for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
-        long writeid = region.startUpdate(new Text("row_" + k));
-        region.put(writeid, CONTENTS_BASIC,
-            new BytesWritable((CONTENTSTR + k).getBytes()));
-        
-        region.put(writeid, new Text(ANCHORNUM + k),
-            new BytesWritable((ANCHORSTR + k).getBytes()));
-        region.commit(writeid);
-      }
-      System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
-          + ((System.currentTimeMillis() - startTime) / 1000.0));
+    // Flush cache
 
-      // Flush cache
-      
-      startTime = System.currentTimeMillis();
-      
-      region.flushcache(false);
-      
-      System.out.println("Cache flush elapsed time: "
-          + ((System.currentTimeMillis() - startTime) / 1000.0));
+    startTime = System.currentTimeMillis();
 
-      // Read them back in
+    region.flushcache(false);
 
-      startTime = System.currentTimeMillis();
-      
-      Text collabel = null;
-      for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
-        Text rowlabel = new Text("row_" + k);
-
-        BytesWritable bodydata = region.get(rowlabel, CONTENTS_BASIC);
-        assertNotNull(bodydata);
-        byte[] bytes = new byte[bodydata.getSize()];
-        System.arraycopy(bodydata.get(), 0, bytes, 0, bytes.length);
-        String bodystr = new String(bytes).toString().trim();
-        String teststr = CONTENTSTR + k;
-        assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
-            + "), expected: '" + teststr + "' got: '" + bodystr + "'",
-            bodystr, teststr);
-        collabel = new Text(ANCHORNUM + k);
-        bodydata = region.get(rowlabel, collabel);
-        bytes = new byte[bodydata.getSize()];
-        System.arraycopy(bodydata.get(), 0, bytes, 0, bytes.length);
-        bodystr = new String(bytes).toString().trim();
-        teststr = ANCHORSTR + k;
-        assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
-            + "), expected: '" + teststr + "' got: '" + bodystr + "'",
-            bodystr, teststr);
-      }
-      
-      System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
-          + ((System.currentTimeMillis() - startTime) / 1000.0));
+    System.out.println("Cache flush elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
 
-    } catch(IOException e) {
-      failures = true;
-      throw e;
+    // Read them back in
+
+    startTime = System.currentTimeMillis();
+
+    Text collabel = null;
+    for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
+      Text rowlabel = new Text("row_" + k);
+
+      BytesWritable bodydata = region.get(rowlabel, CONTENTS_BASIC);
+      assertNotNull(bodydata);
+      byte[] bytes = new byte[bodydata.getSize()];
+      System.arraycopy(bodydata.get(), 0, bytes, 0, bytes.length);
+      String bodystr = new String(bytes).toString().trim();
+      String teststr = CONTENTSTR + k;
+      assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
+          + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+          bodystr, teststr);
+      collabel = new Text(ANCHORNUM + k);
+      bodydata = region.get(rowlabel, collabel);
+      bytes = new byte[bodydata.getSize()];
+      System.arraycopy(bodydata.get(), 0, bytes, 0, bytes.length);
+      bodystr = new String(bytes).toString().trim();
+      teststr = ANCHORSTR + k;
+      assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
+          + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+          bodystr, teststr);
     }
+
+    System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
   }
   
-  public void testBadPuts() throws IOException {
-    if(!initialized) {
-      throw new IllegalStateException();
-    }  
+  private void badPuts() throws IOException {
     
     // Try put with bad lockid.
     boolean exceptionThrown = false;
@@ -259,7 +201,7 @@
   /**
    * Test getting and releasing locks.
    */
-  public void testLocks() {
+  private void locks() {
     final int threadCount = 10;
     final int lockCount = 10;
     
@@ -317,11 +259,7 @@
 
   // Test scanners. Writes contents:firstcol and anchor:secondcol
   
-  public void testScan() throws IOException {
-    if(!initialized) {
-      throw new IllegalStateException();
-    }
-
+  private void scan() throws IOException {
     Text cols[] = new Text[] {
         CONTENTS_FIRSTCOL,
         ANCHOR_SECONDCOL
@@ -583,138 +521,126 @@
   // long time to run.
   // Creates contents:body
   
-  public void testBatchWrite() throws IOException {
-    if(!initialized || failures) {
-      throw new IllegalStateException();
-    }
-    if(! Environment.debugging) {
+  private void batchWrite() throws IOException {
+    if(! StaticTestEnvironment.debugging) {
       return;
     }
 
-    try {
-      long totalFlush = 0;
-      long totalCompact = 0;
-      long totalLog = 0;
-      long startTime = System.currentTimeMillis();
-
-      // 1M writes
-
-      int valsize = 1000;
-      for (int k = FIRST_ROW; k <= N_ROWS; k++) {
-        // Come up with a random 1000-byte string
-        String randstr1 = "" + System.currentTimeMillis();
-        StringBuffer buf1 = new StringBuffer("val_" + k + "__");
-        while (buf1.length() < valsize) {
-          buf1.append(randstr1);
-        }
-
-        // Write to the HRegion
-        long writeid = region.startUpdate(new Text("row_" + k));
-        region.put(writeid, CONTENTS_BODY, new BytesWritable(buf1.toString().getBytes()));
-        region.commit(writeid);
-        if (k > 0 && k % (N_ROWS / 100) == 0) {
-          System.out.println("Flushing write #" + k);
-
-          long flushStart = System.currentTimeMillis();
-          region.flushcache(false);
-          long flushEnd = System.currentTimeMillis();
-          totalFlush += (flushEnd - flushStart);
-
-          if (k % (N_ROWS / 10) == 0) {
-            System.out.print("Rolling log...");
-            long logStart = System.currentTimeMillis();
-            log.rollWriter();
-            long logEnd = System.currentTimeMillis();
-            totalLog += (logEnd - logStart);
-            System.out.println("  elapsed time: " + ((logEnd - logStart) / 
1000.0));
-          }
+    long totalFlush = 0;
+    long totalCompact = 0;
+    long totalLog = 0;
+    long startTime = System.currentTimeMillis();
+
+    // 1M writes
+
+    int valsize = 1000;
+    for (int k = FIRST_ROW; k <= N_ROWS; k++) {
+      // Come up with a random 1000-byte string
+      String randstr1 = "" + System.currentTimeMillis();
+      StringBuffer buf1 = new StringBuffer("val_" + k + "__");
+      while (buf1.length() < valsize) {
+        buf1.append(randstr1);
+      }
+
+      // Write to the HRegion
+      long writeid = region.startUpdate(new Text("row_" + k));
+      region.put(writeid, CONTENTS_BODY, new BytesWritable(buf1.toString().getBytes()));
+      region.commit(writeid);
+      if (k > 0 && k % (N_ROWS / 100) == 0) {
+        System.out.println("Flushing write #" + k);
+
+        long flushStart = System.currentTimeMillis();
+        region.flushcache(false);
+        long flushEnd = System.currentTimeMillis();
+        totalFlush += (flushEnd - flushStart);
+
+        if (k % (N_ROWS / 10) == 0) {
+          System.out.print("Rolling log...");
+          long logStart = System.currentTimeMillis();
+          log.rollWriter();
+          long logEnd = System.currentTimeMillis();
+          totalLog += (logEnd - logStart);
+          System.out.println("  elapsed time: " + ((logEnd - logStart) / 
1000.0));
         }
       }
-      long startCompact = System.currentTimeMillis();
-      if(region.compactStores()) {
-        totalCompact = System.currentTimeMillis() - startCompact;
-        System.out.println("Region compacted - elapsedTime: " + (totalCompact 
/ 1000.0));
-        
-      } else {
-        System.out.println("No compaction required.");
-      }
-      long endTime = System.currentTimeMillis();
-
-      long totalElapsed = (endTime - startTime);
-      System.out.println();
-      System.out.println("Batch-write complete.");
-      System.out.println("Wrote " + N_ROWS + " rows, each of ~" + valsize + " 
bytes");
-      System.out.println("Total flush-time: " + (totalFlush / 1000.0));
-      System.out.println("Total compact-time: " + (totalCompact / 1000.0));
-      System.out.println("Total log-time: " + (totalLog / 1000.0));
-      System.out.println("Total time elapsed: " + (totalElapsed / 1000.0));
-      System.out.println("Total time, rows/second: " + (N_ROWS / (totalElapsed 
/ 1000.0)));
-      System.out.println("Adjusted time (not including flush, compact, or 
log): " + ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0));
-      System.out.println("Adjusted time, rows/second: " + (N_ROWS / 
((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0)));
-      System.out.println();
-      
-    } catch(IOException e) {
-      failures = true;
-      throw e;
     }
+    long startCompact = System.currentTimeMillis();
+    if(region.compactStores()) {
+      totalCompact = System.currentTimeMillis() - startCompact;
+      System.out.println("Region compacted - elapsedTime: " + (totalCompact / 
1000.0));
+
+    } else {
+      System.out.println("No compaction required.");
+    }
+    long endTime = System.currentTimeMillis();
+
+    long totalElapsed = (endTime - startTime);
+    System.out.println();
+    System.out.println("Batch-write complete.");
+    System.out.println("Wrote " + N_ROWS + " rows, each of ~" + valsize + " 
bytes");
+    System.out.println("Total flush-time: " + (totalFlush / 1000.0));
+    System.out.println("Total compact-time: " + (totalCompact / 1000.0));
+    System.out.println("Total log-time: " + (totalLog / 1000.0));
+    System.out.println("Total time elapsed: " + (totalElapsed / 1000.0));
+    System.out.println("Total time, rows/second: " + (N_ROWS / (totalElapsed / 
1000.0)));
+    System.out.println("Adjusted time (not including flush, compact, or log): 
" + ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0));
+    System.out.println("Adjusted time, rows/second: " + (N_ROWS / 
((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0)));
+    System.out.println();
+
   }
 
   // NOTE: This test depends on testBatchWrite succeeding
   
-  public void testSplitAndMerge() throws IOException {
-    if(!initialized || failures) {
-      throw new IllegalStateException();
+  private void splitAndMerge() throws IOException {
+    Text midKey = new Text();
+
+    if(region.needsSplit(midKey)) {
+      System.out.println("Needs split");
     }
+
+    // Split it anyway
+
+    Text midkey = new Text("row_"
+        + (StaticTestEnvironment.debugging ? (N_ROWS / 2) : (NUM_VALS/2)));
     
-    try {
-      Text midKey = new Text();
-      
-      if(region.needsSplit(midKey)) {
-        System.out.println("Needs split");
-      }
-      
-      // Split it anyway
+    Path oldRegionPath = region.getRegionDir();
 
-      Text midkey = new Text("row_" + (Environment.debugging ? (N_ROWS / 2) : (NUM_VALS/2)));
-      Path oldRegionPath = region.getRegionDir();
-      
-      long startTime = System.currentTimeMillis();
-      
-      HRegion subregions[] = region.closeAndSplit(midkey);
-      
-      System.out.println("Split region elapsed time: "
-          + ((System.currentTimeMillis() - startTime) / 1000.0));
-      
-      assertEquals("Number of subregions", subregions.length, 2);
+    long startTime = System.currentTimeMillis();
 
-      // Now merge it back together
+    HRegion subregions[] = region.closeAndSplit(midkey, this);
 
-      Path oldRegion1 = subregions[0].getRegionDir();
-      Path oldRegion2 = subregions[1].getRegionDir();
-      
-      startTime = System.currentTimeMillis();
-      
-      region = HRegion.closeAndMerge(subregions[0], subregions[1]);
+    System.out.println("Split region elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
 
-      System.out.println("Merge regions elapsed time: "
-          + ((System.currentTimeMillis() - startTime) / 1000.0));
-      
-      fs.delete(oldRegionPath);
-      fs.delete(oldRegion1);
-      fs.delete(oldRegion2);
-      
-    } catch(IOException e) {
-      failures = true;
-      throw e;
-    }
+    assertEquals("Number of subregions", subregions.length, 2);
+
+    // Now merge it back together
+
+    Path oldRegion1 = subregions[0].getRegionDir();
+    Path oldRegion2 = subregions[1].getRegionDir();
+
+    startTime = System.currentTimeMillis();
+
+    region = HRegion.closeAndMerge(subregions[0], subregions[1]);
+
+    System.out.println("Merge regions elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+    fs.delete(oldRegionPath);
+    fs.delete(oldRegion1);
+    fs.delete(oldRegion2);
   }
 
+  /* (non-Javadoc)
+   * @see 
org.apache.hadoop.hbase.RegionUnavailableListener#regionIsUnavailable(org.apache.hadoop.io.Text)
+   */
+  public void regionIsUnavailable(Text regionName) {
+    // We don't use this here. It is only for the HRegionServer
+  }
+  
  // This test verifies that everything is still there after splitting and merging
   
-  public void testRead() throws IOException {
-    if(!initialized || failures) {
-      throw new IllegalStateException();
-    }
+  private void read() throws IOException {
 
     // First verify the data written by testBasic()
 
@@ -820,9 +746,8 @@
     
     // Verify testBatchWrite data
 
-    if(Environment.debugging) {
+    if(StaticTestEnvironment.debugging) {
       startTime = System.currentTimeMillis();
-      
       s = region.getScanner(new Text[] { CONTENTS_BODY }, new Text());
       try {
         int numFetched = 0;
@@ -883,7 +808,6 @@
       s.close();
     }
   }
-
   
   private static void deleteFile(File f) {
     if(f.isDirectory()) {
@@ -895,18 +819,14 @@
     f.delete();
   }
   
-  public void testCleanup() throws IOException {
-    if(!initialized) {
-      throw new IllegalStateException();
-    }
+  private void cleanup() throws IOException {
 
     // Shut down the mini cluster
-    
+
     cluster.shutdown();
-    
+
     // Delete all the DFS files
-    
+
     deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
-    
-    }
+  }
 }

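The API change worth noting in this file: HRegion.closeAndSplit() now takes a
RegionUnavailableListener, which TestHRegion satisfies with a no-op. A sketch
of a caller under the new signature, based only on the calls visible above:

    package org.apache.hadoop.hbase;

    import java.io.IOException;
    import org.apache.hadoop.io.Text;

    class SplitSketch {
      static HRegion[] split(HRegion region, Text midkey) throws IOException {
        // The listener is notified while the region is offline during the
        // split; only HRegionServer needs to react, so a test can ignore it.
        RegionUnavailableListener listener = new RegionUnavailableListener() {
          public void regionIsUnavailable(Text regionName) {
            // no-op in tests
          }
        };
        return region.closeAndSplit(midkey, listener);
      }
    }
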
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java?view=diff&rev=539243&r1=539242&r2=539243
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java Thu May 17 20:22:54 2007
@@ -17,9 +17,7 @@
 
 import java.io.ByteArrayOutputStream;
 import java.io.DataOutputStream;
-import java.io.File;
 import java.io.IOException;
-import java.util.Enumeration;
 import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
@@ -30,16 +28,7 @@
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Text;
 
-import org.apache.log4j.Appender;
-import org.apache.log4j.ConsoleAppender;
-import org.apache.log4j.Layout;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-
-import junit.framework.TestCase;
-
-public class TestScanner extends TestCase {
+public class TestScanner extends HBaseTestCase {
   private static final Text FIRST_ROW = new Text();
   private static final Text[] COLS = {
       HConstants.COLUMN_FAMILY
@@ -127,12 +116,7 @@
       
       } finally {
         if(scanner != null) {
-          try {
-            scanner.close();
-          
-          } catch(IOException e) {
-            e.printStackTrace();
-          }
+          scanner.close();
           scanner = null;
         }
       }
@@ -146,7 +130,6 @@
   }
  
   /** The test! */
-  @SuppressWarnings("unchecked")
   public void testScanner() throws IOException {
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
@@ -155,37 +138,7 @@
       
       // Initialization
       
-      if(System.getProperty("test.build.data") == null) {
-        String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
-        System.out.println(dir);
-        System.setProperty("test.build.data", dir);
-      }
       Configuration conf = new HBaseConfiguration();
-    
-      Environment.getenv();
-      if(Environment.debugging) {
-        Logger rootLogger = Logger.getRootLogger();
-        rootLogger.setLevel(Level.WARN);
-
-        ConsoleAppender consoleAppender = null;
-        for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
-            e.hasMoreElements();) {
-        
-          Appender a = e.nextElement();
-          if(a instanceof ConsoleAppender) {
-            consoleAppender = (ConsoleAppender)a;
-            break;
-          }
-        }
-        if(consoleAppender != null) {
-          Layout layout = consoleAppender.getLayout();
-          if(layout instanceof PatternLayout) {
-            PatternLayout consoleLayout = (PatternLayout)layout;
-            consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
-          }
-        }
-        Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
-      }
       cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
       fs = cluster.getFileSystem();
       Path dir = new Path("/hbase");

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java?view=auto&rev=539243
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java Thu May 17 20:22:54 2007
@@ -0,0 +1,77 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+/** Tests table creation restrictions*/
+public class TestTable extends HBaseClusterTestCase {
+  public TestTable() {
+    super(true);
+  }
+
+  public void testTable() {
+    HClient client = new HClient(conf);
+    
+    try {
+      client.createTable(HGlobals.rootTableDesc);
+      
+    } catch(IllegalArgumentException e) {
+      // Expected - ignore it
+      
+    } catch(Exception e) {
+      System.err.println("Unexpected exception");
+      e.printStackTrace();
+      fail();
+    }
+    
+    try {
+      client.createTable(HGlobals.metaTableDesc);
+      
+    } catch(IllegalArgumentException e) {
+      // Expected - ignore it
+      
+    } catch(Exception e) {
+      System.err.println("Unexpected exception");
+      e.printStackTrace();
+      fail();
+    }
+
+    HTableDescriptor desc = new HTableDescriptor("test", 1);
+    desc.addFamily(HConstants.COLUMN_FAMILY);
+
+    try {
+      client.createTable(desc);
+      
+    } catch(Exception e) {
+      System.err.println("Unexpected exception");
+      e.printStackTrace();
+      fail();
+    }
+
+    try {
+      client.createTable(desc);
+      
+    } catch(IOException e) {
+      // Expected. Ignore it.
+      
+    } catch(Exception e) {
+      System.err.println("Unexpected exception");
+      e.printStackTrace();
+      fail();
+    }
+}
+}

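TestTable relies on the new HBaseClusterTestCase base class, which is added in
this commit but not shown in this excerpt. From the call sites here, a
hypothetical skeleton of a cluster-backed test looks like the following; the
meaning of the boolean constructor argument is not visible in this excerpt, it
is simply the flag TestTable and TestHBaseCluster pass:

    package org.apache.hadoop.hbase;

    import java.io.IOException;

    /** Hypothetical test following the TestTable/TestHBaseCluster pattern. */
    public class ExampleClusterTest extends HBaseClusterTestCase {
      public ExampleClusterTest() {
        super(true);   // same flag the tests above pass
      }

      public void testListTables() throws IOException {
        // conf is inherited from the base class, as in the tests above
        HClient client = new HClient(conf);
        assertNotNull(client.listTables());
      }
    }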
