Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/Store.java Sat Jul  4 02:16:16 2009
@@ -49,12 +49,15 @@
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.SequenceFile;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.io.hfile.HFile.CompactionReader;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
@@ -82,7 +85,7 @@
  * <p>Locking and transactions are handled at a higher level.  This API should
  * not be called directly but by an HRegion manager.
  */
-public class Store implements HConstants {
+public class Store implements HConstants, HeapSize {
   static final Log LOG = LogFactory.getLog(Store.class);
   /**
    * Comparator that looks at columns and compares their family portions.
@@ -107,6 +110,7 @@
   final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   final byte [] storeName;
   private final String storeNameStr;
+  private final boolean inMemory;
 
   /*
   * Sorted Map of readers keyed by maximum edit sequence id (Most recent should
@@ -187,7 +191,10 @@
     // MIN_COMMITS_FOR_COMPACTION map files
     this.compactionThreshold =
       conf.getInt("hbase.hstore.compactionThreshold", 3);
-
+    
+    // Check if this is in-memory store
+    this.inMemory = family.isInMemory();
+    
     // By default we split region if a file > DEFAULT_MAX_FILE_SIZE.
     long maxFileSize = info.getTableDesc().getMaxFileSize();
     if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) {
@@ -363,7 +370,7 @@
       }
       StoreFile curfile = null;
       try {
-        curfile = new StoreFile(fs, p, blockcache, this.conf);
+        curfile = new StoreFile(fs, p, blockcache, this.conf, this.inMemory);
       } catch (IOException ioe) {
         LOG.warn("Failed open of " + p + "; presumption is that file was " +
          "corrupted at flush and lost edits picked up by commit log replay. " +
@@ -509,7 +516,7 @@
           if (!isExpired(kv, oldestTimestamp)) {
             writer.append(kv);
             entries++;
-            flushed += this.memstore.heapSize(kv, true);
+            flushed += this.memstore.heapSizeChange(kv, true);
           }
         }
         // B. Write out the log sequence number that corresponds to this output
@@ -520,7 +527,7 @@
       }
     }
     StoreFile sf = new StoreFile(this.fs, writer.getPath(), blockcache, 
-      this.conf);
+      this.conf, this.inMemory);
     Reader r = sf.getReader();
     this.storeSize += r.length();
     if(LOG.isDebugEnabled()) {
@@ -674,12 +681,12 @@
           LOG.warn("Path is null for " + file);
           return null;
         }
-        Reader r = file.getReader();
+        CompactionReader r = file.getCompactionReader();
         if (r == null) {
           LOG.warn("StoreFile " + file + " has a null Reader");
           continue;
         }
-        long len = file.getReader().length();
+        long len = file.getCompactionReader().length();
         fileSizes[i] = len;
         totalSize += len;
       }
@@ -838,7 +845,7 @@
     // init:
     for (int i = 0; i < filesToCompact.size(); ++i) {
       // TODO open a new HFile.Reader w/o block cache.
-      Reader r = filesToCompact.get(i).getReader();
+      CompactionReader r = filesToCompact.get(i).getCompactionReader();
       if (r == null) {
         LOG.warn("StoreFile " + filesToCompact.get(i) + " has a null Reader");
         continue;
@@ -919,7 +926,7 @@
       return;
     }
     StoreFile finalCompactedFile = new StoreFile(this.fs, p, blockcache, 
-      this.conf);
+      this.conf, this.inMemory);
     this.lock.writeLock().lock();
     try {
       try {
@@ -953,7 +960,7 @@
       // 4. Compute new store size
       this.storeSize = 0L;
       for (StoreFile hsf : this.storefiles.values()) {
-        Reader r = hsf.getReader();
+        Reader r = hsf.getCompactionReader();
         if (r == null) {
           LOG.warn("StoreFile " + hsf + " has a null Reader");
           continue;
@@ -1626,4 +1633,19 @@
         Bytes.toBytes(newValue));
     return new ICVResult(newValue, newKv.heapSize(), newKv);
   }
+
+  public static final long FIXED_OVERHEAD = ClassSize.align(
+      ClassSize.OBJECT + (17 * ClassSize.REFERENCE) +
+      (5 * Bytes.SIZEOF_LONG) + (3 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN +
+      ClassSize.align(ClassSize.ARRAY));
+  
+  public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
+      ClassSize.OBJECT + ClassSize.REENTRANT_LOCK + 
+      ClassSize.CONCURRENT_SKIPLISTMAP + 
+      ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + ClassSize.OBJECT);
+      
+  @Override
+  public long heapSize() {
+    return DEEP_OVERHEAD + this.memstore.heapSize();
+  }
 }
\ No newline at end of file

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Sat Jul  4 02:16:16 2009
@@ -42,6 +42,7 @@
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
+import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
 
@@ -75,6 +76,8 @@
   private Path referencePath;
   // Should the block cache be used or not.
   private boolean blockcache;
+  // Is this from an in-memory store
+  private boolean inMemory;
   
   // Keys for metadata stored in backing HFile.
  private static final byte [] MAX_SEQ_ID_KEY = Bytes.toBytes("MAX_SEQ_ID_KEY");
@@ -112,12 +115,13 @@
    * @throws IOException When opening the reader fails.
    */
   StoreFile(final FileSystem fs, final Path p, final boolean blockcache, 
-      final HBaseConfiguration conf) 
+      final HBaseConfiguration conf, final boolean inMemory) 
   throws IOException {
     this.conf = conf;
     this.fs = fs;
     this.path = p;
     this.blockcache = blockcache;
+    this.inMemory = inMemory;
     if (isReference(p)) {
       this.reference = Reference.read(fs, p);
       this.referencePath = getReferredToFile(this.path);
@@ -262,7 +266,8 @@
       this.reader = new HalfHFileReader(this.fs, this.referencePath, 
           getBlockCache(), this.reference);
     } else {
-      this.reader = new StoreFileReader(this.fs, this.path, getBlockCache());
+      this.reader = new Reader(this.fs, this.path, getBlockCache(),
+          this.inMemory);
     }
     // Load up indices and fileinfo.
     Map<byte [], byte []> map = this.reader.loadFileInfo();
@@ -298,71 +303,18 @@
   }
 
   /**
-   * Override to add some customization on HFile.Reader
-   */
-  static class StoreFileReader extends HFile.Reader {
-    /**
-     * 
-     * @param fs
-     * @param path
-     * @param cache
-     * @throws IOException
-     */
-    public StoreFileReader(FileSystem fs, Path path, BlockCache cache)
-        throws IOException {
-      super(fs, path, cache);
-    }
-
-    @Override
-    protected String toStringFirstKey() {
-      return KeyValue.keyToString(getFirstKey());
-    }
-
-    @Override
-    protected String toStringLastKey() {
-      return KeyValue.keyToString(getLastKey());
-    }
-  }
-
-  /**
-   * Override to add some customization on HalfHFileReader.
+   * @return Current reader.  Must call open first else returns null.
    */
-  static class HalfStoreFileReader extends HalfHFileReader {
-    /**
-     * 
-     * @param fs
-     * @param p
-     * @param c
-     * @param r
-     * @throws IOException
-     */
-    public HalfStoreFileReader(FileSystem fs, Path p, BlockCache c, Reference r)
-        throws IOException {
-      super(fs, p, c, r);
-    }
-
-    @Override
-    public String toString() {
-      return super.toString() + (isTop()? ", half=top": ", half=bottom") +
-          " splitKey: " + KeyValue.keyToString(splitkey);
-    }
-
-    @Override
-    protected String toStringFirstKey() {
-      return KeyValue.keyToString(getFirstKey());
-    }
-
-    @Override
-    protected String toStringLastKey() {
-      return KeyValue.keyToString(getLastKey());
-    }
+  public HFile.Reader getReader() {
+    return this.reader;
   }
 
   /**
+   * Gets a special Reader for use during compactions.  Will not cache blocks.
    * @return Current reader.  Must call open first else returns null.
    */
-  public HFile.Reader getReader() {
-    return this.reader;
+  public HFile.CompactionReader getCompactionReader() {
+    return new HFile.CompactionReader(this.reader);
   }
 
   /**

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/ClassSize.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/ClassSize.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/ClassSize.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/ClassSize.java Sat Jul  4 02:16:16 2009
@@ -73,7 +73,25 @@
   
   /** Overhead for ConcurrentHashMap.Segment */
   public static int CONCURRENT_HASHMAP_SEGMENT = 0;
-
+  
+  /** Overhead for ConcurrentSkipListMap */
+  public static int CONCURRENT_SKIPLISTMAP = 0;
+  
+  /** Overhead for ConcurrentSkipListMap Entry */
+  public static int CONCURRENT_SKIPLISTMAP_ENTRY = 0;
+  
+  /** Overhead for ReentrantReadWriteLock */
+  public static int REENTRANT_LOCK = 0;
+  
+  /** Overhead for AtomicLong */
+  public static int ATOMIC_LONG = 0;
+  
+  /** Overhead for AtomicInteger */
+  public static int ATOMIC_INTEGER = 0;
+  
+  /** Overhead for AtomicBoolean */
+  public static int ATOMIC_BOOLEAN = 0;
+  
   private static final String THIRTY_TWO = "32";
 
   /**
@@ -118,6 +136,21 @@
       
     CONCURRENT_HASHMAP_SEGMENT = align(REFERENCE + OBJECT + 
         (3 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_FLOAT + ARRAY);
+    
+    CONCURRENT_SKIPLISTMAP = align(Bytes.SIZEOF_INT + OBJECT + (8 * REFERENCE));
+    
+    CONCURRENT_SKIPLISTMAP_ENTRY = align(
+        align(OBJECT + (3 * REFERENCE)) + /* one node per entry */ 
+        align((OBJECT + (3 * REFERENCE))/2)); /* one index per two entries */
+    
+    REENTRANT_LOCK = align(OBJECT + (3 * REFERENCE));
+    
+    ATOMIC_LONG = align(OBJECT + Bytes.SIZEOF_LONG);
+    
+    ATOMIC_INTEGER = align(OBJECT + Bytes.SIZEOF_INT);
+    
+    ATOMIC_BOOLEAN = align(OBJECT + Bytes.SIZEOF_BOOLEAN);
+    
   }
   
   /**

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java Sat Jul  4 02:16:16 2009
@@ -32,6 +32,7 @@
 import org.apache.zookeeper.server.ZooKeeperServerMain;
 import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
 import org.apache.zookeeper.server.quorum.QuorumPeerMain;
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig.ConfigException;
 
 /**
  * HBase's version of ZooKeeper's QuorumPeer. When HBase is set to manage
@@ -42,61 +43,113 @@
  */
 public class HQuorumPeer implements HConstants {
   private static final Log LOG = LogFactory.getLog(HQuorumPeer.class);
+
   private static final String VARIABLE_START = "${";
   private static final int VARIABLE_START_LENGTH = VARIABLE_START.length();
   private static final String VARIABLE_END = "}";
   private static final int VARIABLE_END_LENGTH = VARIABLE_END.length();
 
+  private static final String ZK_CFG_PROPERTY = "hbase.zookeeper.property.";
+  private static final int ZK_CFG_PROPERTY_SIZE = ZK_CFG_PROPERTY.length();
+
   /**
    * Parse ZooKeeper configuration and run a QuorumPeer.
    * While parsing the zoo.cfg, we substitute variables with values from
    * hbase-site.xml.
    * @param args String[] of command line arguments. Not used.
-   * @throws IOException 
+   * @throws IOException
    */
   public static void main(String[] args) throws IOException {
-    QuorumPeerConfig config = new QuorumPeerConfig();
+    HBaseConfiguration conf = new HBaseConfiguration();
+    Properties zkProperties = makeZKProps(conf);
+
+    QuorumPeerConfig zkConfig = new QuorumPeerConfig();
     try {
-      Properties properties = parseZooKeeperConfig();
-      config.parseProperties(properties);
-    } catch (Exception e) {
+      zkConfig.parseProperties(zkProperties);
+    } catch (ConfigException e) {
       e.printStackTrace();
       System.exit(-1);
     }
-    if (config.isDistributed()) {
+
+    startZKServer(zkConfig);
+  }
+
+  private static void startZKServer(QuorumPeerConfig zkConfig) throws IOException {
+    if (zkConfig.isDistributed()) {
       QuorumPeerMain qp = new QuorumPeerMain();
-      qp.runFromConfig(config);
+      qp.runFromConfig(zkConfig);
     } else {
       ZooKeeperServerMain zk = new ZooKeeperServerMain();
       ServerConfig serverConfig = new ServerConfig();
-      serverConfig.readFrom(config);
+      serverConfig.readFrom(zkConfig);
       zk.runFromConfig(serverConfig);
     }
   }
 
   /**
-   * Parse ZooKeeper's zoo.cfg, injecting HBase Configuration variables in.
-   * @return Properties parsed from config stream with variables substituted.
-   * @throws IOException if anything goes wrong parsing config
+   * Make a Properties object holding ZooKeeper config equivalent to zoo.cfg.
+   * If there is a zoo.cfg in the classpath, simply read it in. Otherwise parse
+   * the corresponding config options from the HBase XML configs and generate
+   * the appropriate ZooKeeper properties.
+   * @param conf HBaseConfiguration to read from.
+   * @return Properties holding mappings representing ZooKeeper zoo.cfg file.
    */
-  public static Properties parseZooKeeperConfig() throws IOException {
+  public static Properties makeZKProps(HBaseConfiguration conf) {
+    // First check if there is a zoo.cfg in the CLASSPATH. If so, simply read
+    // it and grab its configuration properties.
     ClassLoader cl = HQuorumPeer.class.getClassLoader();
     InputStream inputStream = cl.getResourceAsStream(ZOOKEEPER_CONFIG_NAME);
-    if (inputStream == null) {
-      throw new IOException(ZOOKEEPER_CONFIG_NAME + " not found");
+    if (inputStream != null) {
+      try {
+        return parseZooCfg(conf, inputStream);
+      } catch (IOException e) {
+        LOG.warn("Cannot read " + ZOOKEEPER_CONFIG_NAME +
+                 ", loading from XML files", e);
+      }
     }
-    return parseConfig(inputStream);
+
+    // Otherwise, use the configuration options from HBase's XML files.
+    Properties zkProperties = new Properties();
+
+    // Directly map all of the hbase.zookeeper.property.KEY properties.
+    for (Entry<String, String> entry : conf) {
+      String key = entry.getKey();
+      if (key.startsWith(ZK_CFG_PROPERTY)) {
+        String zkKey = key.substring(ZK_CFG_PROPERTY_SIZE);
+        String value = entry.getValue();
+        // If the value has variables substitutions, need to do a get.
+        if (value.contains(VARIABLE_START)) {
+          value = conf.get(key);
+        }
+        zkProperties.put(zkKey, value);
+      }
+    }
+
+    // Create the server.X properties.
+    int peerPort = conf.getInt("hbase.zookeeper.peerport", 2888);
+    int leaderPort = conf.getInt("hbase.zookeeper.leaderport", 3888);
+
+    String[] serverHosts = conf.getStrings(ZOOKEEPER_QUORUM, "localhost");
+    for (int i = 0; i < serverHosts.length; ++i) {
+      String serverHost = serverHosts[i];
+      String address = serverHost + ":" + peerPort + ":" + leaderPort;
+      String key = "server." + i;
+      zkProperties.put(key, address);
+    }
+
+    return zkProperties;
   }
 
   /**
    * Parse ZooKeeper's zoo.cfg, injecting HBase Configuration variables in.
    * This method is used for testing so we can pass our own InputStream.
+   * @param conf HBaseConfiguration to use for injecting variables.
    * @param inputStream InputStream to read from.
    * @return Properties parsed from config stream with variables substituted.
    * @throws IOException if anything goes wrong parsing config
    */
-  public static Properties parseConfig(InputStream inputStream) throws IOException {
-    HBaseConfiguration conf = new HBaseConfiguration();
+  public static Properties parseZooCfg(HBaseConfiguration conf,
+      InputStream inputStream) throws IOException {
     Properties properties = new Properties();
     try {
       properties.load(inputStream);
@@ -130,7 +183,7 @@
           LOG.fatal(msg);
           throw new IOException(msg);
         }
-        
+
         newValue.append(substituteValue);
 
         varEnd += VARIABLE_END_LENGTH;
@@ -138,7 +191,7 @@
       }
       // Special case for 'hbase.cluster.distributed' property being 'true'
       if (key.startsWith("server.")) {
-        if(conf.get(CLUSTER_DISTRIBUTED).equals(CLUSTER_IS_DISTRIBUTED) && 
+        if(conf.get(CLUSTER_DISTRIBUTED).equals(CLUSTER_IS_DISTRIBUTED) &&
             value.startsWith("localhost")) {
            String msg = "The server in zoo.cfg cannot be set to localhost " +
               "in a fully-distributed setup because it won't be reachable. " +

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java Sat Jul  4 02:16:16 2009
@@ -56,18 +56,15 @@
   // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved.
   private static final char ZNODE_PATH_SEPARATOR = '/';
 
-  private static String quorumServers = null;
-  static {
-    loadZooKeeperConfig();
-  }
+  private String quorumServers = null;
 
   private final ZooKeeper zooKeeper;
 
   private final String parentZNode;
-  public final String rootRegionZNode;
-  public final String outOfSafeModeZNode;
-  public final String rsZNode;
-  public final String masterElectionZNode;
+  private final String rootRegionZNode;
+  private final String outOfSafeModeZNode;
+  private final String rsZNode;
+  private final String masterElectionZNode;
   public final String clusterStateZNode;
 
   /**
@@ -78,6 +75,8 @@
    */
   public ZooKeeperWrapper(HBaseConfiguration conf, Watcher watcher)
   throws IOException {
+    Properties properties = HQuorumPeer.makeZKProps(conf);
+    setQuorumServers(properties);
     if (quorumServers == null) {
       throw new IOException("Could not read quorum servers from " +
                             ZOOKEEPER_CONFIG_NAME);
@@ -110,76 +109,7 @@
     clusterStateZNode = getZNode(parentZNode, stateZNodeName);
   }
 
-  /** @return String dump of everything in ZooKeeper. */
-  public String dump() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("\nHBase tree in ZooKeeper is rooted at ").append(parentZNode);
-    sb.append("\n  Cluster up? ").append(exists(clusterStateZNode));
-    sb.append("\n  In safe mode? ").append(!checkOutOfSafeMode());
-    sb.append("\n  Master address: ").append(readMasterAddress(null));
-    sb.append("\n  Region server holding ROOT: ").append(readRootRegionLocation());
-    sb.append("\n  Region servers:");
-    for (HServerAddress address : scanRSDirectory()) {
-      sb.append("\n    - ").append(address);
-    }
-    return sb.toString();
-  }
-
-  private boolean exists(String znode) {
-    try {
-      return zooKeeper.exists(znode, null) != null;
-    } catch (KeeperException e) {
-      return false;
-    } catch (InterruptedException e) {
-      return false;
-    }
-  }
-
-  /** @return ZooKeeper used by this wrapper. */
-  public ZooKeeper getZooKeeper() {
-    return zooKeeper;
-  }
-
-  /**
-   * This is for testing KeeperException.SessionExpiredExcseption.
-   * See HBASE-1232.
-   * @return long session ID of this ZooKeeper session.
-   */
-  public long getSessionID() {
-    return zooKeeper.getSessionId();
-  }
-
-  /**
-   * This is for testing KeeperException.SessionExpiredExcseption.
-   * See HBASE-1232.
-   * @return byte[] password of this ZooKeeper session.
-   */
-  public byte[] getSessionPassword() {
-    return zooKeeper.getSessionPasswd();
-  }
-
-  /**
-   * This is for tests to directly set the ZooKeeper quorum servers.
-   * @param servers comma separated host:port ZooKeeper quorum servers.
-   */
-  public static void setQuorumServers(String servers) {
-    quorumServers = servers;
-  }
-
-  /** @return comma separated host:port list of ZooKeeper quorum servers. */
-  public static String getQuorumServers() {
-    return quorumServers;
-  }
-
-  private static void loadZooKeeperConfig() {
-    Properties properties = null;
-    try {
-      properties = HQuorumPeer.parseZooKeeperConfig();
-    } catch (IOException e) {
-      LOG.fatal("Fail to read properties from " + ZOOKEEPER_CONFIG_NAME, e);
-      System.exit(-1);
-    }
-
+  private void setQuorumServers(Properties properties) {
     String clientPort = null;
     List<String> servers = new ArrayList<String>();
 
@@ -217,7 +147,7 @@
     if (servers.isEmpty()) {
       LOG.fatal("No server.X lines found in conf/zoo.cfg. HBase must have a " +
                 "ZooKeeper cluster configured for its operation.");
-      System.exit(-1);
+      return;
     }
 
     StringBuilder hostPortBuilder = new StringBuilder();
@@ -232,7 +162,59 @@
     }
 
     quorumServers = hostPortBuilder.toString();
-    LOG.info("Quorum servers: " + quorumServers);
+  }
+
+  /** @return String dump of everything in ZooKeeper. */
+  public String dump() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("\nHBase tree in ZooKeeper is rooted at ").append(parentZNode);
+    sb.append("\n  Cluster up? ").append(exists(clusterStateZNode));
+    sb.append("\n  In safe mode? ").append(!checkOutOfSafeMode());
+    sb.append("\n  Master address: ").append(readMasterAddress(null));
+    sb.append("\n  Region server holding ROOT: ").append(readRootRegionLocation());
+    sb.append("\n  Region servers:");
+    for (HServerAddress address : scanRSDirectory()) {
+      sb.append("\n    - ").append(address);
+    }
+    return sb.toString();
+  }
+
+  private boolean exists(String znode) {
+    try {
+      return zooKeeper.exists(znode, null) != null;
+    } catch (KeeperException e) {
+      return false;
+    } catch (InterruptedException e) {
+      return false;
+    }
+  }
+
+  /** @return ZooKeeper used by this wrapper. */
+  public ZooKeeper getZooKeeper() {
+    return zooKeeper;
+  }
+
+  /**
+   * This is for testing KeeperException.SessionExpiredException.
+   * See HBASE-1232.
+   * @return long session ID of this ZooKeeper session.
+   */
+  public long getSessionID() {
+    return zooKeeper.getSessionId();
+  }
+
+  /**
+   * This is for testing KeeperException.SessionExpiredException.
+   * See HBASE-1232.
+   * @return byte[] password of this ZooKeeper session.
+   */
+  public byte[] getSessionPassword() {
+    return zooKeeper.getSessionPasswd();
+  }
+
+  /** @return host:port list of quorum servers. */
+  public String getQuorumServers() {
+    return quorumServers;
   }
 
   /** @return true if currently connected to ZooKeeper, false otherwise. */

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/hbase-site.xml
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/hbase-site.xml?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/hbase-site.xml (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/hbase-site.xml Sat Jul  4 02:16:16 2009
@@ -127,4 +127,11 @@
     <name>hadoop.log.dir</name>
     <value>${user.dir}/../logs</value>
   </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21810</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
 </configuration>

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java Sat Jul  4 02:16:16 2009
@@ -96,7 +96,8 @@
     // Note that this is done before we create the MiniHBaseCluster because we
     // need to edit the config to add the ZooKeeper servers.
     this.zooKeeperCluster = new MiniZooKeeperCluster();
-    this.zooKeeperCluster.startup(testDir);
+    int clientPort = this.zooKeeperCluster.startup(testDir);
+    conf.set("hbase.zookeeper.property.clientPort", Integer.toString(clientPort));
 
     // start the mini cluster
     this.cluster = new MiniHBaseCluster(conf, regionServers);

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java Sat Jul  4 02:16:16 2009
@@ -226,7 +226,7 @@
     
     @Override
     void setUp() throws Exception {
-      reader = new HFile.Reader(this.fs, this.mf, null);
+      reader = new HFile.Reader(this.fs, this.mf, null, false);
       this.reader.loadFileInfo();
     }
     

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniZooKeeperCluster.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniZooKeeperCluster.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniZooKeeperCluster.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniZooKeeperCluster.java Sat Jul  4 02:16:16 2009
@@ -70,10 +70,11 @@
 
   /**
    * @param baseDir
+   * @return ClientPort server bound to.
    * @throws IOException
    * @throws InterruptedException
    */
-  public void startup(File baseDir) throws IOException,
+  public int startup(File baseDir) throws IOException,
       InterruptedException {
     setupTestEnv();
 
@@ -96,14 +97,13 @@
     }
     standaloneServerFactory.startup(server);
 
-    String quorumServers = "localhost:" + clientPort;
-    ZooKeeperWrapper.setQuorumServers(quorumServers);
-
     if (!waitForServerUp(clientPort, CONNECTION_TIMEOUT)) {
       throw new IOException("Waiting for startup of standalone server");
     }
 
     started = true;
+
+    return clientPort;
   }
 
   private void recreateDir(File dir) throws IOException {

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java Sat Jul  4 02:16:16 2009
@@ -26,7 +26,6 @@
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -40,7 +39,8 @@
  */
 public class TestZooKeeper extends HBaseClusterTestCase {
   private static class EmptyWatcher implements Watcher {
-    public EmptyWatcher() {}
+    public static EmptyWatcher instance = new EmptyWatcher();
+    private EmptyWatcher() {}
     public void process(WatchedEvent event) {}
   }
 
@@ -54,7 +54,7 @@
    * @throws IOException
    */
   public void testWritesRootRegionLocation() throws IOException {
-    ZooKeeperWrapper zooKeeper = new ZooKeeperWrapper(conf, new EmptyWatcher());
+    ZooKeeperWrapper zooKeeper = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
 
     boolean outOfSafeMode = zooKeeper.checkOutOfSafeMode();
     assertFalse(outOfSafeMode);
@@ -82,9 +82,11 @@
    * @throws IOException
    */
   public void testParentExists() throws IOException {
+    String oldValue = conf.get("zookeeper.znode.safemode");
     conf.set("zookeeper.znode.safemode", "/a/b/c/d/e");
-    ZooKeeperWrapper zooKeeper = new ZooKeeperWrapper(conf, new EmptyWatcher());
+    ZooKeeperWrapper zooKeeper = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
     assertTrue(zooKeeper.writeOutOfSafeMode());
+    conf.set("zookeeper.znode.safemode", oldValue);
   }
 
   /**
@@ -95,15 +97,15 @@
   public void testClientSessionExpired() throws IOException, 
InterruptedException {
     new HTable(conf, HConstants.META_TABLE_NAME);
 
-    String quorumServers = ZooKeeperWrapper.getQuorumServers();
+    ZooKeeperWrapper zkw = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
+    String quorumServers = zkw.getQuorumServers();
     int sessionTimeout = conf.getInt("zookeeper.session.timeout", 2 * 1000);
-    Watcher watcher = new EmptyWatcher();
     HConnection connection = HConnectionManager.getConnection(conf);
     ZooKeeperWrapper connectionZK = connection.getZooKeeperWrapper();
     long sessionID = connectionZK.getSessionID();
     byte[] password = connectionZK.getSessionPassword();
 
-    ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout, watcher, 
sessionID, password);
+    ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout, 
EmptyWatcher.instance, sessionID, password);
     zk.close();
 
     Thread.sleep(sessionTimeout * 3);
@@ -119,16 +121,16 @@
     try {
       new HTable(conf, HConstants.META_TABLE_NAME);
   
-      String quorumServers = ZooKeeperWrapper.getQuorumServers();
+      ZooKeeperWrapper zkw = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
+      String quorumServers = zkw.getQuorumServers();
       int sessionTimeout = conf.getInt("zookeeper.session.timeout", 2 * 1000);
-  
-      Watcher watcher = new EmptyWatcher();
+
       HRegionServer rs = cluster.getRegionServer(0);
       ZooKeeperWrapper rsZK = rs.getZooKeeperWrapper();
       long sessionID = rsZK.getSessionID();
       byte[] password = rsZK.getSessionPassword();
   
-      ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout, watcher, 
sessionID, password);
+      ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout, 
EmptyWatcher.instance, sessionID, password);
       zk.close();
 
       Thread.sleep(sessionTimeout * 3);

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java
 (original)
+++ 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java
 Sat Jul  4 02:16:16 2009
@@ -5,13 +5,23 @@
 import java.util.ArrayList;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.RegionHistorian;
+import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.MemStore;
+import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 
@@ -99,6 +109,60 @@
       ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
+
+    // ConcurrentHashMap
+    cl = ConcurrentHashMap.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.CONCURRENT_HASHMAP;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // ConcurrentSkipListMap
+    cl = ConcurrentSkipListMap.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.CONCURRENT_SKIPLISTMAP;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // ReentrantReadWriteLock
+    cl = ReentrantReadWriteLock.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.REENTRANT_LOCK;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // AtomicLong
+    cl = AtomicLong.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.ATOMIC_LONG;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // AtomicInteger
+    cl = AtomicInteger.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.ATOMIC_INTEGER;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+
+    // AtomicBoolean
+    cl = AtomicBoolean.class;
+    expected = ClassSize.estimateBase(cl, false);
+    actual = ClassSize.ATOMIC_BOOLEAN;
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
     
   }
   
@@ -124,18 +188,21 @@
       assertEquals(expected, actual);
     }
     
-    //LruBlockCache Overhead
-    cl = LruBlockCache.class;
-    actual = LruBlockCache.CACHE_FIXED_OVERHEAD;
+    //Put
+    cl = Put.class;
     expected = ClassSize.estimateBase(cl, false);
+    //The actual TreeMap is not included in the above calculation
+    expected += ClassSize.TREEMAP;
+    Put put = new Put(Bytes.toBytes(""));
+    actual = put.heapSize();
     if(expected != actual) {
       ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
     
-    // LruBlockCache Map Fixed Overhead
-    cl = ConcurrentHashMap.class;
-    actual = ClassSize.CONCURRENT_HASHMAP;
+    //LruBlockCache Overhead
+    cl = LruBlockCache.class;
+    actual = LruBlockCache.CACHE_FIXED_OVERHEAD;
     expected = ClassSize.estimateBase(cl, false);
     if(expected != actual) {
       ClassSize.estimateBase(cl, true);
@@ -157,17 +224,64 @@
       assertEquals(expected, actual);
     }
     
-    //Put
-    cl = Put.class;
+    // MemStore Overhead
+    cl = MemStore.class;
+    actual = MemStore.FIXED_OVERHEAD;
     expected = ClassSize.estimateBase(cl, false);
-    //The actual TreeMap is not included in the above calculation
-    expected += ClassSize.TREEMAP;
-    Put put = new Put(Bytes.toBytes(""));
-    actual = put.heapSize();
     if(expected != actual) {
       ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
+    
+    // MemStore Deep Overhead
+    actual = MemStore.DEEP_OVERHEAD;
+    expected = ClassSize.estimateBase(cl, false);
+    expected += ClassSize.estimateBase(ReentrantReadWriteLock.class, false);
+    expected += ClassSize.estimateBase(AtomicLong.class, false);
+    expected += ClassSize.estimateBase(ConcurrentSkipListMap.class, false);
+    expected += ClassSize.estimateBase(ConcurrentSkipListMap.class, false);
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      ClassSize.estimateBase(ReentrantReadWriteLock.class, true);
+      ClassSize.estimateBase(AtomicLong.class, true);
+      ClassSize.estimateBase(ConcurrentSkipListMap.class, true);
+      assertEquals(expected, actual);
+    }
+    
+    // Store Overhead
+    cl = Store.class;
+    actual = Store.FIXED_OVERHEAD;
+    expected = ClassSize.estimateBase(cl, false);
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+    
+    // Region Overhead
+    cl = HRegion.class;
+    actual = HRegion.FIXED_OVERHEAD;
+    expected = ClassSize.estimateBase(cl, false);
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+    
+    // RegionHistorian Overhead
+    cl = RegionHistorian.class;
+    actual = RegionHistorian.FIXED_OVERHEAD;
+    expected = ClassSize.estimateBase(cl, false);
+    if(expected != actual) {
+      ClassSize.estimateBase(cl, true);
+      assertEquals(expected, actual);
+    }
+    
+    // Currently NOT testing Deep Overheads of many of these classes.
+    // Deep overheads cover a vast majority of stuff, but will not be 100%
+    // accurate because it's unclear when we're referencing stuff that's 
already
+    // accounted for.  But we have satisfied our two core requirements.
+    // Sizing is quite accurate now, and our tests will throw errors if 
+    // any of these classes are modified without updating overhead sizes.
+
   }
 
 }

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
 (original)
+++ 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
 Sat Jul  4 02:16:16 2009
@@ -66,7 +66,7 @@
     long start = System.currentTimeMillis();
     SimpleBlockCache cache = new SimpleBlockCache();
     //LruBlockCache cache = new LruBlockCache();
-    Reader reader = new HFile.Reader(lfs, path, cache);
+    Reader reader = new HFile.Reader(lfs, path, cache, false);
     reader.loadFileInfo();
     System.out.println(reader.trailer);
     long end = System.currentTimeMillis();

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
 (original)
+++ 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
 Sat Jul  4 02:16:16 2009
@@ -122,10 +122,9 @@
     fout.close();
     FSDataInputStream fin = fs.open(ncTFile);
     Reader reader = new Reader(fs.open(ncTFile),
-      fs.getFileStatus(ncTFile).getLen(), null);
+      fs.getFileStatus(ncTFile).getLen(), null, false);
     // Load up the index.
     reader.loadFileInfo();
-    LOG.info(reader);
     HFileScanner scanner = reader.getScanner();
     // Align scanner at start of the file.
     scanner.seekTo();
@@ -186,7 +185,7 @@
     fout.close();
     FSDataInputStream fin = fs.open(mFile);
     Reader reader = new Reader(fs.open(mFile), this.fs.getFileStatus(mFile)
-        .getLen(), null);
+        .getLen(), null, false);
     reader.loadFileInfo();
     // No data -- this should return false.
     assertFalse(reader.getScanner().seekTo());
@@ -210,7 +209,7 @@
     writer.append("foo".getBytes(), "value".getBytes());
     writer.close();
     fout.close();
-    Reader reader = new Reader(fs, mFile, null);
+    Reader reader = new Reader(fs, mFile, null, false);
     reader.loadFileInfo();
     assertNull(reader.getMetaBlock("non-existant"));
   }
@@ -270,4 +269,4 @@
     }
   }
   
-}
\ No newline at end of file
+}

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
 (original)
+++ 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
 Sat Jul  4 02:16:16 2009
@@ -236,9 +236,8 @@
 
     if ("HFile".equals(fileType)){
         HFile.Reader reader = new HFile.Reader(fs.open(path),
-          fs.getFileStatus(path).getLen(), null);
+          fs.getFileStatus(path).getLen(), null, false);
         reader.loadFileInfo();
-        System.out.println(reader);
         switch (method) {
         
           case 0:
@@ -381,4 +380,4 @@
             " the same method several times and flood cache every time and 
average it to get a" +
             " better number.");
   }
-}
\ No newline at end of file
+}

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
 (original)
+++ 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
 Sat Jul  4 02:16:16 2009
@@ -155,9 +155,8 @@
     long totalBytes = 0;
     FSDataInputStream fsdis = fs.open(path);
     Reader reader =
-      new Reader(fsdis, fs.getFileStatus(path).getLen(), null);
+      new Reader(fsdis, fs.getFileStatus(path).getLen(), null, false);
     reader.loadFileInfo();
-    System.out.println(reader);
     KeySampler kSampler =
         new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
             keyLenGen);

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
 (original)
+++ 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
 Sat Jul  4 02:16:16 2009
@@ -49,7 +49,7 @@
   }
   public void testSeekBefore() throws Exception {
     Path p = makeNewFile();
-    HFile.Reader reader = new HFile.Reader(fs, p, null);
+    HFile.Reader reader = new HFile.Reader(fs, p, null, false);
     reader.loadFileInfo();
     HFileScanner scanner = reader.getScanner();
     assertEquals(false, scanner.seekBefore(Bytes.toBytes("a")));
@@ -82,7 +82,7 @@
   
   public void testSeekTo() throws Exception {
     Path p = makeNewFile();
-    HFile.Reader reader = new HFile.Reader(fs, p, null);
+    HFile.Reader reader = new HFile.Reader(fs, p, null, false);
     reader.loadFileInfo();
     assertEquals(2, reader.blockIndex.count);
     HFileScanner scanner = reader.getScanner();
@@ -102,7 +102,7 @@
   
   public void testBlockContainingKey() throws Exception {
     Path p = makeNewFile();
-    HFile.Reader reader = new HFile.Reader(fs, p, null);
+    HFile.Reader reader = new HFile.Reader(fs, p, null, false);
     reader.loadFileInfo();
     System.out.println(reader.blockIndex.toString());
     // falls before the start of the file.

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
 (original)
+++ 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
 Sat Jul  4 02:16:16 2009
@@ -169,7 +169,7 @@
     boolean containsStartRow = false;
     for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles().
         values()) {
-      HFileScanner scanner = f.getReader().getScanner();
+      HFileScanner scanner = f.getCompactionReader().getScanner();
       scanner.seekTo();
       do {
         byte [] row = scanner.getKeyValue().getRow();

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
 (original)
+++ 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
 Sat Jul  4 02:16:16 2009
@@ -74,7 +74,7 @@
       new Path(new Path(this.testDir, "regionname"), "familyname"),
       2 * 1024, null, null);
     writeStoreFile(writer);
-    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf));
+    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf, 
false));
   }
   
   /*
@@ -113,7 +113,7 @@
     HFile.Writer writer = StoreFile.getWriter(this.fs, dir, 8 * 1024, null,
       null);
     writeStoreFile(writer);
-    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf);
+    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf, 
false);
     HFile.Reader reader = hsf.getReader();
     // Split on a row, not in middle of row.  Midkey returned by reader
     // may be in middle of row.  Create new one with empty column and
@@ -124,7 +124,7 @@
     byte [] finalKey = hsk.getRow();
     // Make a reference
     Path refPath = StoreFile.split(fs, dir, hsf, reader.midkey(), Range.top);
-    StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf);
+    StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf, false);
     // Now confirm that I can read from the reference and that it only gets
     // keys from top half of the file.
     HFileScanner s = refHsf.getReader().getScanner();
@@ -158,8 +158,8 @@
     Path bottomPath = StoreFile.split(this.fs, bottomDir,
       f, midkey, Range.bottom);
     // Make readers on top and bottom.
-    HFile.Reader top = new StoreFile(this.fs, topPath, true, conf).getReader();
-    HFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, 
conf).getReader();
+    HFile.Reader top = new StoreFile(this.fs, topPath, true, conf, 
false).getReader();
+    HFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, conf, 
false).getReader();
     ByteBuffer previous = null;
     LOG.info("Midkey: " + Bytes.toString(midkey));
     byte [] midkeyBytes = new HStoreKey(midkey).getBytes();
@@ -212,8 +212,8 @@
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
         Range.bottom);
-      top = new StoreFile(this.fs, topPath, true, conf).getReader();
-      bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader();
+      top = new StoreFile(this.fs, topPath, true, conf, false).getReader();
+      bottom = new StoreFile(this.fs, bottomPath, true, conf, 
false).getReader();
       bottomScanner = bottom.getScanner();
       int count = 0;
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
@@ -256,8 +256,8 @@
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
         Range.bottom);
-      top = new StoreFile(this.fs, topPath, true, conf).getReader();
-      bottom = new StoreFile(this.fs, bottomPath, true, conf).getReader();
+      top = new StoreFile(this.fs, topPath, true, conf, false).getReader();
+      bottom = new StoreFile(this.fs, bottomPath, true, conf, 
false).getReader();
       first = true;
       bottomScanner = bottom.getScanner();
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||

Modified: 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/zookeeper/HQuorumPeerTest.java
URL: 
http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/zookeeper/HQuorumPeerTest.java?rev=791050&r1=791049&r2=791050&view=diff
==============================================================================
--- 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/zookeeper/HQuorumPeerTest.java
 (original)
+++ 
hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/zookeeper/HQuorumPeerTest.java
 Sat Jul  4 02:16:16 2009
@@ -25,7 +25,9 @@
 import java.util.Properties;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
 import org.apache.zookeeper.server.quorum.QuorumPeer.QuorumServer;
 
@@ -59,10 +61,34 @@
     super.tearDown();
   }
 
-  /** @throws Exception */
-  public void testConfigInjection() throws Exception {
+  /** */
+  public void testMakeZKProps() {
+    Properties properties = HQuorumPeer.makeZKProps(conf);
+    assertEquals(2000, Integer.parseInt(properties.getProperty("tickTime")));
+    assertEquals(Integer.valueOf(10), 
Integer.valueOf(properties.getProperty("initLimit")));
+    assertEquals(Integer.valueOf(5), 
Integer.valueOf(properties.getProperty("syncLimit")));
+    assertEquals(dataDir.toString(), properties.get("dataDir"));
+    assertEquals(Integer.valueOf(21810), 
Integer.valueOf(properties.getProperty("clientPort")));
+    assertEquals("localhost:2888:3888", properties.get("server.0"));
+    assertEquals(null, properties.get("server.1"));
 
+    String oldValue = conf.get(HConstants.ZOOKEEPER_QUORUM);
+    conf.set(HConstants.ZOOKEEPER_QUORUM, "a.foo.bar,b.foo.bar,c.foo.bar");
+    properties = HQuorumPeer.makeZKProps(conf);
+    assertEquals(2000, Integer.parseInt(properties.getProperty("tickTime")));
+    assertEquals(Integer.valueOf(10), 
Integer.valueOf(properties.getProperty("initLimit")));
+    assertEquals(Integer.valueOf(5), 
Integer.valueOf(properties.getProperty("syncLimit")));
+    assertEquals(dataDir.toString(), properties.get("dataDir"));
+    assertEquals(Integer.valueOf(21810), 
Integer.valueOf(properties.getProperty("clientPort")));
+    assertEquals("a.foo.bar:2888:3888", properties.get("server.0"));
+    assertEquals("b.foo.bar:2888:3888", properties.get("server.1"));
+    assertEquals("c.foo.bar:2888:3888", properties.get("server.2"));
+    assertEquals(null, properties.get("server.3"));
+    conf.set(HConstants.ZOOKEEPER_QUORUM, oldValue);
+  }
 
+  /** @throws Exception */
+  public void testConfigInjection() throws Exception {
     String s =
       "tickTime=2000\n" +
       "initLimit=10\n" +
@@ -71,8 +97,9 @@
       "clientPort=2181\n" +
       "server.0=${hbase.master.hostname}:2888:3888\n";
 
+    System.setProperty("hbase.master.hostname", "localhost");
     InputStream is = new ByteArrayInputStream(s.getBytes());
-    Properties properties = HQuorumPeer.parseConfig(is);
+    Properties properties = HQuorumPeer.parseZooCfg(conf, is);
 
     assertEquals(Integer.valueOf(2000), 
Integer.valueOf(properties.getProperty("tickTime")));
     assertEquals(Integer.valueOf(10), 
Integer.valueOf(properties.getProperty("initLimit")));
@@ -101,7 +128,7 @@
     // Override with system property.
     System.setProperty("hbase.master.hostname", "foo.bar");
     is = new ByteArrayInputStream(s.getBytes());
-    properties = HQuorumPeer.parseConfig(is);
+    properties = HQuorumPeer.parseZooCfg(conf, is);
     assertEquals("foo.bar:2888:3888", properties.get("server.0"));
 
     config.parseProperties(properties);
@@ -109,17 +136,5 @@
     servers = config.getServers();
     server = servers.get(Long.valueOf(0));
     assertEquals("foo.bar", server.addr.getHostName());
-
-    // Special case for property 'hbase.master.hostname' being 'local'
-    System.setProperty("hbase.master.hostname", "local");
-    is = new ByteArrayInputStream(s.getBytes());
-    properties = HQuorumPeer.parseConfig(is);
-    assertEquals("localhost:2888:3888", properties.get("server.0"));
-
-    config.parseProperties(properties);
-
-    servers = config.getServers();
-    server = servers.get(Long.valueOf(0));
-    assertEquals("localhost", server.addr.getHostName());
   }
 }


Reply via email to