[EAGLE-737] Fix eagle-embed checkstyle warnings

[EAGLE-737] Fix eagle-embed checkstyle warnings

https://issues.apache.org/jira/browse/EAGLE-737

Author: koone <luokun1...@126.com>

Closes #634 from koone/EAGLE-737.


Project: http://git-wip-us.apache.org/repos/asf/incubator-eagle/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-eagle/commit/a88e1cef
Tree: http://git-wip-us.apache.org/repos/asf/incubator-eagle/tree/a88e1cef
Diff: http://git-wip-us.apache.org/repos/asf/incubator-eagle/diff/a88e1cef

Branch: refs/heads/master
Commit: a88e1cef80b3f9c6e896d5948154cd5b5b37d643
Parents: 2b61cef
Author: koone <luokun1...@126.com>
Authored: Wed Nov 9 21:58:55 2016 +0800
Committer: Hao Chen <h...@apache.org>
Committed: Wed Nov 9 21:58:55 2016 +0800

----------------------------------------------------------------------
 .../eagle/service/hbase/EmbeddedHbase.java      |   30 +-
 .../org/apache/eagle/service/hbase/Tables.java  |    2 +-
 .../src/main/resources/hbase-default.xml        | 1809 +++++++++---------
 .../eagle/service/hbase/TestHBaseBase.java      |   10 +-
 .../embedded/tomcat/TestEmbeddedServer.java     |   12 +-
 eagle-core/eagle-embed/pom.xml                  |   12 +
 6 files changed, 947 insertions(+), 928 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a88e1cef/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/EmbeddedHbase.java
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/EmbeddedHbase.java b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/EmbeddedHbase.java
index be614d3..84661e9 100644
--- a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/EmbeddedHbase.java
+++ b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/EmbeddedHbase.java
@@ -27,21 +27,21 @@ public class EmbeddedHbase {
     private HBaseTestingUtility util;
     private MiniHBaseCluster hbaseCluster;
     private static EmbeddedHbase hbase;
-    private int port;    
+    private int port;
     private String znode;
     private static int DEFAULT_PORT = 2181;
     private static String DEFAULT_ZNODE = "/hbase-unsecure";
     private static final Logger LOG = LoggerFactory.getLogger(EmbeddedHbase.class);
-    
+
     private EmbeddedHbase(int port, String znode) {
         this.port = port;
         this.znode = znode;
     }
-    
+
     private EmbeddedHbase(int port) {
         this(port, DEFAULT_ZNODE);
     }
-    
+
     public static EmbeddedHbase getInstance(Configuration conf) {
         if (hbase == null) {
             synchronized (EmbeddedHbase.class) {
@@ -55,7 +55,7 @@ public class EmbeddedHbase {
     }
 
     public static EmbeddedHbase getInstance() {
-        return  getInstance(null);
+        return getInstance(null);
     }
 
     private EmbeddedHbase() {
@@ -70,7 +70,7 @@ public class EmbeddedHbase {
         try {
             util = new HBaseTestingUtility();
             Configuration conf = util.getConfiguration();
-            if(confMap != null) {
+            if (confMap != null) {
                 conf.addResource(confMap);
             }
             conf.setInt("test.hbase.zookeeper.property.clientPort", port);
@@ -81,29 +81,29 @@ public class EmbeddedHbase {
             // start mini hbase cluster
             hbaseCluster = util.startMiniCluster();
             Configuration config = hbaseCluster.getConf();
-            
+
             config.set("zookeeper.session.timeout", "120000");
             config.set("hbase.zookeeper.property.tickTime", "6000");
             config.set(HConstants.HBASE_CLIENT_PAUSE, "3000");
             config.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "1");
             config.set(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, "60000");
-            
+
             Runtime.getRuntime().addShutdownHook(new Thread() {
                 @Override
                 public void run() {
                     shutdown();
                 }
-            }); 
+            });
         } catch (Throwable t) {
-            LOG.error("Got an exception: ",t);
+            LOG.error("Got an exception: ", t);
         }
     }
 
-    public void shutdown() {        
+    public void shutdown() {
         try {
             util.shutdownMiniCluster();
         } catch (Throwable t) {
-            LOG.info("Got an exception, " + t , t.getCause());
+            LOG.info("Got an exception, " + t, t.getCause());
             try {
                 util.shutdownMiniCluster();
             } catch (Throwable t1) {
@@ -111,15 +111,15 @@ public class EmbeddedHbase {
             }
         }
     }
-    
+
     public void createTable(String tableName, String cf) {
-        try {            
+        try {
             util.createTable(tableName, cf);
         } catch (Exception ex) {
             LOG.warn("Create table failed, probably table already existed, 
table name: " + tableName);
         }
     }
-    
+
     public void deleteTable(String tableName) {
         try {
             util.deleteTable(tableName);

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a88e1cef/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/Tables.java
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/Tables.java b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/Tables.java
index b659beb..a26e95e 100644
--- a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/Tables.java
+++ b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/Tables.java
@@ -46,7 +46,7 @@ public class Tables {
         tables.add("userprofile");
     }
 
-    public List<String> getTables(){
+    public List<String> getTables() {
         return this.tables;
     }
 }
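
Tables appears to hold a list of table names (e.g. "userprofile") consumed by the embedded HBase test setup. A hedged sketch of how that list might be combined with EmbeddedHbase, assuming a no-arg constructor (the instantiation path is not shown in this excerpt) and an illustrative "f" column family:

    import java.util.List;
    import org.apache.eagle.service.hbase.EmbeddedHbase;
    import org.apache.eagle.service.hbase.Tables;

    public class TablesUsageSketch {
        public static void main(String[] args) {
            EmbeddedHbase hbase = EmbeddedHbase.getInstance();
            // Assumption: Tables is constructed with no arguments; only
            // getTables() is visible in this diff. The "f" column family is
            // illustrative and not taken from the commit.
            List<String> names = new Tables().getTables();
            for (String name : names) {
                hbase.createTable(name, "f");
            }
        }
    }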

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a88e1cef/eagle-core/eagle-embed/eagle-embed-hbase/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/resources/hbase-default.xml b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/resources/hbase-default.xml
index 62cdff1..babf844 100644
--- a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/resources/hbase-default.xml
+++ b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/resources/hbase-default.xml
@@ -20,916 +20,921 @@
  */
 -->
 <configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value>file:///tmp/hbase-${user.name}/hbase</value>
-    <description>The directory shared by region servers and into
-      which HBase persists.  The URL should be 'fully-qualified'
-      to include the filesystem scheme.  For example, to specify the
-      HDFS directory '/hbase' where the HDFS instance's namenode is
-      running at namenode.example.org on port 9000, set this value to:
-      hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-      into /tmp.  Change this configuration else all data will be lost
-      on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.port</name>
-    <value>60000</value>
-    <description>The port the HBase Master should bind to.</description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>false</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/tmp/hbase-${user.name}</value>
-    <description>Temporary directory on the local filesystem.
-      Change this setting to point to a location more permanent
-      than '/tmp' (The '/tmp' directory is often cleared on
-      machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>60010</value>
-    <description>The port for the HBase Master web UI.
-      Set to -1 if you do not want a UI instance run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.write.buffer</name>
-    <value>2097152</value>
-    <description>Default size of the HTable clien write buffer in bytes.
-      A bigger buffer takes more memory -- on both the client and server
-      side since server instantiates the passed write buffer to process
-      it -- but a larger buffer size reduces the number of RPCs made.
-      For an estimate of server-side memory-used, evaluate
-      hbase.client.write.buffer * hbase.regionserver.handler.count
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.port</name>
-    <value>60020</value>
-    <description>The port the HBase RegionServer binds to.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>60030</value>
-    <description>The port for the HBase RegionServer web UI
-      Set to -1 if you do not want the RegionServer UI to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port.auto</name>
-    <value>false</value>
-    <description>Whether or not the Master or RegionServer
-      UI should search for a port to bind to. Enables automatic port
-      search if hbase.regionserver.info.port is already in use.
-      Useful for testing, turned off by default.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The address for the HBase RegionServer web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.class</name>
-    <value>org.apache.hadoop.hbase.ipc.HRegionInterface</value>
-    <description>The RegionServer interface to use.
-      Used by the client opening proxy to remote region server.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.pause</name>
-    <value>1000</value>
-    <description>General client pause value.  Used mostly as value to wait
-      before running a retry of a failed get, region lookup, etc.</description>
-  </property>
-  <property>
-    <name>hbase.client.retries.number</name>
-    <value>10</value>
-    <description>Maximum retries.  Used as maximum for all retryable
-      operations such as fetching of the root region from root region
-      server, getting a cell's value, starting a row update, etc.
-      Default: 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.bulkload.retries.number</name>
-    <value>0</value>
-    <description>Maximum retries.  This is maximum number of iterations
-      to atomic bulk loads are attempted in the face of splitting operations
-      0 means never give up.  Default: 0.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>1</value>
-    <description>Number of rows that will be fetched when calling next
-      on a scanner if it is not served from (local, client) memory. Higher
-      caching values will enable faster scanners but will eat up more memory
-      and some calls of next may take longer and longer times when the cache is empty.
-      Do not set this value such that the time between invocations is greater
-      than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-      instance. This is to set an upper boundary for a single entry saved in a
-      storage file. Since they cannot be split it helps avoiding that a region
-      cannot be split any further because the data is too large. It seems wise
-      to set this to a fraction of the maximum region size. Setting it to zero
-      or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.lease.period</name>
-    <value>60000</value>
-    <description>HRegion server lease period in milliseconds. Default is
-      60 seconds. Clients must report in within this period else they are
-      considered dead.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>10</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-      Same property is used by the Master for count of master handlers.
-      Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.msginterval</name>
-    <value>3000</value>
-    <description>Interval between messages from the RegionServer to Master
-      in milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.optionallogflushinterval</name>
-    <value>1000</value>
-    <description>Sync the HLog to the HDFS after this interval if it has not
-      accumulated enough entries to trigger a sync. Default 1 second. Units:
-      milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.regionSplitLimit</name>
-    <value>2147483647</value>
-    <description>Limit for the number of regions after which no more region
-      splitting should take place. This is not a hard limit for the number of
-      regions but acts as a guideline for the regionserver to stop splitting after
-      a certain limit. Default is set to MAX_INT; i.e. do not block splitting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.logroll.period</name>
-    <value>3600000</value>
-    <description>Period at which we will roll the commit log regardless
-      of how many edits it has.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.logroll.errors.tolerated</name>
-    <value>2</value>
-    <description>The number of consecutive WAL close errors we will allow
-      before triggering a server abort.  A setting of 0 will cause the
-      region server to abort if closing the current WAL writer fails during
-      log rolling.  Even a small value (2 or 3) will allow a region server
-      to ride over transient HDFS errors.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.hlog.reader.impl</name>
-    <value>org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader</value>
-    <description>The HLog file reader implementation.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.hlog.writer.impl</name>
-    <value>org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogWriter</value>
-    <description>The HLog file writer implementation.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.nbreservationblocks</name>
-    <value>4</value>
-    <description>The number of resevoir blocks of memory release on
-      OOME so we can cleanup properly before server shutdown.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.dns.interface</name>
-    <value>default</value>
-    <description>The name of the Network Interface from which a ZooKeeper server
-      should report its IP address.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.dns.nameserver</name>
-    <value>default</value>
-    <description>The host name or IP address of the name server (DNS)
-      which a ZooKeeper server should use to determine the host name used by the
-      master for communication and display purposes.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.dns.interface</name>
-    <value>default</value>
-    <description>The name of the Network Interface from which a region server
-      should report its IP address.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.dns.nameserver</name>
-    <value>default</value>
-    <description>The host name or IP address of the name server (DNS)
-      which a region server should use to determine the host name used by the
-      master for communication and display purposes.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.dns.interface</name>
-    <value>default</value>
-    <description>The name of the Network Interface from which a master
-      should report its IP address.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.dns.nameserver</name>
-    <value>default</value>
-    <description>The host name or IP address of the name server (DNS)
-      which a master should use to determine the host name used
-      for communication and display purposes.
-    </description>
-  </property>
-  <property>
-    <name>hbase.balancer.period
-    </name>
-    <value>300000</value>
-    <description>Period at which the region balancer runs in the Master.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regions.slop</name>
-    <value>0.2</value>
-    <description>Rebalance if any regionserver has average + (average * slop) regions.
-      Default is 20% slop.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.logcleaner.ttl</name>
-    <value>600000</value>
-    <description>Maximum time a HLog can stay in the .oldlogdir directory,
-      after which it will be cleaned by a Master thread.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.logcleaner.plugins</name>
-    <value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner</value>
-    <description>A comma-separated list of LogCleanerDelegate invoked by
-      the LogsCleaner service. These WAL/HLog cleaners are called in order,
-      so put the HLog cleaner that prunes the most HLog files in front. To
-      implement your own LogCleanerDelegate, just put it in HBase's classpath
-      and add the fully qualified class name here. Always add the above
-      default log cleaners in the list.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.35</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.server.thread.wakefrequency</name>
-    <value>10000</value>
-    <description>Time to sleep in between searches for work (in milliseconds).
-      Used as sleep interval by service threads such as log roller.
-    </description>
-  </property>
-  <property>
-    <name>hbase.server.versionfile.writeattempts</name>
-    <value>3</value>
-    <description>
-      How many time to retry attempting to write a version file
-      before just aborting. Each attempt is seperated by the
-      hbase.server.thread.wakefrequency milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-      Memstore will be flushed to disk if size of the memstore
-      exceeds this number of bytes.  Value is checked by a thread that runs
-      every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.preclose.flush.size</name>
-    <value>5242880</value>
-    <description>
-      If the memstores in a region are this size or larger when we go
-      to close, run a "pre-flush" to clear out memstores before we put up
-      the region closed flag and take the region offline.  On close,
-      a flush is run under the close flag to empty memory.  During
-      this time the region is offline and we are not taking on any writes.
-      If the memstore content is large, this flush could take a long time to
-      complete.  The preflush is meant to clean out the bulk of the memstore
-      before putting up the close flag and taking the region offline so the
-      flush that runs under the close flag has little to do.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>
-      Block updates if memstore has hbase.hregion.block.memstore
-      time hbase.hregion.flush.size bytes.  Useful preventing
-      runaway memstore during spikes in update traffic.  Without an
-      upper-bound, memstore fills such that when it flushes the
-      resultant flush files take a long time to compact or split, or
-      worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-      Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-      grown to exceed this value, the hosting HRegion is split in two.
-      Default: 10G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-      If more than this number of HStoreFiles in any one HStore
-      (one HStoreFile is written per flush of memstore) then a compaction
-      is run to rewrite all HStoreFiles files as one.  Larger numbers
-      put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>7</value>
-    <description>
-      If more than this number of StoreFiles in any one Store
-      (one StoreFile is written per flush of MemStore) then updates are
-      blocked for this HRegion until a compaction is completed, or
-      until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingWaitTime</name>
-    <value>90000</value>
-    <description>
-      The time an HRegion will block updates for after hitting the StoreFile
-      limit defined by hbase.hstore.blockingStoreFiles.
-      After this time has elapsed, the HRegion will stop blocking updates even
-      if a compaction has not been completed.  Default: 90 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compaction.max</name>
-    <value>10</value>
-    <description>Max number of HStoreFiles to compact per 'minor' compaction.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in miliseconds) between 'major' compactions of all
-      HStoreFiles in a region.  Default: 1 day.
-      Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
-    <name>hbase.mapreduce.hfileoutputformat.blocksize</name>
-    <value>65536</value>
-    <description>The mapreduce HFileOutputFormat writes storefiles/hfiles.
-      This is the minimum hfile blocksize to emit.  Usually in hbase, writing
-      hfiles, the blocksize is gotten from the table schema (HColumnDescriptor)
-      but in the mapreduce outputformat context, we don't have access to the
-      schema so get blocksize from Configuration.  The smaller you make
-      the blocksize, the bigger your index and the less you fetch on a
-      random-access.  Set the blocksize down if you have small cells and want
-      faster random-access of individual cells.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.25</value>
-    <description>
-      Percentage of maximum heap (-Xmx setting) to allocate to block cache
-      used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-      Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hash.type</name>
-    <value>murmur</value>
-    <description>The hashing algorithm for use in HashFunction. Two values are
-      supported now: murmur (MurmurHash) and jenkins (JenkinsHash).
-      Used by bloom filters.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.index.cacheonwrite</name>
-    <value>false</value>
-    <description>
-      This allows to put non-root multi-level index blocks into the block
-      cache at the time the index is being written.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.checksum.verify</name>
-    <value>false</value>
-    <description>
-      Allow hbase to do checksums rather than using hdfs checksums. This is a backwards
-      incompatible change.
-    </description>
-  </property>
-  <property>
-    <name>hfile.index.block.max.size</name>
-    <value>131072</value>
-    <description>
-      When the size of a leaf-level, intermediate-level, or root-level
-      index block in a multi-level block index grows to this size, the
-      block is written out and a new block is started.
-    </description>
-  </property>
-  <property>
-    <name>hfile.format.version</name>
-    <value>2</value>
-    <description>
-      The HFile format version to use for new files. Set this to 1 to test
-      backwards-compatibility. The default value of this option should be
-      consistent with FixedFileTrailer.MAX_VERSION.
-    </description>
-  </property>
-  <property>
-    <name>io.storefile.bloom.block.size</name>
-    <value>131072</value>
-    <description>
-      The size in bytes of a single block ("chunk") of a compound Bloom
-      filter. This size is approximate, because Bloom blocks can only be
-      inserted at data block boundaries, and the number of keys per data
-      block varies.
-    </description>
-  </property>
-  <property>
-    <name>io.storefile.bloom.cacheonwrite</name>
-    <value>false</value>
-    <description>
-      Enables cache-on-write for inline blocks of a compound Bloom filter.
-    </description>
-  </property>
-  <property>
-    <name>hbase.rs.cacheblocksonwrite</name>
-    <value>false</value>
-    <description>
-      Whether an HFile block should be added to the block cache when the
-      block is finished.
-    </description>
-  </property>
-  <property>
-    <name>hbase.rpc.engine</name>
-    <value>org.apache.hadoop.hbase.ipc.WritableRpcEngine</value>
-    <description>Implementation of org.apache.hadoop.hbase.ipc.RpcEngine to be
-      used for client / server RPC call marshalling.
-    </description>
-  </property>
+    <property>
+        <name>hbase.rootdir</name>
+        <value>file:///tmp/hbase-${user.name}/hbase</value>
+        <description>The directory shared by region servers and into
+            which HBase persists. The URL should be 'fully-qualified'
+            to include the filesystem scheme. For example, to specify the
+            HDFS directory '/hbase' where the HDFS instance's namenode is
+            running at namenode.example.org on port 9000, set this value to:
+            hdfs://namenode.example.org:9000/hbase. By default HBase writes
+            into /tmp. Change this configuration else all data will be lost
+            on machine restart.
+        </description>
+    </property>
+    <property>
+        <name>hbase.master.port</name>
+        <value>60000</value>
+        <description>The port the HBase Master should bind to.</description>
+    </property>
+    <property>
+        <name>hbase.cluster.distributed</name>
+        <value>false</value>
+        <description>The mode the cluster will be in. Possible values are
+            false for standalone mode and true for distributed mode. If
+            false, startup will run all HBase and ZooKeeper daemons together
+            in the one JVM.
+        </description>
+    </property>
+    <property>
+        <name>hbase.tmp.dir</name>
+        <value>/tmp/hbase-${user.name}</value>
+        <description>Temporary directory on the local filesystem.
+            Change this setting to point to a location more permanent
+            than '/tmp' (The '/tmp' directory is often cleared on
+            machine restart).
+        </description>
+    </property>
+    <property>
+        <name>hbase.master.info.port</name>
+        <value>60010</value>
+        <description>The port for the HBase Master web UI.
+            Set to -1 if you do not want a UI instance run.
+        </description>
+    </property>
+    <property>
+        <name>hbase.master.info.bindAddress</name>
+        <value>0.0.0.0</value>
+        <description>The bind address for the HBase Master web UI
+        </description>
+    </property>
+    <property>
+        <name>hbase.client.write.buffer</name>
+        <value>2097152</value>
+        <description>Default size of the HTable clien write buffer in bytes.
+            A bigger buffer takes more memory -- on both the client and server
+            side since server instantiates the passed write buffer to process
+            it -- but a larger buffer size reduces the number of RPCs made.
+            For an estimate of server-side memory-used, evaluate
+            hbase.client.write.buffer * hbase.regionserver.handler.count
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.port</name>
+        <value>60020</value>
+        <description>The port the HBase RegionServer binds to.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.info.port</name>
+        <value>60030</value>
+        <description>The port for the HBase RegionServer web UI
+            Set to -1 if you do not want the RegionServer UI to run.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.info.port.auto</name>
+        <value>false</value>
+        <description>Whether or not the Master or RegionServer
+            UI should search for a port to bind to. Enables automatic port
+            search if hbase.regionserver.info.port is already in use.
+            Useful for testing, turned off by default.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.info.bindAddress</name>
+        <value>0.0.0.0</value>
+        <description>The address for the HBase RegionServer web UI
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.class</name>
+        <value>org.apache.hadoop.hbase.ipc.HRegionInterface</value>
+        <description>The RegionServer interface to use.
+            Used by the client opening proxy to remote region server.
+        </description>
+    </property>
+    <property>
+        <name>hbase.client.pause</name>
+        <value>1000</value>
+        <description>General client pause value. Used mostly as value to wait
+            before running a retry of a failed get, region lookup, etc.
+        </description>
+    </property>
+    <property>
+        <name>hbase.client.retries.number</name>
+        <value>10</value>
+        <description>Maximum retries. Used as maximum for all retryable
+            operations such as fetching of the root region from root region
+            server, getting a cell's value, starting a row update, etc.
+            Default: 10.
+        </description>
+    </property>
+    <property>
+        <name>hbase.bulkload.retries.number</name>
+        <value>0</value>
+        <description>Maximum retries. This is maximum number of iterations
+            to atomic bulk loads are attempted in the face of splitting operations
+            0 means never give up. Default: 0.
+        </description>
+    </property>
+    <property>
+        <name>hbase.client.scanner.caching</name>
+        <value>1</value>
+        <description>Number of rows that will be fetched when calling next
+            on a scanner if it is not served from (local, client) memory. Higher
+            caching values will enable faster scanners but will eat up more memory
+            and some calls of next may take longer and longer times when the cache is empty.
+            Do not set this value such that the time between invocations is greater
+            than the scanner timeout; i.e. hbase.regionserver.lease.period
+        </description>
+    </property>
+    <property>
+        <name>hbase.client.keyvalue.maxsize</name>
+        <value>10485760</value>
+        <description>Specifies the combined maximum allowed size of a KeyValue
+            instance. This is to set an upper boundary for a single entry saved in a
+            storage file. Since they cannot be split it helps avoiding that a region
+            cannot be split any further because the data is too large. It seems wise
+            to set this to a fraction of the maximum region size. Setting it to zero
+            or less disables the check.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.lease.period</name>
+        <value>60000</value>
+        <description>HRegion server lease period in milliseconds. Default is
+            60 seconds. Clients must report in within this period else they are
+            considered dead.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.handler.count</name>
+        <value>10</value>
+        <description>Count of RPC Listener instances spun up on RegionServers.
+            Same property is used by the Master for count of master handlers.
+            Default is 10.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.msginterval</name>
+        <value>3000</value>
+        <description>Interval between messages from the RegionServer to Master
+            in milliseconds.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.optionallogflushinterval</name>
+        <value>1000</value>
+        <description>Sync the HLog to the HDFS after this interval if it has not
+            accumulated enough entries to trigger a sync. Default 1 second. Units:
+            milliseconds.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.regionSplitLimit</name>
+        <value>2147483647</value>
+        <description>Limit for the number of regions after which no more region
+            splitting should take place. This is not a hard limit for the number of
+            regions but acts as a guideline for the regionserver to stop splitting after
+            a certain limit. Default is set to MAX_INT; i.e. do not block splitting.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.logroll.period</name>
+        <value>3600000</value>
+        <description>Period at which we will roll the commit log regardless
+            of how many edits it has.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.logroll.errors.tolerated</name>
+        <value>2</value>
+        <description>The number of consecutive WAL close errors we will allow
+            before triggering a server abort. A setting of 0 will cause the
+            region server to abort if closing the current WAL writer fails during
+            log rolling. Even a small value (2 or 3) will allow a region server
+            to ride over transient HDFS errors.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.hlog.reader.impl</name>
+        <value>org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader</value>
+        <description>The HLog file reader implementation.</description>
+    </property>
+    <property>
+        <name>hbase.regionserver.hlog.writer.impl</name>
+        <value>org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogWriter</value>
+        <description>The HLog file writer implementation.</description>
+    </property>
+    <property>
+        <name>hbase.regionserver.nbreservationblocks</name>
+        <value>4</value>
+        <description>The number of resevoir blocks of memory release on
+            OOME so we can cleanup properly before server shutdown.
+        </description>
+    </property>
+    <property>
+        <name>hbase.zookeeper.dns.interface</name>
+        <value>default</value>
+        <description>The name of the Network Interface from which a ZooKeeper server
+            should report its IP address.
+        </description>
+    </property>
+    <property>
+        <name>hbase.zookeeper.dns.nameserver</name>
+        <value>default</value>
+        <description>The host name or IP address of the name server (DNS)
+            which a ZooKeeper server should use to determine the host name used by the
+            master for communication and display purposes.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.dns.interface</name>
+        <value>default</value>
+        <description>The name of the Network Interface from which a region server
+            should report its IP address.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.dns.nameserver</name>
+        <value>default</value>
+        <description>The host name or IP address of the name server (DNS)
+            which a region server should use to determine the host name used by the
+            master for communication and display purposes.
+        </description>
+    </property>
+    <property>
+        <name>hbase.master.dns.interface</name>
+        <value>default</value>
+        <description>The name of the Network Interface from which a master
+            should report its IP address.
+        </description>
+    </property>
+    <property>
+        <name>hbase.master.dns.nameserver</name>
+        <value>default</value>
+        <description>The host name or IP address of the name server (DNS)
+            which a master should use to determine the host name used
+            for communication and display purposes.
+        </description>
+    </property>
+    <property>
+        <name>hbase.balancer.period
+        </name>
+        <value>300000</value>
+        <description>Period at which the region balancer runs in the Master.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regions.slop</name>
+        <value>0.2</value>
+        <description>Rebalance if any regionserver has average + (average * slop) regions.
+            Default is 20% slop.
+        </description>
+    </property>
+    <property>
+        <name>hbase.master.logcleaner.ttl</name>
+        <value>600000</value>
+        <description>Maximum time a HLog can stay in the .oldlogdir directory,
+            after which it will be cleaned by a Master thread.
+        </description>
+    </property>
+    <property>
+        <name>hbase.master.logcleaner.plugins</name>
+        <value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner</value>
+        <description>A comma-separated list of LogCleanerDelegate invoked by
+            the LogsCleaner service. These WAL/HLog cleaners are called in order,
+            so put the HLog cleaner that prunes the most HLog files in front. To
+            implement your own LogCleanerDelegate, just put it in HBase's classpath
+            and add the fully qualified class name here. Always add the above
+            default log cleaners in the list.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.global.memstore.upperLimit</name>
+        <value>0.4</value>
+        <description>Maximum size of all memstores in a region server before new
+            updates are blocked and flushes are forced. Defaults to 40% of heap
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.global.memstore.lowerLimit</name>
+        <value>0.35</value>
+        <description>When memstores are being forced to flush to make room in
+            memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+            This value equal to hbase.regionserver.global.memstore.upperLimit causes
+            the minimum possible flushing to occur when updates are blocked due to
+            memstore limiting.
+        </description>
+    </property>
+    <property>
+        <name>hbase.server.thread.wakefrequency</name>
+        <value>10000</value>
+        <description>Time to sleep in between searches for work (in milliseconds).
+            Used as sleep interval by service threads such as log roller.
+        </description>
+    </property>
+    <property>
+        <name>hbase.server.versionfile.writeattempts</name>
+        <value>3</value>
+        <description>
+            How many time to retry attempting to write a version file
+            before just aborting. Each attempt is seperated by the
+            hbase.server.thread.wakefrequency milliseconds.
+        </description>
+    </property>
+    <property>
+        <name>hbase.hregion.memstore.flush.size</name>
+        <value>134217728</value>
+        <description>
+            Memstore will be flushed to disk if size of the memstore
+            exceeds this number of bytes. Value is checked by a thread that runs
+            every hbase.server.thread.wakefrequency.
+        </description>
+    </property>
+    <property>
+        <name>hbase.hregion.preclose.flush.size</name>
+        <value>5242880</value>
+        <description>
+            If the memstores in a region are this size or larger when we go
+            to close, run a "pre-flush" to clear out memstores before we put up
+            the region closed flag and take the region offline. On close,
+            a flush is run under the close flag to empty memory. During
+            this time the region is offline and we are not taking on any writes.
+            If the memstore content is large, this flush could take a long time to
+            complete. The preflush is meant to clean out the bulk of the memstore
+            before putting up the close flag and taking the region offline so the
+            flush that runs under the close flag has little to do.
+        </description>
+    </property>
+    <property>
+        <name>hbase.hregion.memstore.block.multiplier</name>
+        <value>2</value>
+        <description>
+            Block updates if memstore has hbase.hregion.block.memstore
+            time hbase.hregion.flush.size bytes. Useful preventing
+            runaway memstore during spikes in update traffic. Without an
+            upper-bound, memstore fills such that when it flushes the
+            resultant flush files take a long time to compact or split, or
+            worse, we OOME.
+        </description>
+    </property>
+    <property>
+        <name>hbase.hregion.memstore.mslab.enabled</name>
+        <value>true</value>
+        <description>
+            Enables the MemStore-Local Allocation Buffer,
+            a feature which works to prevent heap fragmentation under
+            heavy write loads. This can reduce the frequency of stop-the-world
+            GC pauses on large heaps.
+        </description>
+    </property>
+    <property>
+        <name>hbase.hregion.max.filesize</name>
+        <value>10737418240</value>
+        <description>
+            Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+            grown to exceed this value, the hosting HRegion is split in two.
+            Default: 10G.
+        </description>
+    </property>
+    <property>
+        <name>hbase.hstore.compactionThreshold</name>
+        <value>3</value>
+        <description>
+            If more than this number of HStoreFiles in any one HStore
+            (one HStoreFile is written per flush of memstore) then a compaction
+            is run to rewrite all HStoreFiles files as one. Larger numbers
+            put off compaction but when it runs, it takes longer to complete.
+        </description>
+    </property>
+    <property>
+        <name>hbase.hstore.blockingStoreFiles</name>
+        <value>7</value>
+        <description>
+            If more than this number of StoreFiles in any one Store
+            (one StoreFile is written per flush of MemStore) then updates are
+            blocked for this HRegion until a compaction is completed, or
+            until hbase.hstore.blockingWaitTime has been exceeded.
+        </description>
+    </property>
+    <property>
+        <name>hbase.hstore.blockingWaitTime</name>
+        <value>90000</value>
+        <description>
+            The time an HRegion will block updates for after hitting the StoreFile
+            limit defined by hbase.hstore.blockingStoreFiles.
+            After this time has elapsed, the HRegion will stop blocking updates even
+            if a compaction has not been completed. Default: 90 seconds.
+        </description>
+    </property>
+    <property>
+        <name>hbase.hstore.compaction.max</name>
+        <value>10</value>
+        <description>Max number of HStoreFiles to compact per 'minor' compaction.
+        </description>
+    </property>
+    <property>
+        <name>hbase.hregion.majorcompaction</name>
+        <value>86400000</value>
+        <description>The time (in miliseconds) between 'major' compactions of all
+            HStoreFiles in a region. Default: 1 day.
+            Set to 0 to disable automated major compactions.
+        </description>
+    </property>
+    <property>
+        <name>hbase.mapreduce.hfileoutputformat.blocksize</name>
+        <value>65536</value>
+        <description>The mapreduce HFileOutputFormat writes storefiles/hfiles.
+            This is the minimum hfile blocksize to emit. Usually in hbase, writing
+            hfiles, the blocksize is gotten from the table schema (HColumnDescriptor)
+            but in the mapreduce outputformat context, we don't have access to the
+            schema so get blocksize from Configuration. The smaller you make
+            the blocksize, the bigger your index and the less you fetch on a
+            random-access. Set the blocksize down if you have small cells and want
+            faster random-access of individual cells.
+        </description>
+    </property>
+    <property>
+        <name>hfile.block.cache.size</name>
+        <value>0.25</value>
+        <description>
+            Percentage of maximum heap (-Xmx setting) to allocate to block cache
+            used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+            Set to 0 to disable but it's not recommended.
+        </description>
+    </property>
+    <property>
+        <name>hbase.hash.type</name>
+        <value>murmur</value>
+        <description>The hashing algorithm for use in HashFunction. Two values are
+            supported now: murmur (MurmurHash) and jenkins (JenkinsHash).
+            Used by bloom filters.
+        </description>
+    </property>
+    <property>
+        <name>hfile.block.index.cacheonwrite</name>
+        <value>false</value>
+        <description>
+            This allows to put non-root multi-level index blocks into the block
+            cache at the time the index is being written.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.checksum.verify</name>
+        <value>false</value>
+        <description>
+            Allow hbase to do checksums rather than using hdfs checksums. This is a backwards
+            incompatible change.
+        </description>
+    </property>
+    <property>
+        <name>hfile.index.block.max.size</name>
+        <value>131072</value>
+        <description>
+            When the size of a leaf-level, intermediate-level, or root-level
+            index block in a multi-level block index grows to this size, the
+            block is written out and a new block is started.
+        </description>
+    </property>
+    <property>
+        <name>hfile.format.version</name>
+        <value>2</value>
+        <description>
+            The HFile format version to use for new files. Set this to 1 to test
+            backwards-compatibility. The default value of this option should be
+            consistent with FixedFileTrailer.MAX_VERSION.
+        </description>
+    </property>
+    <property>
+        <name>io.storefile.bloom.block.size</name>
+        <value>131072</value>
+        <description>
+            The size in bytes of a single block ("chunk") of a compound Bloom
+            filter. This size is approximate, because Bloom blocks can only be
+            inserted at data block boundaries, and the number of keys per data
+            block varies.
+        </description>
+    </property>
+    <property>
+        <name>io.storefile.bloom.cacheonwrite</name>
+        <value>false</value>
+        <description>
+            Enables cache-on-write for inline blocks of a compound Bloom filter.
+        </description>
+    </property>
+    <property>
+        <name>hbase.rs.cacheblocksonwrite</name>
+        <value>false</value>
+        <description>
+            Whether an HFile block should be added to the block cache when the
+            block is finished.
+        </description>
+    </property>
+    <property>
+        <name>hbase.rpc.engine</name>
+        <value>org.apache.hadoop.hbase.ipc.WritableRpcEngine</value>
+        <description>Implementation of org.apache.hadoop.hbase.ipc.RpcEngine to be
+            used for client / server RPC call marshalling.
+        </description>
+    </property>
 
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-      the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_h...@example.com".  The kerberos principal name
-      that should be used to run the HMaster process.  The principal name should
-      be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-      portion, it will be replaced with the actual hostname of the running
-      instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-      the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_h...@example.com".  The kerberos principal name
-      that should be used to run the HRegionServer process.  The principal name
-      should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-      hostname portion, it will be replaced with the actual hostname of the
-      running instance.  An entry for this principal must exist in the file
-      specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
+    <!-- The following properties configure authentication information for
+         HBase processes when using Kerberos security.  There are no default
+         values, included here for documentation purposes -->
+    <property>
+        <name>hbase.master.keytab.file</name>
+        <value></value>
+        <description>Full path to the kerberos keytab file to use for logging in
+            the configured HMaster server principal.
+        </description>
+    </property>
+    <property>
+        <name>hbase.master.kerberos.principal</name>
+        <value></value>
+        <description>Ex. "hbase/_h...@example.com". The kerberos principal name
+            that should be used to run the HMaster process. The principal name should
+            be in the form: user/hostname@DOMAIN. If "_HOST" is used as the hostname
+            portion, it will be replaced with the actual hostname of the running
+            instance.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.keytab.file</name>
+        <value></value>
+        <description>Full path to the kerberos keytab file to use for logging in
+            the configured HRegionServer server principal.
+        </description>
+    </property>
+    <property>
+        <name>hbase.regionserver.kerberos.principal</name>
+        <value></value>
+        <description>Ex. "hbase/_h...@example.com". The kerberos principal name
+            that should be used to run the HRegionServer process. The principal name
+            should be in the form: user/hostname@DOMAIN. If "_HOST" is used as the
+            hostname portion, it will be replaced with the actual hostname of the
+            running instance. An entry for this principal must exist in the file
+            specified in hbase.regionserver.keytab.file
+        </description>
+    </property>
 
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hadoop.policy.file</name>
-    <value>hbase-policy.xml</value>
-    <description>The policy configuration file used by RPC servers to make
-      authorization decisions on client requests.  Only used when HBase
-      security is enabled.
-    </description>
-  </property>
-  <property>
-    <name>hbase.superuser</name>
-    <value></value>
-    <description>List of users or groups (comma-separated), who are allowed
-      full privileges, regardless of stored ACLs, across the cluster.
-      Only used when HBase security is enabled.
-    </description>
-  </property>
-  <property>
-    <name>hbase.auth.key.update.interval</name>
-    <value>86400000</value>
-    <description>The update interval for master key for authentication tokens
-      in servers in milliseconds.  Only used when HBase security is enabled.
-    </description>
-  </property>
-  <property>
-    <name>hbase.auth.token.max.lifetime</name>
-    <value>604800000</value>
-    <description>The maximum lifetime in milliseconds after which an
-      authentication token expires.  Only used when HBase security is enabled.
-    </description>
-  </property>
+    <!-- Additional configuration specific to HBase security -->
+    <property>
+        <name>hadoop.policy.file</name>
+        <value>hbase-policy.xml</value>
+        <description>The policy configuration file used by RPC servers to make
+            authorization decisions on client requests. Only used when HBase
+            security is enabled.
+        </description>
+    </property>
+    <property>
+        <name>hbase.superuser</name>
+        <value></value>
+        <description>List of users or groups (comma-separated), who are allowed
+            full privileges, regardless of stored ACLs, across the cluster.
+            Only used when HBase security is enabled.
+        </description>
+    </property>
+    <property>
+        <name>hbase.auth.key.update.interval</name>
+        <value>86400000</value>
+        <description>The update interval for master key for authentication tokens
+            in servers in milliseconds. Only used when HBase security is enabled.
+        </description>
+    </property>
+    <property>
+        <name>hbase.auth.token.max.lifetime</name>
+        <value>604800000</value>
+        <description>The maximum lifetime in milliseconds after which an
+            authentication token expires. Only used when HBase security is enabled.
+        </description>
+    </property>
 
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>180000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      
http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file path are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.rootserver</name>
-    <value>root-region-server</value>
-    <description>Path to ZNode holding root region location. This is written by
-      the master and read by clients and region servers. If a relative path is
-      given, the parent folder will be ${zookeeper.znode.parent}. By default,
-      this means the root location is stored at /hbase/root-region-server.
-    </description>
-  </property>
+    <property>
+        <name>zookeeper.session.timeout</name>
+        <value>180000</value>
+        <description>ZooKeeper session timeout.
+            HBase passes this to the zk quorum as suggested maximum time for a
+            session (This setting becomes zookeeper's 'maxSessionTimeout'). See
+            
http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+            "The client sends a requested timeout, the server responds with the
+            timeout that it can give the client. " In milliseconds.
+        </description>
+    </property>
+    <property>
+        <name>zookeeper.znode.parent</name>
+        <value>/hbase</value>
+        <description>Root ZNode for HBase in ZooKeeper. All of HBase's 
ZooKeeper
+            files that are configured with a relative path will go under this 
node.
+            By default, all of HBase's ZooKeeper file path are configured with 
a
+            relative path, so they will all go under this directory unless 
changed.
+        </description>
+    </property>
+    <property>
+        <name>zookeeper.znode.rootserver</name>
+        <value>root-region-server</value>
+        <description>Path to ZNode holding root region location. This is 
written by
+            the master and read by clients and region servers. If a relative 
path is
+            given, the parent folder will be ${zookeeper.znode.parent}. By 
default,
+            this means the root location is stored at 
/hbase/root-region-server.
+        </description>
+    </property>
 
-  <property>
-    <name>zookeeper.znode.acl.parent</name>
-    <value>acl</value>
-    <description>Root ZNode for access control lists.</description>
-  </property>
+    <property>
+        <name>zookeeper.znode.acl.parent</name>
+        <value>acl</value>
+        <description>Root ZNode for access control lists.</description>
+    </property>
 
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-      default on all tables. For any override coprocessor method, these classes
-      will be called in order. After implementing your own Coprocessor, just 
put
-      it in HBase's classpath and add the fully qualified class name here.
-      A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
+    <property>
+        <name>hbase.coprocessor.region.classes</name>
+        <value></value>
+        <description>A comma-separated list of Coprocessors that are loaded by
+            default on all tables. For any override coprocessor method, these 
classes
+            will be called in order. After implementing your own Coprocessor, 
just put
+            it in HBase's classpath and add the fully qualified class name 
here.
+            A coprocessor can also be loaded on demand by setting 
HTableDescriptor.
+        </description>
+    </property>
 
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
+    <property>
+        <name>hbase.coprocessor.master.classes</name>
+        <value></value>
+        <description>A comma-separated list of
+            org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors 
that are
+            loaded by default on the active HMaster process. For any 
implemented
+            coprocessor methods, the listed classes will be called in order. 
After
+            implementing your own MasterObserver, just put it in HBase's 
classpath
+            and add the fully qualified class name here.
+        </description>
+    </property>
 
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-      By default this is set to localhost for local and pseudo-distributed 
modes
-      of operation. For a fully-distributed setup, this should be set to a full
-      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in 
hbase-env.sh
-      this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.peerport</name>
-    <value>2888</value>
-    <description>Port used by ZooKeeper peers to talk to each other.
-      See 
http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
-      for more information.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.leaderport</name>
-    <value>3888</value>
-    <description>Port used by ZooKeeper for leader election.
-      See 
http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
-      for more information.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update 
functionality.
-      This allows certain ZooKeeper operations to complete more quickly and 
prevents some issues
-      with rare ZooKeeper failure scenarios (see the release note of 
HBASE-6710 for an example).
-      IMPORTANT: only set this to true if all ZooKeeper servers in the cluster 
are on version 3.4+
-      and will not be downgraded.  ZooKeeper versions before 3.4 do not 
support multi-update and will
-      not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-      NOTE: this and future versions of HBase are only supported to work with
-      versions of ZooKeeper with multi support (CDH4+), so it is safe to use 
ZK.multi.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+    <!--
+    The following three properties are used together to create the list of
+    host:peer_port:leader_port quorum servers for ZooKeeper.
+    -->
+    <property>
+        <name>hbase.zookeeper.quorum</name>
+        <value>localhost</value>
+        <description>Comma separated list of servers in the ZooKeeper Quorum.
+            For example, 
"host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+            By default this is set to localhost for local and 
pseudo-distributed modes
+            of operation. For a fully-distributed setup, this should be set to 
a full
+            list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in 
hbase-env.sh
+            this is the list of servers which we will start/stop ZooKeeper on.
+        </description>
+    </property>
+    <property>
+        <name>hbase.zookeeper.peerport</name>
+        <value>2888</value>
+        <description>Port used by ZooKeeper peers to talk to each other.
+            See 
http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
+            for more information.
+        </description>
+    </property>
+    <property>
+        <name>hbase.zookeeper.leaderport</name>
+        <value>3888</value>
+        <description>Port used by ZooKeeper for leader election.
+            See 
http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
+            for more information.
+        </description>
+    </property>
+    <property>
+        <name>hbase.zookeeper.useMulti</name>
+        <value>true</value>
+        <description>Instructs HBase to make use of ZooKeeper's multi-update 
functionality.
+            This allows certain ZooKeeper operations to complete more quickly 
and prevents some issues
+            with rare ZooKeeper failure scenarios (see the release note of 
HBASE-6710 for an example).
+            IMPORTANT: only set this to true if all ZooKeeper servers in the 
cluster are on version 3.4+
+            and will not be downgraded. ZooKeeper versions before 3.4 do not 
support multi-update and will
+            not fail gracefully if multi-update is invoked (see 
ZOOKEEPER-1495).
+            NOTE: this and future versions of HBase are only supported to work 
with
+            versions of ZooKeeper with multi support (CDH4+), so it is safe to 
use ZK.multi.
+        </description>
+    </property>
+    <!-- End of properties used to generate ZooKeeper host:port quorum list. 
-->
 
-  <!--
-  Beginning of properties that are directly mapped from ZooKeeper's zoo.cfg.
-  All properties with an "hbase.zookeeper.property." prefix are converted for
-  ZooKeeper's configuration. Hence, if you want to add an option from zoo.cfg,
-  e.g.  "initLimit=10" you would append the following to your configuration:
-    <property>
-      <name>hbase.zookeeper.property.initLimit</name>
-      <value>10</value>
-    </property>
-  -->
-  <property>
-    <name>hbase.zookeeper.property.initLimit</name>
-    <value>10</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The number of ticks that the initial synchronization phase can take.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.syncLimit</name>
-    <value>5</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The number of ticks that can pass between sending a request and getting 
an
-      acknowledgment.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.dataDir</name>
-    <value>${hbase.tmp.dir}/zookeeper</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The directory where the snapshot is stored.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The port at which the clients will connect.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.maxClientCnxns</name>
-    <value>300</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      Limit on number of concurrent connections (at the socket level) that a
-      single client, identified by IP address, may make to a single member of
-      the ZooKeeper ensemble. Set high to avoid zk connection issues running
-      standalone and pseudo-distributed.
-    </description>
-  </property>
-  <!-- End of properties that are directly mapped from ZooKeeper's zoo.cfg -->
-  <property>
-    <name>hbase.rest.port</name>
-    <value>8080</value>
-    <description>The port for the HBase REST server.</description>
-  </property>
-  <property>
-    <name>hbase.rest.readonly</name>
-    <value>false</value>
-    <description>
-      Defines the mode the REST server will be started in. Possible values are:
-      false: All HTTP methods are permitted - GET/PUT/POST/DELETE.
-      true: Only the GET method is permitted.
-    </description>
-  </property>
+    <!--
+    Beginning of properties that are directly mapped from ZooKeeper's zoo.cfg.
+    All properties with an "hbase.zookeeper.property." prefix are converted for
+    ZooKeeper's configuration. Hence, if you want to add an option from 
zoo.cfg,
+    e.g.  "initLimit=10" you would append the following to your configuration:
+      <property>
+        <name>hbase.zookeeper.property.initLimit</name>
+        <value>10</value>
+      </property>
+    -->
+    <property>
+        <name>hbase.zookeeper.property.initLimit</name>
+        <value>10</value>
+        <description>Property from ZooKeeper's config zoo.cfg.
+            The number of ticks that the initial synchronization phase can 
take.
+        </description>
+    </property>
+    <property>
+        <name>hbase.zookeeper.property.syncLimit</name>
+        <value>5</value>
+        <description>Property from ZooKeeper's config zoo.cfg.
+            The number of ticks that can pass between sending a request and 
getting an
+            acknowledgment.
+        </description>
+    </property>
+    <property>
+        <name>hbase.zookeeper.property.dataDir</name>
+        <value>${hbase.tmp.dir}/zookeeper</value>
+        <description>Property from ZooKeeper's config zoo.cfg.
+            The directory where the snapshot is stored.
+        </description>
+    </property>
+    <property>
+        <name>hbase.zookeeper.property.clientPort</name>
+        <value>2181</value>
+        <description>Property from ZooKeeper's config zoo.cfg.
+            The port at which the clients will connect.
+        </description>
+    </property>
+    <property>
+        <name>hbase.zookeeper.property.maxClientCnxns</name>
+        <value>300</value>
+        <description>Property from ZooKeeper's config zoo.cfg.
+            Limit on number of concurrent connections (at the socket level) 
that a
+            single client, identified by IP address, may make to a single 
member of
+            the ZooKeeper ensemble. Set high to avoid zk connection issues 
running
+            standalone and pseudo-distributed.
+        </description>
+    </property>
+    <!-- End of properties that are directly mapped from ZooKeeper's zoo.cfg 
-->
+    <property>
+        <name>hbase.rest.port</name>
+        <value>8080</value>
+        <description>The port for the HBase REST server.</description>
+    </property>
+    <property>
+        <name>hbase.rest.readonly</name>
+        <value>false</value>
+        <description>
+            Defines the mode the REST server will be started in. Possible 
values are:
+            false: All HTTP methods are permitted - GET/PUT/POST/DELETE.
+            true: Only the GET method is permitted.
+        </description>
+    </property>
 
-  <property skipInDoc="true">
-    <name>hbase.defaults.for.version</name>
-    <value>0.94.2-cdh4.2.1</value>
-    <description>
-      This defaults file was compiled for version 0.94.2-cdh4.2.1. This 
variable is used
-      to make sure that a user doesn't have an old version of 
hbase-default.xml on the
-      classpath.
-    </description>
-  </property>
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>true</value>
-    <description>
-      Set to true to skip the 'hbase.defaults.for.version' check.
-      Setting this to true can be useful in contexts other than
-      the other side of a maven generation; i.e. running in an
-      ide.  You'll want to set this boolean to true to avoid
-      seeing the RuntimException complaint: "hbase-default.xml file
-      seems to be for and old version of HBase (0.94.2-cdh4.2.1), this
-      version is X.X.X-SNAPSHOT"
-    </description>
-  </property>
-  <property>
-    <name>hbase.coprocessor.abortonerror</name>
-    <value>false</value>
-    <description>
-      Set to true to cause the hosting server (master or regionserver) to
-      abort if a coprocessor throws a Throwable object that is not IOException 
or
-      a subclass of IOException. Setting it to true might be useful in 
development
-      environments where one wants to terminate the server as soon as possible 
to
-      simplify coprocessor failure analysis.
-    </description>
-  </property>
-  <property>
-    <name>hbase.online.schema.update.enable</name>
-    <value>false</value>
-    <description>
-      Set true to enable online schema changes.  This is an experimental 
feature.
-      There are known issues modifying table schemas at the same time a region
-      split is happening so your table needs to be quiescent or else you have 
to
-      be running with splits disabled.
-    </description>
-  </property>
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Does HDFS allow appends to files?
-      This is an hdfs config. set in here so the hdfs client will do append 
support.
-      You must ensure that this config. is true serverside too when running 
hbase
-      (You will have to restart your cluster after setting it).
-    </description>
-  </property>
-  <property>
-    <name>hbase.thrift.minWorkerThreads</name>
-    <value>16</value>
-    <description>
-      The "core size" of the thread pool. New threads are created on every
-      connection until this many threads are created.
-    </description>
-  </property>
-  <property>
-    <name>hbase.thrift.maxWorkerThreads</name>
-    <value>1000</value>
-    <description>
-      The maximum size of the thread pool. When the pending request queue
-      overflows, new threads are created until their number reaches this 
number.
-      After that, the server starts dropping connections.
-    </description>
-  </property>
-  <property>
-    <name>hbase.thrift.maxQueuedRequests</name>
-    <value>1000</value>
-    <description>
-      The maximum number of pending Thrift connections waiting in the queue. If
-      there are no idle threads in the pool, the server queues requests. Only
-      when the queue overflows, new threads are added, up to
-      hbase.thrift.maxQueuedRequests threads.
-    </description>
-  </property>
-  <property>
-    <name>hbase.offheapcache.percentage</name>
-    <value>0</value>
-    <description>
-      The amount of off heap space to be allocated towards the experimental
-      off heap cache. If you desire the cache to be disabled, simply set this
-      value to 0.
-    </description>
-  </property>
-  <property>
-    <name>hbase.data.umask.enable</name>
-    <value>false</value>
-    <description>Enable, if true, that file permissions should be assigned
-      to the files written by the regionserver
-    </description>
-  </property>
-  <property>
-    <name>hbase.data.umask</name>
-    <value>000</value>
-    <description>File permissions that should be used to write data
-      files when hbase.data.umask.enable is true
-    </description>
-  </property>
+    <property skipInDoc="true">
+        <name>hbase.defaults.for.version</name>
+        <value>0.94.2-cdh4.2.1</value>
+        <description>
+            This defaults file was compiled for version 0.94.2-cdh4.2.1. This 
variable is used
+            to make sure that a user doesn't have an old version of 
hbase-default.xml on the
+            classpath.
+        </description>
+    </property>
+    <property>
+        <name>hbase.defaults.for.version.skip</name>
+        <value>true</value>
+        <description>
+            Set to true to skip the 'hbase.defaults.for.version' check.
+            Setting this to true can be useful in contexts other than
+            the other side of a maven generation; i.e. running in an
+            ide. You'll want to set this boolean to true to avoid
+            seeing the RuntimException complaint: "hbase-default.xml file
+            seems to be for and old version of HBase (0.94.2-cdh4.2.1), this
+            version is X.X.X-SNAPSHOT"
+        </description>
+    </property>
+    <property>
+        <name>hbase.coprocessor.abortonerror</name>
+        <value>false</value>
+        <description>
+            Set to true to cause the hosting server (master or regionserver) to
+            abort if a coprocessor throws a Throwable object that is not 
IOException or
+            a subclass of IOException. Setting it to true might be useful in 
development
+            environments where one wants to terminate the server as soon as 
possible to
+            simplify coprocessor failure analysis.
+        </description>
+    </property>
+    <property>
+        <name>hbase.online.schema.update.enable</name>
+        <value>false</value>
+        <description>
+            Set true to enable online schema changes. This is an experimental 
feature.
+            There are known issues modifying table schemas at the same time a 
region
+            split is happening so your table needs to be quiescent or else you 
have to
+            be running with splits disabled.
+        </description>
+    </property>
+    <property>
+        <name>dfs.support.append</name>
+        <value>true</value>
+        <description>Does HDFS allow appends to files?
+            This is an hdfs config. set in here so the hdfs client will do 
append support.
+            You must ensure that this config. is true serverside too when 
running hbase
+            (You will have to restart your cluster after setting it).
+        </description>
+    </property>
+    <property>
+        <name>hbase.thrift.minWorkerThreads</name>
+        <value>16</value>
+        <description>
+            The "core size" of the thread pool. New threads are created on 
every
+            connection until this many threads are created.
+        </description>
+    </property>
+    <property>
+        <name>hbase.thrift.maxWorkerThreads</name>
+        <value>1000</value>
+        <description>
+            The maximum size of the thread pool. When the pending request queue
+            overflows, new threads are created until their number reaches this 
number.
+            After that, the server starts dropping connections.
+        </description>
+    </property>
+    <property>
+        <name>hbase.thrift.maxQueuedRequests</name>
+        <value>1000</value>
+        <description>
+            The maximum number of pending Thrift connections waiting in the 
queue. If
+            there are no idle threads in the pool, the server queues requests. 
Only
+            when the queue overflows, new threads are added, up to
+            hbase.thrift.maxQueuedRequests threads.
+        </description>
+    </property>
+    <property>
+        <name>hbase.offheapcache.percentage</name>
+        <value>0</value>
+        <description>
+            The amount of off heap space to be allocated towards the 
experimental
+            off heap cache. If you desire the cache to be disabled, simply set 
this
+            value to 0.
+        </description>
+    </property>
+    <property>
+        <name>hbase.data.umask.enable</name>
+        <value>false</value>
+        <description>Enable, if true, that file permissions should be assigned
+            to the files written by the regionserver
+        </description>
+    </property>
+    <property>
+        <name>hbase.data.umask</name>
+        <value>000</value>
+        <description>File permissions that should be used to write data
+            files when hbase.data.umask.enable is true
+        </description>
+    </property>
 
-  <property>
-    <name>hbase.metrics.showTableName</name>
-    <value>true</value>
-    <description>Whether to include the prefix "tbl.tablename" in per-column 
family metrics.
-      If true, for each metric M, per-cf metrics will be reported for 
tbl.T.cf.CF.M, if false,
-      per-cf metrics will be aggregated by column-family across tables, and 
reported for cf.CF.M.
-      In both cases, the aggregated metric M across tables and cfs will be 
reported.
-    </description>
-  </property>
-  <property>
-    <name>hbase.table.archive.directory</name>
-    <value>.archive</value>
-    <description>Per-table directory name under which to backup files for a
-      table. Files are moved to the same directories as they would be under the
-      table directory, but instead are just one level lower (under
-      table/.archive/... rather than table/...). Currently only applies to 
HFiles.</description>
-  </property>
-  <property>
-    <name>hbase.master.hfilecleaner.plugins</name>
-    
<value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner</value>
-    <description>A comma-separated list of HFileCleanerDelegate invoked by
-      the HFileCleaner service. These HFiles cleaners are called in order,
-      so put the cleaner that prunes the most files in front. To
-      implement your own HFileCleanerDelegate, just put it in HBase's classpath
-      and add the fully qualified class name here. Always add the above
-      default log cleaners in the list as they will be overwritten in 
hbase-site.xml.
-    </description>
-  </property>
-  <property>
-    <name>hbase.rest.threads.max</name>
-    <value>100</value>
-    <description>
-      The maximum number of threads of the REST server thread pool.
-      Threads in the pool are reused to process REST requests. This
-      controls the maximum number of requests processed concurrently.
-      It may help to control the memory used by the REST server to
-      avoid OOM issues. If the thread pool is full, incoming requests
-      will be queued up and wait for some free threads. The default
-      is 100.
-    </description>
-  </property>
-  <property>
-    <name>hbase.rest.threads.min</name>
-    <value>2</value>
-    <description>
-      The minimum number of threads of the REST server thread pool.
-      The thread pool always has at least these number of threads so
-      the REST server is ready to serve incoming requests. The default
-      is 2.
-    </description>
-  </property>
+    <property>
+        <name>hbase.metrics.showTableName</name>
+        <value>true</value>
+        <description>Whether to include the prefix "tbl.tablename" in 
per-column family metrics.
+            If true, for each metric M, per-cf metrics will be reported for 
tbl.T.cf.CF.M, if false,
+            per-cf metrics will be aggregated by column-family across tables, 
and reported for cf.CF.M.
+            In both cases, the aggregated metric M across tables and cfs will 
be reported.
+        </description>
+    </property>
+    <property>
+        <name>hbase.table.archive.directory</name>
+        <value>.archive</value>
+        <description>Per-table directory name under which to backup files for a
+            table. Files are moved to the same directories as they would be 
under the
+            table directory, but instead are just one level lower (under
+            table/.archive/... rather than table/...). Currently only applies 
to HFiles.
+        </description>
+    </property>
+    <property>
+        <name>hbase.master.hfilecleaner.plugins</name>
+        
<value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner</value>
+        <description>A comma-separated list of HFileCleanerDelegate invoked by
+            the HFileCleaner service. These HFiles cleaners are called in 
order,
+            so put the cleaner that prunes the most files in front. To
+            implement your own HFileCleanerDelegate, just put it in HBase's 
classpath
+            and add the fully qualified class name here. Always add the above
+            default log cleaners in the list as they will be overwritten in 
hbase-site.xml.
+        </description>
+    </property>
+    <property>
+        <name>hbase.rest.threads.max</name>
+        <value>100</value>
+        <description>
+            The maximum number of threads of the REST server thread pool.
+            Threads in the pool are reused to process REST requests. This
+            controls the maximum number of requests processed concurrently.
+            It may help to control the memory used by the REST server to
+            avoid OOM issues. If the thread pool is full, incoming requests
+            will be queued up and wait for some free threads. The default
+            is 100.
+        </description>
+    </property>
+    <property>
+        <name>hbase.rest.threads.min</name>
+        <value>2</value>
+        <description>
+            The minimum number of threads of the REST server thread pool.
+            The thread pool always has at least these number of threads so
+            the REST server is ready to serve incoming requests. The default
+            is 2.
+        </description>
+    </property>
 </configuration>
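
The hbase-default.xml change above is indentation-only, but the file documents the defaults an embedded cluster starts from (ZooKeeper client port 2181, znode parent /hbase, maxClientCnxns 300, and so on). As a hedged illustration that is not part of this commit, the sketch below shows how such defaults can be overridden on a Hadoop Configuration before HBaseTestingUtility boots a mini cluster; the class name and the particular override values are assumptions chosen for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniHBaseSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        Configuration conf = util.getConfiguration();
        // Override a few of the defaults documented in hbase-default.xml above.
        conf.setInt("hbase.zookeeper.property.clientPort", 2181);    // zoo.cfg clientPort
        conf.set("zookeeper.znode.parent", "/hbase-unsecure");       // root znode for this cluster
        conf.setInt("hbase.zookeeper.property.maxClientCnxns", 300); // socket-level connection limit
        util.startMiniCluster();    // in-process HBase master, region server and ZooKeeper
        try {
            // ... exercise the cluster through util.getConfiguration() here ...
        } finally {
            util.shutdownMiniCluster();
        }
    }
}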

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a88e1cef/eagle-core/eagle-embed/eagle-embed-hbase/src/test/java/org/apache/eagle/service/hbase/TestHBaseBase.java
----------------------------------------------------------------------
diff --git 
a/eagle-core/eagle-embed/eagle-embed-hbase/src/test/java/org/apache/eagle/service/hbase/TestHBaseBase.java
 
b/eagle-core/eagle-embed/eagle-embed-hbase/src/test/java/org/apache/eagle/service/hbase/TestHBaseBase.java
index bc4f68f..31af2a1 100644
--- 
a/eagle-core/eagle-embed/eagle-embed-hbase/src/test/java/org/apache/eagle/service/hbase/TestHBaseBase.java
+++ 
b/eagle-core/eagle-embed/eagle-embed-hbase/src/test/java/org/apache/eagle/service/hbase/TestHBaseBase.java
@@ -17,8 +17,10 @@
 package org.apache.eagle.service.hbase;
 
 import org.apache.hadoop.conf.Configuration;
-import org.junit.*;
-
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
 
 
 @Ignore
@@ -30,8 +32,8 @@ public class TestHBaseBase {
         hbase = EmbeddedHbase.getInstance();
     }
 
-    public static void setupHBaseWithConfig(Configuration config){
-        Assert.assertTrue("HBase test mini cluster should not start",null == 
hbase);
+    public static void setupHBaseWithConfig(Configuration config) {
+        Assert.assertTrue("HBase test mini cluster should not start", null == 
hbase);
         hbase = EmbeddedHbase.getInstance(config);
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/a88e1cef/eagle-core/eagle-embed/eagle-embed-server/src/test/java/org/apache/eagle/service/embedded/tomcat/TestEmbeddedServer.java
----------------------------------------------------------------------
diff --git 
a/eagle-core/eagle-embed/eagle-embed-server/src/test/java/org/apache/eagle/service/embedded/tomcat/TestEmbeddedServer.java
 
b/eagle-core/eagle-embed/eagle-embed-server/src/test/java/org/apache/eagle/service/embedded/tomcat/TestEmbeddedServer.java
index 5299e0c..ec3439e 100644
--- 
a/eagle-core/eagle-embed/eagle-embed-server/src/test/java/org/apache/eagle/service/embedded/tomcat/TestEmbeddedServer.java
+++ 
b/eagle-core/eagle-embed/eagle-embed-server/src/test/java/org/apache/eagle/service/embedded/tomcat/TestEmbeddedServer.java
@@ -21,10 +21,10 @@ import org.junit.Test;
 
 @Ignore
 public class TestEmbeddedServer {
-       
-       @Test
-       public void test() throws Throwable{
-               String webappDirLocation = 
"../../../eagle-webservice/target/eagle-service";
-               EmbeddedServer.getInstance(webappDirLocation);
-       }
+
+    @Test
+    public void test() throws Throwable {
+        String webappDirLocation = 
"../../../eagle-webservice/target/eagle-service";
+        EmbeddedServer.getInstance(webappDirLocation);
+    }
 }
\ No newline at end of file
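
The TestEmbeddedServer change above is again whitespace-only: the test still just boots the embedded Tomcat through EmbeddedServer.getInstance(webappDirLocation). A slightly more assertive smoke check could probe the service endpoint after startup, along the lines of the sketch below; the port and context path are placeholder assumptions rather than values read from EmbeddedServer.

import java.net.HttpURLConnection;
import java.net.URL;

public class EmbeddedServerSmokeCheck {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint: substitute the port and path EmbeddedServer is actually configured with.
        URL url = new URL("http://localhost:8080/eagle-service/");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setConnectTimeout(5000);
        conn.setReadTimeout(5000);
        System.out.println("HTTP status: " + conn.getResponseCode());
        conn.disconnect();
    }
}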

