http://git-wip-us.apache.org/repos/asf/hbase/blob/cb77a925/src/main/asciidoc/_chapters/unit_testing.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/unit_testing.adoc 
b/src/main/asciidoc/_chapters/unit_testing.adoc
new file mode 100644
index 0000000..1ffedf1
--- /dev/null
+++ b/src/main/asciidoc/_chapters/unit_testing.adoc
@@ -0,0 +1,349 @@
+////
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+////
+
+[[unit.tests]]
+= Unit Testing HBase Applications
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+This chapter discusses unit testing your HBase application using JUnit, 
Mockito, MRUnit, and HBaseTestingUtility.
+Much of the information comes from 
link:http://blog.cloudera.com/blog/2013/09/how-to-test-hbase-applications-using-popular-tools/[a
 community blog post about testing HBase applications].
+For information on unit tests for HBase itself, see 
<<hbase.tests,hbase.tests>>.
+
+== JUnit
+
+HBase uses link:http://junit.org[JUnit] 4 for unit tests.
+
+This example will add unit tests to the following example class:
+
+[source,java]
+----
+
+public class MyHBaseDAO {
+
+    public static void insertRecord(HTableInterface table, HBaseTestObj obj)
+    throws Exception {
+        Put put = createPut(obj);
+        table.put(put);
+    }
+    
+    // Package-private (not private) so the unit test below can call it.
+    static Put createPut(HBaseTestObj obj) {
+        Put put = new Put(Bytes.toBytes(obj.getRowKey()));
+        put.add(Bytes.toBytes("CF"), Bytes.toBytes("CQ-1"),
+                    Bytes.toBytes(obj.getData1()));
+        put.add(Bytes.toBytes("CF"), Bytes.toBytes("CQ-2"),
+                    Bytes.toBytes(obj.getData2()));
+        return put;
+    }
+}
+----
+
+The first step is to add JUnit dependencies to your Maven POM file:
+
+[source,xml]
+----
+
+<dependency>
+    <groupId>junit</groupId>
+    <artifactId>junit</artifactId>
+    <version>4.11</version>
+    <scope>test</scope>
+</dependency>
+----
+
+Next, add some unit tests to your code.
+Tests are annotated with `@Test`.
+The following unit test exercises the `createPut` method.
+
+[source,java]
+----
+
+public class TestMyHbaseDAOData {
+  @Test
+  public void testCreatePut() throws Exception {
+    HBaseTestObj obj = new HBaseTestObj();
+    obj.setRowKey("ROWKEY-1");
+    obj.setData1("DATA-1");
+    obj.setData2("DATA-2");
+    Put put = MyHBaseDAO.createPut(obj);
+    assertEquals(obj.getRowKey(), Bytes.toString(put.getRow()));
+    assertEquals(obj.getData1(), Bytes.toString(put.get(Bytes.toBytes("CF"),
+        Bytes.toBytes("CQ-1")).get(0).getValue()));
+    assertEquals(obj.getData2(), Bytes.toString(put.get(Bytes.toBytes("CF"),
+        Bytes.toBytes("CQ-2")).get(0).getValue()));
+  }
+}
+----
+
+These tests ensure that your `createPut` method creates, populates, and 
returns a `Put` object with expected values.
+Of course, JUnit can do much more than this.
+For an introduction to JUnit, see link:https://github.com/junit-team/junit/wiki/Getting-started[the JUnit Getting Started guide].
+
+== Mockito
+
+Mockito is a mocking framework.
+It goes further than JUnit by allowing you to test the interactions between 
objects without having to replicate the entire environment.
+You can read more about Mockito at its project site, link:https://code.google.com/p/mockito/[https://code.google.com/p/mockito/].
+
+You can use Mockito to do unit testing on smaller units.
+For instance, you can mock an `org.apache.hadoop.hbase.Server` instance or an `org.apache.hadoop.hbase.master.MasterServices` interface reference rather than a full-blown `org.apache.hadoop.hbase.master.HMaster`.
+
+This example builds upon the example code in <<unit.tests,unit.tests>>, to 
test the `insertRecord` method.
+
+First, add a dependency for Mockito to your Maven POM file.
+
+[source,xml]
+----
+
+<dependency>
+    <groupId>org.mockito</groupId>
+    <artifactId>mockito-all</artifactId>
+    <version>1.9.5</version>
+    <scope>test</scope>
+</dependency>
+----
+
+Next, add a `@RunWith` annotation to your test class, to direct it to use 
Mockito.
+
+[source,java]
+----
+
+@RunWith(MockitoJUnitRunner.class)
+public class TestMyHBaseDAO {
+  @Mock
+  private HTableInterface table;
+  @Mock
+  private HTablePool hTablePool;
+  @Captor
+  private ArgumentCaptor<Put> putCaptor;
+
+  @Test
+  public void testInsertRecord() throws Exception {
+    //return the mock table when getTable is called
+    when(hTablePool.getTable("tablename")).thenReturn(table);
+    //create a test object and make a call to the DAO that needs testing
+    HBaseTestObj obj = new HBaseTestObj();
+    obj.setRowKey("ROWKEY-1");
+    obj.setData1("DATA-1");
+    obj.setData2("DATA-2");
+    MyHBaseDAO.insertRecord(table, obj);
+    verify(table).put(putCaptor.capture());
+    Put put = putCaptor.getValue();
+
+    assertEquals(Bytes.toString(put.getRow()), obj.getRowKey());
+    assert(put.has(Bytes.toBytes("CF"), Bytes.toBytes("CQ-1")));
+    assert(put.has(Bytes.toBytes("CF"), Bytes.toBytes("CQ-2")));
+    assertEquals(Bytes.toString(put.get(Bytes.toBytes("CF"), Bytes.toBytes("CQ-1")).get(0).getValue()), "DATA-1");
+    assertEquals(Bytes.toString(put.get(Bytes.toBytes("CF"), Bytes.toBytes("CQ-2")).get(0).getValue()), "DATA-2");
+  }
+}
+----
+
+This code populates `HBaseTestObj` with ``ROWKEY-1'', ``DATA-1'', and ``DATA-2'' as values.
+It then inserts the record into the mocked table.
+The Put that the DAO would have inserted is captured, and values are tested to 
verify that they are what you expected them to be.
+
+The key here is to manage the `HTablePool` and `HTableInterface` instance creation outside the DAO.
+This allows you to mock them cleanly and test Puts as shown above.
+Similarly, you can now expand into other operations such as Get, Scan, or 
Delete.
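+
+As an illustration, here is a minimal sketch of the same pattern applied to a Get. It assumes a hypothetical `MyHBaseDAO.getRecord(table, rowKey)` helper that wraps `table.get()` and returns the value of `CF:CQ-1` as a `String`; that method is not part of the example DAO above.
+
+[source,java]
+----
+
+@Test
+public void testGetRecord() throws Exception {
+  // Stub a Result carrying the value we expect for CF:CQ-1.
+  Result result = mock(Result.class);
+  when(result.getValue(Bytes.toBytes("CF"), Bytes.toBytes("CQ-1")))
+      .thenReturn(Bytes.toBytes("DATA-1"));
+  // Any Get issued against the mocked table returns the stubbed Result.
+  when(table.get(any(Get.class))).thenReturn(result);
+
+  String data1 = MyHBaseDAO.getRecord(table, "ROWKEY-1"); // hypothetical helper
+  assertEquals("DATA-1", data1);
+}
+----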
+
+== MRUnit
+
+link:http://mrunit.apache.org/[Apache MRUnit] is a library that allows you to 
unit-test MapReduce jobs.
+You can use it to test HBase jobs in the same way as other MapReduce jobs.
+
+Given a MapReduce job that writes to an HBase table called `MyTest`, which has 
one column family called `CF`, the reducer of such a job could look like the 
following:
+
+[source,java]
+----
+
+public class MyReducer extends TableReducer<Text, Text, ImmutableBytesWritable> {
+   public static final byte[] CF = "CF".getBytes();
+   public static final byte[] QUALIFIER = "CQ-1".getBytes();
+   public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
+     //processing to extract the data to be inserted; in our case, let's say we are
+     //simply appending all the records we receive from the mapper for this
+     //particular key and inserting one record into HBase
+     StringBuffer data = new StringBuffer();
+     Put put = new Put(Bytes.toBytes(key.toString()));
+     for (Text val : values) {
+         data = data.append(val);
+     }
+     put.add(CF, QUALIFIER, Bytes.toBytes(data.toString()));
+     //write to HBase
+     context.write(new ImmutableBytesWritable(Bytes.toBytes(key.toString())), put);
+   }
+ }
+----
+
+To test this code, the first step is to add a dependency on MRUnit to your Maven POM file.
+
+[source,xml]
+----
+
+<dependency>
+   <groupId>org.apache.mrunit</groupId>
+   <artifactId>mrunit</artifactId>
+   <version>1.0.0</version>
+   <scope>test</scope>
+</dependency>
+----
+
+Next, use the `ReduceDriver` provided by MRUnit to test your reducer.
+
+[source,java]
+----
+
+public class MyReducerTest {
+    ReduceDriver<Text, Text, ImmutableBytesWritable, Writable> reduceDriver;
+    byte[] CF = "CF".getBytes();
+    byte[] QUALIFIER = "CQ-1".getBytes();
+
+    @Before
+    public void setUp() {
+      MyReducer reducer = new MyReducer();
+      reduceDriver = ReduceDriver.newReduceDriver(reducer);
+    }
+
+    @Test
+    public void testHBaseInsert() throws IOException {
+      String strKey = "RowKey-1", strValue = "DATA", strValue1 = "DATA1", strValue2 = "DATA2";
+      List<Text> list = new ArrayList<Text>();
+      list.add(new Text(strValue));
+      list.add(new Text(strValue1));
+      list.add(new Text(strValue2));
+      //since in our case all that the reducer is doing is appending the records
+      //that the mapper sends it, we should get the following back
+      String expectedOutput = strValue + strValue1 + strValue2;
+      //setup the input, mimicking what the mapper would have passed
+      //to the reducer, and run the test
+      reduceDriver.withInput(new Text(strKey), list);
+      //run the reducer and get its output
+      List<Pair<ImmutableBytesWritable, Writable>> result = reduceDriver.run();
+
+      //extract the key from the result and verify
+      assertEquals(Bytes.toString(result.get(0).getFirst().get()), strKey);
+
+      //extract the value for CF/QUALIFIER and verify
+      Put a = (Put) result.get(0).getSecond();
+      String c = Bytes.toString(a.get(CF, QUALIFIER).get(0).getValue());
+      assertEquals(expectedOutput, c);
+    }
+
+}
+----
+
+Your MRUnit test verifies that the output is as expected, the Put that is 
inserted into HBase has the correct value, and the ColumnFamily and 
ColumnQualifier have the correct values.
+
+MRUnit includes a `MapDriver` to test mapper jobs, and you can use MRUnit to test other operations, including reading from HBase, processing data, or writing to HDFS.
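+
+As an illustration, here is a minimal sketch of testing a mapper with MRUnit's `MapDriver`. It assumes a hypothetical `MyMapper` (a `Mapper<LongWritable, Text, Text, Text>` that splits each input line into a key and a value); that class is not part of the example above.
+
+[source,java]
+----
+
+public class MyMapperTest {
+    MapDriver<LongWritable, Text, Text, Text> mapDriver;
+
+    @Before
+    public void setUp() {
+      mapDriver = MapDriver.newMapDriver(new MyMapper());
+    }
+
+    @Test
+    public void testMap() throws IOException {
+      //feed one line to the mapper and assert on the pair it should emit
+      mapDriver.withInput(new LongWritable(0), new Text("ROWKEY-1 DATA-1"))
+               .withOutput(new Text("ROWKEY-1"), new Text("DATA-1"))
+               .runTest();
+    }
+}
+----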
+
+== Integration Testing with an HBase Mini-Cluster
+
+HBase ships with HBaseTestingUtility, which makes it easy to write integration 
tests using a [firstterm]_mini-cluster_.
+The first step is to add some dependencies to your Maven POM file.
+Check the versions to be sure they are appropriate.
+
+[source,xml]
+----
+
+<dependency>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-common</artifactId>
+    <version>2.0.0</version>
+    <type>test-jar</type>
+    <scope>test</scope>
+</dependency>
+
+<dependency>
+    <groupId>org.apache.hbase</groupId>
+    <artifactId>hbase</artifactId>
+    <version>0.98.3</version>
+    <type>test-jar</type>
+    <scope>test</scope>
+</dependency>
+        
+<dependency>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-hdfs</artifactId>
+    <version>2.0.0</version>
+    <type>test-jar</type>
+    <scope>test</scope>
+</dependency>
+
+<dependency>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-hdfs</artifactId>
+    <version>2.0.0</version>
+    <scope>test</scope>
+</dependency>
+----
+
+This code represents an integration test for the MyHBaseDAO insert shown in <<unit.tests,unit.tests>>.
+
+[source,java]
+----
+
+public class MyHBaseIntegrationTest {
+    private static HBaseTestingUtility utility;
+    byte[] CF = "CF".getBytes();
+    byte[] CQ1 = "CQ-1".getBytes();
+    byte[] CQ2 = "CQ-2".getBytes();
+
+    @Before
+    public void setup() throws Exception {
+        utility = new HBaseTestingUtility();
+        utility.startMiniCluster();
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        // shut the mini-cluster down so each test starts from a clean slate
+        utility.shutdownMiniCluster();
+    }
+
+    @Test
+    public void testInsert() throws Exception {
+        HTableInterface table = utility.createTable(Bytes.toBytes("MyTest"),
+                Bytes.toBytes("CF"));
+        HBaseTestObj obj = new HBaseTestObj();
+        obj.setRowKey("ROWKEY-1");
+        obj.setData1("DATA-1");
+        obj.setData2("DATA-2");
+        MyHBaseDAO.insertRecord(table, obj);
+        Get get1 = new Get(Bytes.toBytes(obj.getRowKey()));
+        get1.addColumn(CF, CQ1);
+        Result result1 = table.get(get1);
+        assertEquals(Bytes.toString(result1.getRow()), obj.getRowKey());
+        assertEquals(Bytes.toString(result1.value()), obj.getData1());
+        Get get2 = new Get(Bytes.toBytes(obj.getRowKey()));
+        get2.addColumn(CF, CQ2);
+        Result result2 = table.get(get2);
+        assertEquals(Bytes.toString(result2.getRow()), obj.getRowKey());
+        assertEquals(Bytes.toString(result2.value()), obj.getData2());
+    }
+}
+----
+
+This code creates an HBase mini-cluster and starts it.
+Next, it creates a table called `MyTest` with one column family, `CF`.
+A record is inserted, a Get is performed from the same table, and the 
insertion is verified.
+
+NOTE: Starting the mini-cluster takes about 20-30 seconds, but that should be 
appropriate for integration testing. 
+
+To use an HBase mini-cluster on Microsoft Windows, you need to use a Cygwin 
environment.
+
+See the paper at 
link:http://blog.sematext.com/2010/08/30/hbase-case-study-using-hbasetestingutility-for-local-testing-development/[HBase
 Case-Study: Using HBaseTestingUtility for Local Testing and
+                Development] (2010) for more information about 
HBaseTestingUtility.

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb77a925/src/main/asciidoc/_chapters/upgrading.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/upgrading.adoc 
b/src/main/asciidoc/_chapters/upgrading.adoc
new file mode 100644
index 0000000..e90b98a
--- /dev/null
+++ b/src/main/asciidoc/_chapters/upgrading.adoc
@@ -0,0 +1,450 @@
+////
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+////
+
+[[upgrading]]
+= Upgrading
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+You cannot skip major versions when upgrading. If you are upgrading from version 0.90.x to 0.94.x, you must first go from 0.90.x to 0.92.x and then go from 0.92.x to 0.94.x.
+
+NOTE: It may be possible to skip across versions -- for example, go from 0.92.2 straight to 0.98.0 just following the 0.96.x upgrade instructions -- but we have not tried it so cannot say whether it works or not.
+
+Review <<configuration>>, in particular <<hadoop>>.
+
+[[hbase.versioning]]
+== HBase version number and compatibility
+
+HBase has two versioning schemes, pre-1.0 and post-1.0. Both are detailed 
below.
+
+[[hbase.versioning.post10]]
+=== Post 1.0 versions
+
+Starting with 1.0.0 release, HBase uses link:http://semver.org/[Semantic 
Versioning] for its release versioning. In summary:
+
+.Given a version number MAJOR.MINOR.PATCH, increment the:
+* MAJOR version when you make incompatible API changes,
+* MINOR version when you add functionality in a backwards-compatible manner, 
and
+* PATCH version when you make backwards-compatible bug fixes.
+* Additional labels for pre-release and build metadata are available as 
extensions to the MAJOR.MINOR.PATCH format.
+
+[[hbase.versioning.compat]]
+.Compatibility Dimensions
+In addition to the usual API versioning considerations, HBase has other compatibility dimensions that we need to consider.
+
+.Client-Server wire protocol compatibility
+* Allows updating client and server out of sync.
+* We can only allow the server to be upgraded first; that is, the server is backward compatible with an old client, so new APIs are OK.
+* Example: A user should be able to use an old client to connect to an 
upgraded cluster.
+
+.Server-Server protocol compatibility
+* Servers of different versions can co-exist in the same cluster.
+* The wire protocol between servers is compatible.
+* Workers for distributed tasks, such as replication and log splitting, can 
co-exist in the same cluster.
+* Dependent protocols (such as using ZK for coordination) will also not be 
changed.
+* Example: A user can perform a rolling upgrade.
+
+.File format compatibility
+* File formats are backward and forward compatible.
+* Example: File, ZK encoding, and directory layout are upgraded automatically as part of an HBase upgrade. The user can roll back to the older version and everything will continue to work.
+
+.Client API compatibility
+* Allow changing or removing existing client APIs.
+* An API needs to be deprecated for a major version before we will change or remove it.
+* Example: A user using a newly deprecated API does not need to modify application code that makes HBase API calls until the next major version.
+
+.Client Binary compatibility
+* Old client code can run unchanged (no recompilation needed) against new jars.
+* Example: Old compiled client code will work unchanged with the new jars.
+
+.Server-Side Limited API compatibility (taken from Hadoop)
+* Internal APIs are marked as Stable, Evolving, or Unstable
+* This implies binary compatibility for coprocessors and plugins (pluggable 
classes, including replication) as long as these are only using marked 
interfaces/classes.
+* Example: Old compiled Coprocessor, Filter, or Plugin code will work 
unchanged with the new jars.
+
+.Dependency Compatibility
+* An upgrade of HBase will not require an incompatible upgrade of a dependent 
project, including the Java runtime.
+* Example: An upgrade of Hadoop will not invalidate any of the compatibility guarantees we made.
+
+.Operational Compatibility
+* Metric changes
+* Behavioral changes of services
+* Web page APIs
+
+.Summary
+* A patch upgrade is a drop-in replacement. Any change that is not Java binary 
compatible would not be allowed.footnote:[See 
http://docs.oracle.com/javase/specs/jls/se7/html/jls-13.html.]
+
+* A minor upgrade requires no application/client code modification. Ideally it 
would be a drop-in replacement but client code, coprocessors, filters, etc 
might have to be recompiled if new jars are used.
+
+* A major upgrade allows the HBase community to make breaking changes.
+
+.Compatibility Matrix footnote:[Note that this indicates what could break, not 
that it will break. We will/should add specifics in our release notes.]
+[cols="1,1,1,1"]
+|===
+| | Major | Minor | Patch
+|Client-Server wire Compatibility|  N |Y |Y
+|Server-Server Compatibility |N |Y |Y
+|File Format Compatibility | N 
footnote:[comp_matrix_offline_upgrade_note,Running an offline upgrade tool 
without rollback might be needed. We will typically only support migrating data 
from major version X to major version X+1.] | Y |Y
+|Client API Compatibility  | N | Y |Y
+|Client Binary Compatibility | N | N |Y
+4+|Server-Side Limited API Compatibility
+>| Stable | N | Y | Y
+>| Evolving | N |N |Y
+>| Unstable | N |N |N
+|Dependency Compatibility | N |Y |Y
+|Operational Compatibility | N |N |Y
+|===
+
+[[hbase.client.api.surface]]
+==== HBase API Surface
+
+HBase has a lot of API points, but for the compatibility matrix above, we 
differentiate between Client API, Limited Private API, and Private API. HBase 
uses a version of 
link:https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Compatibility.html[Hadoop's
 Interface classification]. HBase's Interface classification classes can be 
found 
link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/classification/package-summary.html[here].
+
+* InterfaceAudience: captures the intended audience, possible values are 
Public (for end users and external projects), LimitedPrivate (for other 
Projects, Coprocessors or other plugin points), and Private (for internal use).
+* InterfaceStability: describes what types of interface changes are permitted. 
Possible values are Stable, Evolving, Unstable, and Deprecated.
+
+[[hbase.client.api]]
+HBase Client API::
+  HBase Client API consists of all the classes or methods that are marked with the InterfaceAudience.Public annotation. All main classes in hbase-client and dependent modules have either the InterfaceAudience.Public, InterfaceAudience.LimitedPrivate, or InterfaceAudience.Private marker. Not all classes in other modules (hbase-server, etc.) have the marker. If a class is not annotated with one of these, it is assumed to be an InterfaceAudience.Private class.
+
+[[hbase.limitetprivate.api]]
+HBase LimitedPrivate API::
+  The LimitedPrivate annotation comes with a set of target consumers for the interfaces. Those consumers are coprocessors, Phoenix, replication endpoint implementations, or similar. At this point, HBase only guarantees source and binary compatibility for these interfaces between patch versions.
+
+[[hbase.private.api]]
+HBase Private API::
+  All classes annotated with InterfaceAudience.Private, and all classes that do not have the annotation, are for HBase internal use only. The interfaces and method signatures can change at any point in time. If you are relying on a particular interface that is marked Private, you should open a jira to propose changing the interface to be Public or LimitedPrivate, or to have an interface exposed for this purpose.
+
+[[hbase.versioning.pre10]]
+=== Pre 1.0 versions
+
+Before the semantic versioning scheme, i.e. pre-1.0, HBase tracked either Hadoop's versions (0.2x) or 0.9x versions. If you are into the arcane, check out our old wiki page on link:http://wiki.apache.org/hadoop/Hbase/HBaseVersions[HBase Versioning], which tries to connect the HBase version dots. The sections below cover ONLY the releases before 1.0.
+
+[[hbase.development.series]]
+.Odd/Even Versioning or "Development" Series Releases
+Ahead of big releases, we have been putting up preview versions to start the feedback cycle turning over earlier. These "Development" Series releases, always odd-numbered, come with no guarantees, not even regarding being able to upgrade between two sequential releases (we reserve the right to break compatibility across "Development" Series releases). Needless to say, these releases are not for production deploys. They are a preview of what is coming in the hope that interested parties will take the release for a test drive and flag us early if there are issues we've missed ahead of our rolling a production-worthy release.
+
+Our first "Development" Series was the 0.89 set that came out ahead of HBase 
0.90.0. HBase 0.95 is another "Development" Series that portends HBase 0.96.0. 
0.99.x is the last series in "developer preview" mode before 1.0. Afterwards, 
we will be using semantic versioning naming scheme (see above).
+
+[[hbase.binary.compatibility]]
+.Binary Compatibility
+When we say two HBase versions are compatible, we mean that the versions are 
wire and binary compatible. Compatible HBase versions means that clients can 
talk to compatible but differently versioned servers. It means too that you can 
just swap out the jars of one version and replace them with the jars of 
another, compatible version and all will just work. Unless otherwise specified, 
HBase point versions are (mostly) binary compatible. You can safely do rolling 
upgrades between binary compatible versions; i.e. across point versions: e.g. 
from 0.94.5 to 0.94.6. See the discussion _Does compatibility between versions also mean binary compatibility?_ on the HBase dev mailing list.
+
+[[hbase.rolling.upgrade]]
+=== Rolling Upgrades
+
+A rolling upgrade is the process by which you update the servers in your 
cluster a server at a time. You can rolling upgrade across HBase versions if 
they are binary or wire compatible. See <<hbase.rolling.restart>> for more on 
what this means. Coarsely, a rolling upgrade means gracefully stopping each server, updating the software, and then restarting. You do this for each server in the
cluster. Usually you upgrade the Master first and then the regionservers. See 
<<rolling>> for tools that can help use the rolling upgrade process.
+
+For example, in the below, hbase was symlinked to the actual hbase install. On 
upgrade, before running a rolling restart over the cluster, we changed the
symlink to point at the new HBase software version and then ran
+
+[source,bash]
+----
+$ HADOOP_HOME=~/hadoop-2.6.0-CRC-SNAPSHOT ~/hbase/bin/rolling-restart.sh 
--config ~/conf_hbase
+----
+
+The rolling-restart script will first gracefully stop and restart the master, 
and then each of the regionservers in turn. Because the symlink was changed, on 
restart the server will come up using the new hbase version. Check logs for 
errors as the rolling upgrade proceeds.
+
+[[hbase.rolling.restart]]
+.Rolling Upgrade Between Versions that are Binary/Wire Compatible
+Unless otherwise specified, HBase point versions are binary compatible. You 
can do a <<hbase.rolling.upgrade>> between hbase point versions. For example, 
you can go to 0.94.6 from 0.94.5 by doing a rolling upgrade across the cluster 
replacing the 0.94.5 binary with a 0.94.6 binary.
+
+In the minor version-particular sections below, we call out where the versions 
are wire/protocol compatible and in this case, it is also possible to do a 
<<hbase.rolling.upgrade>>. For example, in <<upgrade1.0.rolling.upgrade>>, we 
state that it is possible to do a rolling upgrade between hbase-0.98.x and 
hbase-1.0.0.
+
+== Upgrade Paths
+
+[[upgrade1.0]]
+=== Upgrading from 0.98.x to 1.0.x
+
+In this section we first note the significant changes that come in with 1.0.0 
HBase and then we go over the upgrade process.  Be sure to read the significant 
changes section with care so you avoid surprises.
+
+==== Changes of Note!
+
+Here we list important changes that are in 1.0.0 since 0.98.x, changes you should be aware of, as they will go into effect once you upgrade.
+
+[[zookeeper.3.4]]
+.ZooKeeper 3.4 is required in HBase 1.0.0
+See <<zookeeper.requirements>>.
+
+[[default.ports.changed]]
+.HBase Default Ports Changed
+The ports used by HBase changed. They used to be in the 600XX range. In
hbase-1.0.0 they have been moved up out of the ephemeral port range and are 
160XX instead (Master web UI was 60010 and is now 16010; the RegionServer web 
UI was 60030 and is now 16030, etc). If you want to keep the old port 
locations, copy the port setting configs from _hbase-default.xml_ into 
_hbase-site.xml_, change them back to the old values from hbase-0.98.x era, and 
ensure you've distributed your configurations before you restart.
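+
+For example, here is a sketch of pinning just the two web UI ports back to their 0.98.x-era values in _hbase-site.xml_ (the remaining port settings can be copied from _hbase-default.xml_ and changed back the same way):
+
+[source,xml]
+----
+<property>
+  <name>hbase.master.info.port</name>
+  <value>60010</value>
+</property>
+<property>
+  <name>hbase.regionserver.info.port</name>
+  <value>60030</value>
+</property>
+----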
+
+[[upgrade1.0.hbase.bucketcache.percentage.in.combinedcache]]
+.hbase.bucketcache.percentage.in.combinedcache configuration has been REMOVED
+You may have made use of this configuration if you are using BucketCache. If NOT using BucketCache, this change does not affect you. Its removal means that your L1 LruBlockCache is now sized using `hfile.block.cache.size` -- i.e. the way you would size the on-heap L1 LruBlockCache if you were NOT doing BucketCache -- and the BucketCache size is now whatever the setting for `hbase.bucketcache.size` is. You may need to adjust configs to get the LruBlockCache and BucketCache sizes set to what they were in 0.98.x and previous. If you did not set this config, its default value was 0.9. If you do nothing, your BucketCache will increase in size by 10%. Your L1 LruBlockCache will become `hfile.block.cache.size` times your java heap size (`hfile.block.cache.size` is a float between 0.0 and 1.0). To read more, see link:https://issues.apache.org/jira/browse/HBASE-11520[HBASE-11520 Simplify offheap cache config by removing the confusing "hbase.bucketcache.percentage.in.combinedcache"].
+
+[[hbase-12068]]
+.If you have your own custom filters....
+See the release notes on the issue 
link:https://issues.apache.org/jira/browse/HBASE-12068[HBASE-12068 [Branch-1\] 
Avoid need to always do KeyValueUtil#ensureKeyValue for Filter transformCell]; 
be sure to follow the recommendations therein.
+
+[[dlr]]
+.Distributed Log Replay
+<<distributed.log.replay>> is off by default in hbase-1.0. Enabling it can make a big difference, improving HBase MTTR. Enable this feature if you are doing a clean stop/start when you are upgrading. You cannot do a rolling upgrade onto this feature (caveat: if you are running on a version of hbase in excess of hbase-0.98.4 -- see link:https://issues.apache.org/jira/browse/HBASE-12577[HBASE-12577 Disable distributed log replay by default] for more).
+
+[[upgrade1.0.rolling.upgrade]]
+==== Rolling upgrade from 0.98.x to HBase 1.0.0
+.From 0.96.x to 1.0.0
+NOTE: You cannot do a <<hbase.rolling.upgrade,rolling upgrade>> from 0.96.x to 
1.0.0 without first doing a rolling upgrade to 0.98.x. See comment in 
link:https://issues.apache.org/jira/browse/HBASE-11164?focusedCommentId=14182330&amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&#35;comment-14182330[HBASE-11164
 Document and test rolling updates from 0.98 -> 1.0] for the why. Also because 
hbase-1.0.0 enables hfilev3 by default, 
link:https://issues.apache.org/jira/browse/HBASE-9801[HBASE-9801 Change the 
default HFile version to V3], and support for hfilev3 only arrives in 0.98, 
this is another reason you cannot do a rolling upgrade from hbase-0.96.x; if the rolling upgrade stalls, the 0.96.x servers cannot open files written by the newer hbase-1.0.0 servers, which write hfilev3.
+
+There are no known issues running a <<hbase.rolling.upgrade,rolling upgrade>> 
from hbase-0.98.x to hbase-1.0.0.
+
+[[upgrade1.0.from.0.94]]
+==== Upgrading to 1.0 from 0.94
+You cannot do a rolling upgrade from 0.94.x to 1.x.x. You must stop your cluster, install the 1.x.x software, run the migration described at <<executing.the.0.96.upgrade>> (substituting 1.x.x wherever we make mention of 0.96.x in the section below), and then restart. Be sure to upgrade your ZooKeeper if it is a version less than the required 3.4.x.
+
+[[upgrade0.98]]
+=== Upgrading from 0.96.x to 0.98.x
+A rolling upgrade from 0.96.x to 0.98.x works. The two versions are not binary 
compatible.
+
+Additional steps are required to take advantage of some of the new features of 
0.98.x, including cell visibility labels, cell ACLs, and transparent server 
side encryption. See <<security>> for more information. Significant performance 
improvements include a change to the write ahead log threading model that 
provides higher transaction throughput under high load, reverse scanners, 
MapReduce over snapshot files, and striped compaction.
+
+Clients and servers can run with 0.98.x and 0.96.x versions. However, 
applications may need to be recompiled due to changes in the Java API.
+
+=== Upgrading from 0.94.x to 0.98.x
+A rolling upgrade from 0.94.x directly to 0.98.x does not work. The upgrade 
path follows the same procedures as <<upgrade0.96>>. Additional steps are 
required to use some of the new features of 0.98.x. See <<upgrade0.98>> for an 
abbreviated list of these features.
+
+[[upgrade0.96]]
+=== Upgrading from 0.94.x to 0.96.x
+
+==== The "Singularity"
+
+.HBase 0.96.x was EOL'd, September 1st, 2014
+NOTE: Do not deploy 0.96.x. Deploy at least 0.98.x. See link:https://issues.apache.org/jira/browse/HBASE-11642[EOL 0.96].
+
+You will have to stop your old 0.94.x cluster completely to upgrade. If you 
are replicating between clusters, both clusters will have to go down to 
upgrade. Make sure it is a clean shutdown. The fewer WAL files around, the
faster the upgrade will run (the upgrade will split any log files it finds in 
the filesystem as part of the upgrade process). All clients must be upgraded to 
0.96 too.
+
+The API has changed. You will need to recompile your code against 0.96 and you 
may need to adjust applications to go against new APIs (TODO: List of changes).
+
+[[executing.the.0.96.upgrade]]
+==== Executing the 0.96 Upgrade
+
+.HDFS and ZooKeeper must be up!
+NOTE: HDFS and ZooKeeper should be up and running during the upgrade process.
+
+hbase-0.96.0 comes with an upgrade script. Run
+
+[source,bash]
+----
+$ bin/hbase upgrade
+----
+to see its usage. The script has two main modes: `-check` and `-execute`.
+
+.check
+The check step is run against a running 0.94 cluster. Run it from a downloaded 
0.96.x binary. The check step is looking for the presence of HFileV1 files. 
These are unsupported in hbase-0.96.0. To purge them -- have them rewritten as 
HFileV2 -- you must run a compaction.
+
+The check step prints stats at the end of its run (grep for `Result:` in the log), printing the absolute paths of the tables it scanned, any HFileV1 files found, the regions containing said files (the regions we need to major compact to purge the HFileV1s), and any corrupted files found. A corrupt file is unreadable, and so its format is undefined (neither HFileV1 nor HFileV2).
+
+To run the check step, run 
+
+[source,bash]
+----
+$ bin/hbase upgrade -check
+----
+
+Here is sample output:
+----
+Tables Processed:
+hdfs://localhost:41020/myHBase/.META.
+hdfs://localhost:41020/myHBase/usertable
+hdfs://localhost:41020/myHBase/TestTable
+hdfs://localhost:41020/myHBase/t
+
+Count of HFileV1: 2
+HFileV1:
+hdfs://localhost:41020/myHBase/usertable    
/fa02dac1f38d03577bd0f7e666f12812/family/249450144068442524
+hdfs://localhost:41020/myHBase/usertable    
/ecdd3eaee2d2fcf8184ac025555bb2af/family/249450144068442512
+
+Count of corrupted files: 1
+Corrupted Files:
+hdfs://localhost:41020/myHBase/usertable/fa02dac1f38d03577bd0f7e666f12812/family/1
+Count of Regions with HFileV1: 2
+Regions to Major Compact:
+hdfs://localhost:41020/myHBase/usertable/fa02dac1f38d03577bd0f7e666f12812
+hdfs://localhost:41020/myHBase/usertable/ecdd3eaee2d2fcf8184ac025555bb2af
+
+There are some HFileV1, or corrupt files (files with incorrect major version)
+----
+
+In the above sample output, there are two HFileV1 in two regions, and one 
corrupt file. Corrupt files should probably be removed. The regions that have 
HFileV1s need to be major compacted. To major compact, start up the hbase shell 
and review how to compact an individual region. After the major compaction is 
done, rerun the check step and the HFileV1s shoudl be gone, replaced by HFileV2 
instances.
+
+By default, the check step scans the hbase root directory (defined as `hbase.rootdir` in the configuration). To scan a specific directory only, pass the `-dir` option.
+[source,bash]
+----
+$ bin/hbase upgrade -check -dir /myHBase/testTable
+----
+The above command would detect HFileV1s in the _/myHBase/testTable_ directory.
+
+Once the check step reports all the HFileV1 files have been rewritten, it is 
safe to proceed with the upgrade.
+
+.execute
+After the _check_ step shows the cluster is free of HFileV1, it is safe to 
proceed with the upgrade. Next is the _execute_ step. You must *SHUTDOWN YOUR 
0.94.x CLUSTER* before you can run the execute step. The execute step will not 
run if it detects running HBase masters or regionservers.
+
+[NOTE]
+====
+HDFS and ZooKeeper should be up and running during the upgrade process. If 
zookeeper is managed by HBase, then you can start zookeeper so it is available 
to the upgrade by running 
+[source,bash]
+----
+$ ./hbase/bin/hbase-daemon.sh start zookeeper
+----
+====
+
+The execute upgrade step is made of three substeps.
+
+* Namespaces: HBase 0.96.0 has support for namespaces. The upgrade needs to 
reorder directories in the filesystem for namespaces to work.
+
+* ZNodes: All znodes are purged so that new ones can be written in their place using a new protobuf'ed format, and a few are migrated in place (e.g. replication and table state znodes).
+
+* WAL Log Splitting: If the 0.94.x cluster shutdown was not clean, we'll split 
WAL logs as part of migration before we startup on 0.96.0. This WAL splitting 
runs slower than the native distributed WAL splitting because it is all inside 
the single upgrade process (so try and get a clean shutdown of the 0.94.0 
cluster if you can).
+
+To run the _execute_ step, make sure that you have first copied the hbase-0.96.0 binaries everywhere, under servers and under clients. Make sure the 0.94.0 cluster is down. Then do as follows:
+[source,bash]
+----
+$ bin/hbase upgrade -execute
+----
+Here is some sample output.
+
+----
+Starting Namespace upgrade
+Created version file at hdfs://localhost:41020/myHBase with version=7
+Migrating table testTable to 
hdfs://localhost:41020/myHBase/.data/default/testTable
+.....
+Created version file at hdfs://localhost:41020/myHBase with version=8
+Successfully completed NameSpace upgrade.
+Starting Znode upgrade
+.....
+Successfully completed Znode upgrade
+
+Starting Log splitting
+...
+Successfully completed Log splitting
+----
+         
+If the output from the execute step looks good, stop the zookeeper instance 
you started to do the upgrade:
+[source,bash]
+----
+$ ./hbase/bin/hbase-daemon.sh stop zookeeper
+----
+Now start up hbase-0.96.0.
+
+[[s096.migration.troubleshooting]]
+=== Troubleshooting
+
+[[s096.migration.troubleshooting.old.client]]
+.Old Client connecting to 0.96 cluster
+It will fail with an exception like the below. Upgrade.
+----
+17:22:15  Exception in thread "main" java.lang.IllegalArgumentException: Not a 
host:port pair: PBUF
+17:22:15  *
+17:22:15   api-compat-8.ent.cloudera.com ��  ���(
+17:22:15    at 
org.apache.hadoop.hbase.util.Addressing.parseHostname(Addressing.java:60)
+17:22:15    at org.apache.hadoop.hbase.ServerName.<init>(ServerName.java:101)
+17:22:15    at 
org.apache.hadoop.hbase.ServerName.parseVersionedServerName(ServerName.java:283)
+17:22:15    at 
org.apache.hadoop.hbase.MasterAddressTracker.bytesToServerName(MasterAddressTracker.java:77)
+17:22:15    at 
org.apache.hadoop.hbase.MasterAddressTracker.getMasterAddress(MasterAddressTracker.java:61)
+17:22:15    at 
org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.getMaster(HConnectionManager.java:703)
+17:22:15    at 
org.apache.hadoop.hbase.client.HBaseAdmin.<init>(HBaseAdmin.java:126)
+17:22:15    at Client_4_3_0.setup(Client_4_3_0.java:716)
+17:22:15    at Client_4_3_0.main(Client_4_3_0.java:63)
+----
+
+==== Upgrading `META` to use Protocol Buffers (Protobuf)
+
+When you upgrade from versions prior to 0.96, `META` needs to be converted to 
use protocol buffers. This is controlled by the configuration option 
`hbase.MetaMigrationConvertingToPB`, which is set to `true` by default. 
Therefore, by default, no action is required on your part.
+
+The migration is a one-time event. However, every time your cluster starts, 
`META` is scanned to ensure that it does not need to be converted. If you have 
a very large number of regions, this scan can take a long time. Starting in 
0.98.5, you can set `hbase.MetaMigrationConvertingToPB` to `false` in 
_hbase-site.xml_, to disable this start-up scan. This should be considered an 
expert-level setting.
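+
+For example, the _hbase-site.xml_ entry that disables the start-up scan (0.98.5 and later only) looks like this:
+
+[source,xml]
+----
+<property>
+  <name>hbase.MetaMigrationConvertingToPB</name>
+  <value>false</value>
+</property>
+----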
+
+[[upgrade0.94]]
+=== Upgrading from 0.92.x to 0.94.x
+We used to think that 0.92 and 0.94 were interface compatible and that you could do a rolling upgrade between these versions, but then we figured that link:https://issues.apache.org/jira/browse/HBASE-5357[HBASE-5357 Use builder pattern in HColumnDescriptor] changed method signatures so rather than return void they instead return HColumnDescriptor. This will throw `java.lang.NoSuchMethodError: org.apache.hadoop.hbase.HColumnDescriptor.setMaxVersions(I)V`, so 0.92 and 0.94 are NOT compatible. You cannot do a rolling upgrade between them.
+
+[[upgrade0.92]]
+=== Upgrading from 0.90.x to 0.92.x
+==== Upgrade Guide
+You will find that 0.92.0 runs a little differently from 0.90.x releases. Here are a few things to watch out for when upgrading from 0.90.x to 0.92.0.
+
+.tl;dr
+[NOTE]
+====
+If you have no patience, here are the important things to know before upgrading.
+. Once you upgrade, you can’t go back.
+
+. MSLAB is on by default. Watch that heap usage if you have a lot of regions.
+
+. Distributed Log Splitting is on by default. It should make region server 
failover faster.
+
+. There’s a separate tarball for security.
+
+. If `-XX:MaxDirectMemorySize` is set in your _hbase-env.sh_, it’s going to 
enable the experimental off-heap cache (You may not want this).
+====
+
+.You can’t go back!
+To move to 0.92.0, all you need to do is shutdown your cluster, replace your 
hbase 0.90.x with hbase 0.92.0 binaries (be sure you clear out all 0.90.x 
instances) and restart (You cannot do a rolling restart from 0.90.x to 0.92.x 
-- you must restart). On startup, the `.META.` table content is rewritten 
removing the table schema from the `info:regioninfo` column. Also, any flushes 
done post first startup will write out data in the new 0.92.0 file format, 
<<hfilev2>>. This means you cannot go back to 0.90.x once you’ve started 
HBase 0.92.0 over your HBase data directory.
+
+.MSLAB is ON by default
+In 0.92.0, the <<hbase.hregion.memstore.mslab.enabled,hbase.hregion.memstore.mslab.enabled>> flag is set to `true` (See <<gcpause>>). In 0.90.x it was `false`. When it is enabled, memstores will allocate memory in 2MB MSLAB chunks even if the memstore has zero or just a few small elements. This is fine usually, but if you had lots of regions per regionserver in a 0.90.x cluster (and MSLAB was off), you may find yourself OOME'ing on upgrade because the `thousands of regions * number of column families * 2MB MSLAB` (at a minimum) puts your heap over the top. Set `hbase.hregion.memstore.mslab.enabled` to `false` or set the MSLAB size down from 2MB by setting `hbase.hregion.memstore.mslab.chunksize` to something less.
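+
+For example, here is a sketch of the two _hbase-site.xml_ settings mentioned above; the 1MB chunk size is illustrative, not a recommendation:
+
+[source,xml]
+----
+<!-- either disable MSLAB entirely... -->
+<property>
+  <name>hbase.hregion.memstore.mslab.enabled</name>
+  <value>false</value>
+</property>
+<!-- ...or keep it on but shrink the chunk size (in bytes) -->
+<property>
+  <name>hbase.hregion.memstore.mslab.chunksize</name>
+  <value>1048576</value>
+</property>
+----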
+
+[[dls]]
+.Distributed Log Splitting is on by default
+Previously, WAL logs on crash were split by the Master alone. In 0.92.0, log
splitting is done by the cluster (See 
link:https://issues.apache.org/jira/browse/hbase-1364[HBASE-1364 [performance\] 
Distributed splitting of regionserver commit logs] or see the blog post 
link:http://blog.cloudera.com/blog/2012/07/hbase-log-splitting/[Apache HBase 
Log Splitting]). This should cut down significantly on the amount of time it 
takes splitting logs and getting regions back online again.
+
+.Memory accounting is different now
+In 0.92.0, <<hfilev2>> indices and bloom filters take up residence in the same LRU used for caching blocks that come from the filesystem. In 0.90.x, the HFile v1 indices lived outside of the LRU, so they took up space even if the index was on a 'cold' file, one that wasn't being actively used. With the indices now in the LRU, you may find you have less space for block caching. Adjust your block cache accordingly. See <<block.cache>> for more detail. The block cache default size has been changed in 0.92.0 from 0.2 (20 percent of heap) to 0.25.
+
+.On the Hadoop version to use
+Run 0.92.0 on Hadoop 1.0.x (or CDH3u3 when it ships). The performance benefits 
are worth making the move. Otherwise, our Hadoop prescription is as it has 
been; you need a Hadoop that supports a working sync. See <<hadoop>>.
+
+If running on Hadoop 1.0.x (or CDH3u3), enable local read. See 
link:http://files.meetup.com/1350427/hug_ebay_jdcryans.pdf[Practical Caching] 
presentation for ruminations on the performance benefits ‘going local’ (and 
for how to enable local reads).
+
+.HBase 0.92.0 ships with ZooKeeper 3.4.2
+If you can, upgrade your zookeeper. If you can’t, 3.4.2 clients should work 
against 3.3.X ensembles (HBase makes use of 3.4.2 API).
+
+.Online alter is off by default
+In 0.92.0, we’ve added an experimental online schema alter facility (See <<hbase.online.schema.update.enable,hbase.online.schema.update.enable>>). It’s off by default. Enable it at your own risk. Online alter and splitting tables do not play well together, so be sure your cluster is quiescent while using this feature (for now).
+
+.WebUI
+The webui has had a few additions made in 0.92.0. It now shows a list of the 
regions currently transitioning, recent compactions/flushes, and a process list 
of running processes (usually empty if all is well and requests are being 
handled promptly). Other additions include requests by region, a debugging
servlet dump, etc.
+
+.Security tarball
+We now ship with two tarballs: secure and insecure HBase. Documentation on how to set up a secure HBase is on the way.
+
+.Changes in HBase replication
+0.92.0 adds two new features: multi-slave and multi-master replication. The 
way to enable this is the same as adding a new peer, so in order to have 
multi-master you would just run add_peer for each cluster that acts as a master 
to the other slave clusters. Collisions are handled at the timestamp level, which may or may not be what you want; this needs to be evaluated on a per-use-case basis. Replication is still experimental in 0.92 and is disabled by default; run it at your own risk.
+
+.RegionServer now aborts if OOME
+On an OOME, we now have the JVM kill -9 the regionserver process so it goes down fast. Previously, a RegionServer might stick around after incurring an OOME, limping along in some wounded state. To disable this facility (though we recommend you leave it in place), you’d need to edit the _bin/hbase_ file. Look for the addition of the `-XX:OnOutOfMemoryError="kill -9 %p"` arguments (See link:https://issues.apache.org/jira/browse/HBASE-4769[HBASE-4769 - ‘Abort RegionServer Immediately on OOME’]).
+
+.HFile V2 and the “Bigger, Fewer” Tendency
+0.92.0 stores data in a new format, <<hfilev2>>. As HBase runs, it will move 
all your data from HFile v1 to HFile v2 format. This auto-migration will run in 
the background as flushes and compactions run. HFile V2 allows HBase to run with
larger regions/files. In fact, we encourage that all HBasers going forward tend 
toward Facebook axiom #1, run with larger, fewer regions. If you have lots of 
regions now -- more than 100s per host -- you should look into setting your 
region size up after you move to 0.92.0 (In 0.92.0, default size is now 1G, up 
from 256M), and then running online merge tool (See 
link:https://issues.apache.org/jira/browse/HBASE-1621[HBASE-1621 merge tool 
should work on online cluster, but disabled table]).
+
+[[upgrade0.90]]
+=== Upgrading to HBase 0.90.x from 0.20.x or 0.89.x
+This version of 0.90.x HBase can be started on data written by HBase 0.20.x or HBase 0.89.x. There is no need for a migration step. HBase 0.89.x and 0.90.x do write out the names of region directories differently -- they name them with an md5 hash of the region name rather than a jenkins hash -- so this means that once started, there is no going back to HBase 0.20.x.
+
+Be sure to remove the _hbase-default.xml_ from your _conf_ directory on 
upgrade. A 0.20.x version of this file will have sub-optimal configurations for 
0.90.x HBase. The _hbase-default.xml_ file is now bundled into the HBase jar 
and read from there. If you would like to review the content of this file, see 
it in the src tree at _src/main/resources/hbase-default.xml_ or see 
<<hbase_default_configurations>>.
+
+Finally, if upgrading from 0.20.x, check your .META. schema in the shell. In 
the past we would recommend that users run with a 16kb MEMSTORE_FLUSHSIZE. Run
+----
+hbase> scan '-ROOT-'
+----
+in the shell. This will output the current `.META.` schema. Check 
`MEMSTORE_FLUSHSIZE` size. Is it 16kb (16384)? If so, you will need to change 
this (The 'normal'/default value is 64MB (67108864)). Run the script 
`bin/set_meta_memstore_size.rb`. This will make the necessary edit to your 
`.META.` schema. Failure to run this change will make for a slow cluster. See 
link:https://issues.apache.org/jira/browse/HBASE-3499[HBASE-3499 Users 
upgrading to 0.90.0 need to have their .META. table updated with the right 
MEMSTORE_SIZE].
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb77a925/src/main/asciidoc/_chapters/ycsb.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/ycsb.adoc 
b/src/main/asciidoc/_chapters/ycsb.adoc
new file mode 100644
index 0000000..d8ec628
--- /dev/null
+++ b/src/main/asciidoc/_chapters/ycsb.adoc
@@ -0,0 +1,42 @@
+////
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+////
+
+[appendix]
+== YCSB
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+link:https://github.com/brianfrankcooper/YCSB/[YCSB: The
+            Yahoo! Cloud Serving Benchmark] and HBase
+
+TODO: Describe how YCSB is poor for putting up a decent cluster load.
+
+TODO: Describe setup of YCSB for HBase.
+In particular, presplit your tables before you start a run.
+See link:https://issues.apache.org/jira/browse/HBASE-4163[HBASE-4163 Create 
Split Strategy for YCSB Benchmark] for why and a little shell command for how 
to do it.
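+
+As an illustrative sketch (not the exact command from HBASE-4163), a table can be presplit from the HBase shell like so:
+
+----
+hbase> create 'usertable', 'family', {SPLITS => ['user1000', 'user2000', 'user3000']}
+----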
+
+Ted Dunning redid YCSB so that it's mavenized, and added a facility for verifying workloads.
+See link:https://github.com/tdunning/YCSB[Ted Dunning's YCSB].
+
+:numbered:

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb77a925/src/main/asciidoc/_chapters/zookeeper.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/zookeeper.adoc 
b/src/main/asciidoc/_chapters/zookeeper.adoc
new file mode 100644
index 0000000..f6134b7
--- /dev/null
+++ b/src/main/asciidoc/_chapters/zookeeper.adoc
@@ -0,0 +1,451 @@
+////
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+////
+
+[[zookeeper]]
+= ZooKeeper(((ZooKeeper)))
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+A distributed Apache HBase installation depends on a running ZooKeeper cluster.
+All participating nodes and clients need to be able to access the running 
ZooKeeper ensemble.
+Apache HBase by default manages a ZooKeeper "cluster" for you.
+It will start and stop the ZooKeeper ensemble as part of the HBase start/stop 
process.
+You can also manage the ZooKeeper ensemble independent of HBase and just point 
HBase at the cluster it should use.
+To toggle HBase management of ZooKeeper, use the `HBASE_MANAGES_ZK` variable 
in _conf/hbase-env.sh_.
+This variable, which defaults to `true`, tells HBase whether to start/stop the 
ZooKeeper ensemble servers as part of HBase start/stop.
+
+When HBase manages the ZooKeeper ensemble, you can specify ZooKeeper 
configuration using its native _zoo.cfg_ file, or, the easier option is to just 
specify ZooKeeper options directly in _conf/hbase-site.xml_.
+A ZooKeeper configuration option can be set as a property in the HBase 
_hbase-site.xml_ XML configuration file by prefacing the ZooKeeper option name 
with `hbase.zookeeper.property`.
+For example, the `clientPort` setting in ZooKeeper can be changed by setting 
the `hbase.zookeeper.property.clientPort` property.
+For all default values used by HBase, including ZooKeeper configuration, see 
<<hbase_default_configurations,hbase default configurations>>.
+Look for the `hbase.zookeeper.property` prefix.
+For the full list of ZooKeeper configurations, see ZooKeeper's _zoo.cfg_.
+HBase does not ship with a _zoo.cfg_ so you will need to browse the _conf_ 
directory in an appropriate ZooKeeper download.
+
+You must at least list the ensemble servers in _hbase-site.xml_ using the 
`hbase.zookeeper.quorum` property.
+This property defaults to a single ensemble member at `localhost` which is not 
suitable for a fully distributed HBase.
+(It binds to the local machine only and remote clients will not be able to 
connect). 
+
+.How many ZooKeepers should I run?
+[NOTE]
+====
+You can run a ZooKeeper ensemble that comprises 1 node only but in production 
it is recommended that you run a ZooKeeper ensemble of 3, 5 or 7 machines; the 
more members an ensemble has, the more tolerant the ensemble is of host 
failures.
+Also, run an odd number of machines.
+In ZooKeeper, an even number of peers is supported, but it is normally not 
used because an even sized ensemble requires, proportionally, more peers to 
form a quorum than an odd sized ensemble requires.
+For example, an ensemble with 4 peers requires 3 to form a quorum, while an 
ensemble with 5 also requires 3 to form a quorum.
+Thus, an ensemble of 5 allows 2 peers to fail, and thus is more fault tolerant 
than the ensemble of 4, which allows only 1 down peer. 
+
+Give each ZooKeeper server around 1GB of RAM, and if possible, its own 
dedicated disk (A dedicated disk is the best thing you can do to ensure a 
performant ZooKeeper ensemble). For very heavily loaded clusters, run ZooKeeper 
servers on separate machines from RegionServers (DataNodes and TaskTrackers).
+====
+
+For example, to have HBase manage a ZooKeeper quorum on nodes _rs{1,2,3,4,5}.example.com_, bound to port 2222 (the default is 2181), ensure `HBASE_MANAGES_ZK` is commented out or set to `true` in _conf/hbase-env.sh_ and then edit _conf/hbase-site.xml_ and set `hbase.zookeeper.property.clientPort` and `hbase.zookeeper.quorum`.
+You should also set `hbase.zookeeper.property.dataDir` to other than the 
default as the default has ZooKeeper persist data under _/tmp_ which is often 
cleared on system restart.
+In the example below we have ZooKeeper persist to _/usr/local/zookeeper_.
+
+[source,xml]
+----
+
+  <configuration>
+    ...
+    <property>
+      <name>hbase.zookeeper.property.clientPort</name>
+      <value>2222</value>
+      <description>Property from ZooKeeper's config zoo.cfg.
+      The port at which the clients will connect.
+      </description>
+    </property>
+    <property>
+      <name>hbase.zookeeper.quorum</name>
+      
<value>rs1.example.com,rs2.example.com,rs3.example.com,rs4.example.com,rs5.example.com</value>
+      <description>Comma separated list of servers in the ZooKeeper Quorum.
+      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+      By default this is set to localhost for local and pseudo-distributed 
modes
+      of operation. For a fully-distributed setup, this should be set to a full
+      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in 
hbase-env.sh
+      this is the list of servers which we will start/stop ZooKeeper on.
+      </description>
+    </property>
+    <property>
+      <name>hbase.zookeeper.property.dataDir</name>
+      <value>/usr/local/zookeeper</value>
+      <description>Property from ZooKeeper's config zoo.cfg.
+      The directory where the snapshot is stored.
+      </description>
+    </property>
+    ...
+  </configuration>
+----
+
+.What version of ZooKeeper should I use?
+[CAUTION]
+====
+The newer the version, the better.
+For example, some folks have been bitten by 
link:https://issues.apache.org/jira/browse/ZOOKEEPER-1277[ZOOKEEPER-1277].
+If running zookeeper 3.5+, you can ask hbase to make use of the new multi operation by enabling <<hbase.zookeeper.usemulti,hbase.zookeeper.useMulti>> in your _hbase-site.xml_.
+====
+
+.ZooKeeper Maintenance
+[CAUTION]
+====
+Be sure to set up the data dir cleaner described under link:http://zookeeper.apache.org/doc/r3.1.2/zookeeperAdmin.html#sc_maintenance[ZooKeeper Maintenance], or else you could have 'interesting' problems a couple of months in: ZooKeeper can start dropping sessions if it has to run through a directory of hundreds of thousands of logs, which it tends to do around leader reelection time -- a process that is rare but does run on occasion, whether because a machine is dropped or happens to hiccup.
+====
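+
+One way to automate that cleanup is ZooKeeper's autopurge feature; a sketch of the relevant _zoo.cfg_ lines, assuming ZooKeeper 3.4+ (which introduced autopurge):
+
+----
+
+# zoo.cfg -- a sketch, assuming ZooKeeper 3.4+:
+# keep the 3 most recent snapshots and purge older
+# snapshots/transaction logs every 24 hours
+autopurge.snapRetainCount=3
+autopurge.purgeInterval=24
+----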
+
+== Using existing ZooKeeper ensemble
+
+To point HBase at an existing ZooKeeper cluster, one that is not managed by HBase, set `HBASE_MANAGES_ZK` in _conf/hbase-env.sh_ to `false`:
+
+[source,bourne]
+----
+
+  ...
+  # Tell HBase whether it should manage its own instance of Zookeeper or not.
+  export HBASE_MANAGES_ZK=false
+----
+
+Next set ensemble locations and client port, if non-standard, in 
_hbase-site.xml_, or add a suitably configured _zoo.cfg_ to HBase's _CLASSPATH_.
+HBase will prefer the configuration found in _zoo.cfg_ over any settings in 
_hbase-site.xml_.
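+
+A minimal _hbase-site.xml_ sketch for pointing at an external three-node ensemble (the hostnames here are illustrative):
+
+[source,xml]
+----
+
+<configuration>
+  <!-- a sketch; substitute your ensemble's hostnames -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>zk1.example.com,zk2.example.com,zk3.example.com</value>
+  </property>
+  <!-- only needed if your ensemble uses a non-standard client port -->
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+  </property>
+</configuration>
+----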
+
+When HBase manages ZooKeeper, it will start/stop the ZooKeeper servers as a 
part of the regular start/stop scripts.
+If you would like to run ZooKeeper yourself, independent of HBase start/stop, do the following:
+
+----
+
+${HBASE_HOME}/bin/hbase-daemons.sh {start,stop} zookeeper
+----
+
+Note that you can use HBase in this manner to spin up a ZooKeeper cluster unrelated to HBase.
+Just make sure to set `HBASE_MANAGES_ZK` to `false` if you want it to stay up across HBase restarts, so that when HBase shuts down, it doesn't take ZooKeeper down with it.
+
+For more information about running a distinct ZooKeeper cluster, see the 
ZooKeeper 
link:http://hadoop.apache.org/zookeeper/docs/current/zookeeperStarted.html[Getting
+        Started Guide].
+Additionally, see the 
link:http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A7[ZooKeeper Wiki] or the 
link:http://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html#sc_zkMulitServerSetup[ZooKeeper
+        documentation] for more information on ZooKeeper sizing. 
+
+[[zk.sasl.auth]]
+== SASL Authentication with ZooKeeper
+
+Newer releases of Apache HBase (>= 0.92) support connecting to a ZooKeeper Quorum that supports SASL authentication (available in ZooKeeper versions 3.4.0 or later).
+
+This section describes how to set up HBase to mutually authenticate with a ZooKeeper Quorum.
+ZooKeeper/HBase mutual authentication (link:https://issues.apache.org/jira/browse/HBASE-2418[HBASE-2418]) is required as part of a complete secure HBase configuration (link:https://issues.apache.org/jira/browse/HBASE-3025[HBASE-3025]). For simplicity of explanation, this section ignores the additional configuration required (Secure HDFS and Coprocessor configuration). It is recommended to begin with an HBase-managed ZooKeeper configuration (as opposed to a standalone ZooKeeper quorum) for ease of learning.
+
+=== Operating System Prerequisites
+
+You need to have a working Kerberos KDC setup.
+For each `$HOST` that will run a ZooKeeper server, you should have a principal `zookeeper/$HOST`.
+For each such host, add a service key (using the `kadmin` or `kadmin.local` tool's `ktadd` command) for `zookeeper/$HOST`, copy this file to `$HOST`, and make it readable only to the user that will run ZooKeeper on `$HOST`.
+Note the location of this file, which we will use below as _$PATH_TO_ZOOKEEPER_KEYTAB_.
+
+Similarly, for each `$HOST` that will run an HBase server (master or regionserver), you should have a principal: `hbase/$HOST`.
+For each host, add a keytab file called _hbase.keytab_ containing a service 
key for `hbase/$HOST`, copy this file to `$HOST`, and make it readable only to 
the user that will run an HBase service on `$HOST`.
+Note the location of this file, which we will use below as 
_$PATH_TO_HBASE_KEYTAB_. 
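+
+A sketch of the `kadmin.local` session for one such host (the hostname and keytab paths are illustrative):
+
+----
+
+# a sketch; host1.example.com and the keytab paths are illustrative
+kadmin.local: addprinc -randkey zookeeper/host1.example.com
+kadmin.local: ktadd -k /path/to/zookeeper.keytab zookeeper/host1.example.com
+kadmin.local: addprinc -randkey hbase/host1.example.com
+kadmin.local: ktadd -k /path/to/hbase.keytab hbase/host1.example.com
+----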
+
+Each user who will be an HBase client should also be given a Kerberos 
principal.
+This principal should usually have a password assigned to it (as opposed to, 
as with the HBase servers, a keytab file) which only this user knows.
+The client's principal's `maxrenewlife` should be set high enough that the ticket can be renewed for as long as the user's HBase client processes need to run.
+For example, if a user runs a long-running HBase client process that takes at most 3 days, we might create this user's principal within `kadmin` with: `addprinc -maxrenewlife 3days`.
+The ZooKeeper client and server libraries manage their own ticket refreshment by running threads that wake up periodically to do the refreshment.
+
+On each host that will run an HBase client (e.g. `hbase shell`), add the 
following file to the HBase home directory's _conf_ directory:
+
+[source,java]
+----
+
+Client {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useKeyTab=false
+  useTicketCache=true;
+};
+----
+
+We'll refer to this JAAS configuration file as _$CLIENT_CONF_ below.
+
+=== HBase-managed Zookeeper Configuration
+
+On each node that will run a zookeeper, a master, or a regionserver, create a link:http://docs.oracle.com/javase/1.4.2/docs/guide/security/jgss/tutorials/LoginConfigFile.html[JAAS] configuration file in the conf directory of the node's _HBASE_HOME_ directory that looks like the following:
+
+[source,java]
+----
+
+Server {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useKeyTab=true
+  keyTab="$PATH_TO_ZOOKEEPER_KEYTAB"
+  storeKey=true
+  useTicketCache=false
+  principal="zookeeper/$HOST";
+};
+Client {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useKeyTab=true
+  useTicketCache=false
+  keyTab="$PATH_TO_HBASE_KEYTAB"
+  principal="hbase/$HOST";
+};
+----
+
+where the _$PATH_TO_HBASE_KEYTAB_ and _$PATH_TO_ZOOKEEPER_KEYTAB_ files are 
what you created above, and `$HOST` is the hostname for that node.
+
+The `Server` section will be used by the ZooKeeper quorum server, while the `Client` section will be used by the HBase master and regionservers.
+The path to this file should be substituted for the text _$HBASE_SERVER_CONF_ in the _hbase-env.sh_ listing below.
+
+The path to the client JAAS configuration file created earlier should be substituted for the text _$CLIENT_CONF_ in the _hbase-env.sh_ listing below.
+
+Modify your _hbase-env.sh_ to include the following:
+
+[source,bourne]
+----
+
+export HBASE_OPTS="-Djava.security.auth.login.config=$CLIENT_CONF"
+export HBASE_MANAGES_ZK=true
+export HBASE_ZOOKEEPER_OPTS="-Djava.security.auth.login.config=$HBASE_SERVER_CONF"
+export HBASE_MASTER_OPTS="-Djava.security.auth.login.config=$HBASE_SERVER_CONF"
+export HBASE_REGIONSERVER_OPTS="-Djava.security.auth.login.config=$HBASE_SERVER_CONF"
+----
+
+where _$HBASE_SERVER_CONF_ and _$CLIENT_CONF_ are the full paths to the JAAS 
configuration files created above.
+
+Modify your _hbase-site.xml_ on each node that will run a ZooKeeper, a master, or a regionserver to contain:
+
+[source,xml]
+----
+
+<configuration>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>$ZK_NODES</value>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.authProvider.1</name>
+    <value>org.apache.zookeeper.server.auth.SASLAuthenticationProvider</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.kerberos.removeHostFromPrincipal</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.kerberos.removeRealmFromPrincipal</name>
+    <value>true</value>
+  </property>
+</configuration>
+----
+
+where `$ZK_NODES` is the comma-separated list of hostnames of the Zookeeper 
Quorum hosts.
+
+Start your HBase cluster by running one or more of the following commands on the appropriate hosts:
+
+----
+
+bin/hbase zookeeper start
+bin/hbase master start
+bin/hbase regionserver start
+----
+
+=== External Zookeeper Configuration
+
+Add a JAAS configuration file that looks like:
+
+[source,java]
+----
+
+Client {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useKeyTab=true
+  useTicketCache=false
+  keyTab="$PATH_TO_HBASE_KEYTAB"
+  principal="hbase/$HOST";
+};
+----
+
+where the _$PATH_TO_HBASE_KEYTAB_ is the keytab created above for HBase 
services to run on this host, and `$HOST` is the hostname for that node.
+Put this in the HBase home's configuration directory.
+We'll refer to this file's full pathname as _$HBASE_SERVER_CONF_ below.
+
+Modify your _hbase-env.sh_ to include the following:
+
+[source,bourne]
+----
+
+export HBASE_OPTS="-Djava.security.auth.login.config=$CLIENT_CONF"
+export HBASE_MANAGES_ZK=false
+export HBASE_MASTER_OPTS="-Djava.security.auth.login.config=$HBASE_SERVER_CONF"
+export HBASE_REGIONSERVER_OPTS="-Djava.security.auth.login.config=$HBASE_SERVER_CONF"
+----
+
+Modify your _hbase-site.xml_ on each node that will run a master or 
regionserver to contain:
+
+[source,xml]
+----
+
+<configuration>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>$ZK_NODES</value>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+  </property>
+</configuration>
+----
+
+where `$ZK_NODES` is the comma-separated list of hostnames of the Zookeeper 
Quorum hosts.
+
+Add a _zoo.cfg_ for each Zookeeper Quorum host containing:
+
+[source,java]
+----
+
+authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+kerberos.removeHostFromPrincipal=true
+kerberos.removeRealmFromPrincipal=true
+----
+
+Also on each of these hosts, create a JAAS configuration file containing:
+
+[source,java]
+----
+
+Server {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useKeyTab=true
+  keyTab="$PATH_TO_ZOOKEEPER_KEYTAB"
+  storeKey=true
+  useTicketCache=false
+  principal="zookeeper/$HOST";
+};
+----
+
+where `$HOST` is the hostname of each Quorum host.
+We will refer to the full pathname of this file as _$ZK_SERVER_CONF_ below. 
+
+Start your Zookeepers on each Zookeeper Quorum host with:
+
+[source,bourne]
+----
+
+SERVER_JVMFLAGS="-Djava.security.auth.login.config=$ZK_SERVER_CONF" bin/zkServer start
+----
+
+Start your HBase cluster by running one or more of the following commands on the appropriate nodes:
+
+----
+
+bin/hbase master start
+bin/hbase regionserver start
+----
+
+=== Zookeeper Server Authentication Log Output
+
+If the configuration above is successful, you should see something similar to 
the following in your Zookeeper server logs:
+
+----
+
+11/12/05 22:43:39 INFO zookeeper.Login: successfully logged in.
+11/12/05 22:43:39 INFO server.NIOServerCnxnFactory: binding to port 
0.0.0.0/0.0.0.0:2181
+11/12/05 22:43:39 INFO zookeeper.Login: TGT refresh thread started.
+11/12/05 22:43:39 INFO zookeeper.Login: TGT valid starting at:        Mon Dec 
05 22:43:39 UTC 2011
+11/12/05 22:43:39 INFO zookeeper.Login: TGT expires:                  Tue Dec 
06 22:43:39 UTC 2011
+11/12/05 22:43:39 INFO zookeeper.Login: TGT refresh sleeping until: Tue Dec 06 
18:36:42 UTC 2011
+..
+11/12/05 22:43:59 INFO auth.SaslServerCallbackHandler:
+  Successfully authenticated client: 
authenticationID=hbase/ip-10-166-175-249.us-west-1.compute.internal@HADOOP.LOCALDOMAIN;
+  
authorizationID=hbase/ip-10-166-175-249.us-west-1.compute.internal@HADOOP.LOCALDOMAIN.
+11/12/05 22:43:59 INFO auth.SaslServerCallbackHandler: Setting authorizedID: 
hbase
+11/12/05 22:43:59 INFO server.ZooKeeperServer: adding SASL authorization for 
authorizationID: hbase
+----
+
+=== Zookeeper Client Authentication Log Output
+
+On the Zookeeper client side (HBase master or regionserver), you should see 
something similar to the following:
+
+----
+
+11/12/05 22:43:59 INFO zookeeper.ZooKeeper: Initiating client connection, 
connectString=ip-10-166-175-249.us-west-1.compute.internal:2181 
sessionTimeout=180000 watcher=master:60000
+11/12/05 22:43:59 INFO zookeeper.ClientCnxn: Opening socket connection to 
server /10.166.175.249:2181
+11/12/05 22:43:59 INFO zookeeper.RecoverableZooKeeper: The identifier of this 
process is 14851@ip-10-166-175-249
+11/12/05 22:43:59 INFO zookeeper.Login: successfully logged in.
+11/12/05 22:43:59 INFO client.ZooKeeperSaslClient: Client will use GSSAPI as 
SASL mechanism.
+11/12/05 22:43:59 INFO zookeeper.Login: TGT refresh thread started.
+11/12/05 22:43:59 INFO zookeeper.ClientCnxn: Socket connection established to 
ip-10-166-175-249.us-west-1.compute.internal/10.166.175.249:2181, initiating 
session
+11/12/05 22:43:59 INFO zookeeper.Login: TGT valid starting at:        Mon Dec 
05 22:43:59 UTC 2011
+11/12/05 22:43:59 INFO zookeeper.Login: TGT expires:                  Tue Dec 
06 22:43:59 UTC 2011
+11/12/05 22:43:59 INFO zookeeper.Login: TGT refresh sleeping until: Tue Dec 06 
18:30:37 UTC 2011
+11/12/05 22:43:59 INFO zookeeper.ClientCnxn: Session establishment complete on 
server ip-10-166-175-249.us-west-1.compute.internal/10.166.175.249:2181, 
sessionid = 0x134106594320000, negotiated timeout = 180000
+----
+
+=== Configuration from Scratch
+
+This has been tested on the current standard Amazon Linux AMI.
+First, set up the KDC and principals as described above.
+Next, check out the code and run a sanity check.
+
+----
+
+git clone git://git.apache.org/hbase.git
+cd hbase
+mvn clean test -Dtest=TestZooKeeperACL
+----
+
+Then configure HBase as described above.
+Manually edit _target/cached_classpath.txt_ (see the Future improvements section below), then start the daemons:
+
+----
+
+bin/hbase zookeeper &
+bin/hbase master &
+bin/hbase regionserver &
+----
+
+=== Future improvements
+
+==== Fix target/cached_classpath.txt
+
+You must override the standard hadoop-core jar file in `target/cached_classpath.txt` with the version containing the HADOOP-7070 fix.
+You can use the following script to do this:
+
+----
+
+echo `find ~/.m2 -name "*hadoop-core*7070*SNAPSHOT.jar"` ':' `cat target/cached_classpath.txt` | sed 's/ //g' > target/tmp.txt
+mv target/tmp.txt target/cached_classpath.txt
+----
+
+==== Set JAAS configuration programmatically
+
+This would avoid the need for a separate Hadoop jar that fixes 
link:https://issues.apache.org/jira/browse/HADOOP-7070[HADOOP-7070]. 
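+
+A minimal sketch of what this could look like, using the standard `javax.security.auth.login.Configuration` API (the class name and usage here are illustrative, not an existing HBase API):
+
+[source,java]
+----
+
+import java.util.HashMap;
+import java.util.Map;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+
+// A sketch only: install a JAAS "Client" section programmatically,
+// avoiding the external file named by java.security.auth.login.config.
+public class InMemoryJaasConfiguration extends Configuration {
+  private final String keytab;    // e.g. $PATH_TO_HBASE_KEYTAB
+  private final String principal; // e.g. "hbase/$HOST"
+
+  public InMemoryJaasConfiguration(String keytab, String principal) {
+    this.keytab = keytab;
+    this.principal = principal;
+  }
+
+  @Override
+  public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+    if (!"Client".equals(name)) {
+      return null;
+    }
+    Map<String, String> options = new HashMap<String, String>();
+    options.put("useKeyTab", "true");
+    options.put("useTicketCache", "false");
+    options.put("storeKey", "true");
+    options.put("keyTab", keytab);
+    options.put("principal", principal);
+    return new AppConfigurationEntry[] {
+      new AppConfigurationEntry(
+          "com.sun.security.auth.module.Krb5LoginModule",
+          AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+          options)
+    };
+  }
+}
+
+// Usage (a sketch), before any ZooKeeper connection is made:
+//   Configuration.setConfiguration(
+//       new InMemoryJaasConfiguration("/path/to/hbase.keytab",
+//           "hbase/host1.example.com"));
+----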
+
+==== Elimination of `kerberos.removeHostFromPrincipal` and `kerberos.removeRealmFromPrincipal`
+
+
+
+ifdef::backend-docbook[]
+[index]
+= Index
+// Generated automatically by the DocBook toolchain.
+endif::backend-docbook[]

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb77a925/src/main/asciidoc/book.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/book.adoc b/src/main/asciidoc/book.adoc
new file mode 100644
index 0000000..790a23c
--- /dev/null
+++ b/src/main/asciidoc/book.adoc
@@ -0,0 +1,84 @@
+////
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+////
+
+= Apache HBase (TM) Reference Guide image:hbase_logo.png[] 
image:jumping-orca_rotated_25percent.png[]
+:Author: Apache HBase Team
+:Email: <hbase-...@lists.apache.org>
+:doctype: book
+:numbered:
+:toc: left
+:toclevels: 1
+:toc-title: Contents
+:icons: font
+:iconsdir: icons
+:linkcss:
+:experimental:
+:source-language: java
+
+
+:leveloffset: 0
+
+// The directory is called _chapters because asciidoctor skips direct
+// processing of files found in directories starting with an _. This
+// prevents each chapter being built as its own book.
+
+include::_chapters/preface.adoc[]
+
+include::_chapters/getting_started.adoc[]
+include::_chapters/configuration.adoc[]
+include::_chapters/upgrading.adoc[]
+include::_chapters/shell.adoc[]
+include::_chapters/datamodel.adoc[]
+include::_chapters/schema_design.adoc[]
+include::_chapters/mapreduce.adoc[]
+include::_chapters/security.adoc[]
+include::_chapters/architecture.adoc[]
+include::_chapters/hbase_apis.adoc[]
+include::_chapters/external_apis.adoc[]
+include::_chapters/thrift_filter_language.adoc[]
+include::_chapters/cp.adoc[]
+include::_chapters/performance.adoc[]
+include::_chapters/troubleshooting.adoc[]
+include::_chapters/case_studies.adoc[]
+include::_chapters/ops_mgt.adoc[]
+include::_chapters/developer.adoc[]
+include::_chapters/unit_testing.adoc[]
+include::_chapters/zookeeper.adoc[]
+include::_chapters/community.adoc[]
+
+= Appendix
+
+include::_chapters/appendix_contributing_to_documentation.adoc[]
+include::_chapters/faq.adoc[]
+include::_chapters/hbck_in_depth.adoc[]
+include::_chapters/appendix_acl_matrix.adoc[]
+include::_chapters/compression.adoc[]
+include::_chapters/sql.adoc[]
+include::_chapters/ycsb.adoc[]
+include::_chapters/appendix_hfile_format.adoc[]
+include::_chapters/other_info.adoc[]
+include::_chapters/hbase_history.adoc[]
+include::_chapters/asf.adoc[]
+include::_chapters/orca.adoc[]
+include::_chapters/tracing.adoc[]
+include::_chapters/rpc.adoc[]
+
+
