http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java
new file mode 100644
index 0000000..988872e
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java
@@ -0,0 +1,109 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestScannerModel extends TestModelBase<ScannerModel> {
+  private static final String PRIVATE = "private";
+  private static final String PUBLIC = "public";
+  private static final byte[] START_ROW = Bytes.toBytes("abracadabra");
+  private static final byte[] END_ROW = Bytes.toBytes("zzyzx");
+  private static final byte[] COLUMN1 = Bytes.toBytes("column1");
+  private static final byte[] COLUMN2 = Bytes.toBytes("column2:foo");
+  private static final long START_TIME = 1245219839331L;
+  private static final long END_TIME = 1245393318192L;
+  private static final int CACHING = 1000;
+  private static final int BATCH = 100;
+  private static final boolean CACHE_BLOCKS = false;
+
+  public TestScannerModel() throws Exception {
+    super(ScannerModel.class);
+    AS_XML = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"
+        + "<Scanner batch=\"100\" cacheBlocks=\"false\" caching=\"1000\" endRow=\"enp5eng=\" "
+        + "endTime=\"1245393318192\" maxVersions=\"2147483647\" startRow=\"YWJyYWNhZGFicmE=\" "
+        + "startTime=\"1245219839331\">"
+        + "<column>Y29sdW1uMQ==</column><column>Y29sdW1uMjpmb28=</column>"
+        + "<labels>private</labels><labels>public</labels>"
+        + "</Scanner>";
+
+    AS_JSON = "{\"batch\":100,\"caching\":1000,\"cacheBlocks\":false,\"endRow\":\"enp5eng=\","
+        + "\"endTime\":1245393318192,\"maxVersions\":2147483647,\"startRow\":\"YWJyYWNhZGFicmE=\","
+        + "\"startTime\":1245219839331,\"column\":[\"Y29sdW1uMQ==\",\"Y29sdW1uMjpmb28=\"],"
+        + "\"labels\":[\"private\",\"public\"]"
+        + "}";
+
+    AS_PB = "CgthYnJhY2FkYWJyYRIFenp5engaB2NvbHVtbjEaC2NvbHVtbjI6Zm9vIGQo47qL554kMLDi57mf"
+        + "JDj/////B0joB1IHcHJpdmF0ZVIGcHVibGljWAA=";
+  }
+
+  protected ScannerModel buildTestModel() {
+    ScannerModel model = new ScannerModel();
+    model.setStartRow(START_ROW);
+    model.setEndRow(END_ROW);
+    model.addColumn(COLUMN1);
+    model.addColumn(COLUMN2);
+    model.setStartTime(START_TIME);
+    model.setEndTime(END_TIME);
+    model.setBatch(BATCH);
+    model.setCaching(CACHING);
+    model.addLabel(PRIVATE);
+    model.addLabel(PUBLIC);
+    model.setCacheBlocks(CACHE_BLOCKS);
+    return model;
+  }
+
+  protected void checkModel(ScannerModel model) {
+    assertTrue(Bytes.equals(model.getStartRow(), START_ROW));
+    assertTrue(Bytes.equals(model.getEndRow(), END_ROW));
+    boolean foundCol1 = false, foundCol2 = false;
+    for (byte[] column : model.getColumns()) {
+      if (Bytes.equals(column, COLUMN1)) {
+        foundCol1 = true;
+      } else if (Bytes.equals(column, COLUMN2)) {
+        foundCol2 = true;
+      }
+    }
+    assertTrue(foundCol1);
+    assertTrue(foundCol2);
+    assertEquals(model.getStartTime(), START_TIME);
+    assertEquals(model.getEndTime(), END_TIME);
+    assertEquals(model.getBatch(), BATCH);
+    assertEquals(model.getCaching(), CACHING);
+    assertEquals(model.getCacheBlocks(), CACHE_BLOCKS);
+    boolean foundLabel1 = false;
+    boolean foundLabel2 = false;
+    if (model.getLabels() != null && model.getLabels().size() > 0) {
+      for (String label : model.getLabels()) {
+        if (label.equals(PRIVATE)) {
+          foundLabel1 = true;
+        } else if (label.equals(PUBLIC)) {
+          foundLabel2 = true;
+        }
+      }
+      assertTrue(foundLabel1);
+      assertTrue(foundLabel2);
+    }
+  }
+
+}

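A note on the fixtures above: ScannerModel carries its binary fields (start
row, end row, column names) Base64-encoded in both the XML and JSON
representations. A minimal standalone sketch, using the JDK's
java.util.Base64 for illustration (the class name DecodeScannerFixtures is
hypothetical), showing how the encoded values map back to the constants in
buildTestModel():

    import java.util.Base64;

    public class DecodeScannerFixtures {
      public static void main(String[] args) {
        // Each Base64 token from AS_XML/AS_JSON decodes to a plain-text constant.
        System.out.println(new String(Base64.getDecoder().decode("YWJyYWNhZGFicmE="))); // abracadabra
        System.out.println(new String(Base64.getDecoder().decode("enp5eng=")));         // zzyzx
        System.out.println(new String(Base64.getDecoder().decode("Y29sdW1uMjpmb28="))); // column2:foo
      }
    }
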
http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
new file mode 100644
index 0000000..0f852ca
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
@@ -0,0 +1,145 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.util.Iterator;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestStorageClusterStatusModel extends TestModelBase<StorageClusterStatusModel> {
+
+  public TestStorageClusterStatusModel() throws Exception {
+    super(StorageClusterStatusModel.class);
+
+    AS_XML =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
+      "<ClusterStatus averageLoad=\"1.0\" regions=\"2\" requests=\"0\">" +
+      "<DeadNodes/><LiveNodes>" +
+      "<Node heapSizeMB=\"128\" maxHeapSizeMB=\"1024\" name=\"test1\" requests=\"0\" startCode=\"1245219839331\">" +
+      "<Region currentCompactedKVs=\"1\" memstoreSizeMB=\"0\" name=\"aGJhc2U6cm9vdCwsMA==\" readRequestsCount=\"1\" " +
+      "rootIndexSizeKB=\"1\" storefileIndexSizeMB=\"0\" storefileSizeMB=\"0\" storefiles=\"1\" stores=\"1\" " +
+      "totalCompactingKVs=\"1\" totalStaticBloomSizeKB=\"1\" totalStaticIndexSizeKB=\"1\" writeRequestsCount=\"2\"/>" +
+      "</Node>" +
+      "<Node heapSizeMB=\"512\" maxHeapSizeMB=\"1024\" name=\"test2\" requests=\"0\" startCode=\"1245239331198\">" +
+      "<Region currentCompactedKVs=\"1\" memstoreSizeMB=\"0\" name=\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\" " +
+      "readRequestsCount=\"1\" rootIndexSizeKB=\"1\" storefileIndexSizeMB=\"0\" storefileSizeMB=\"0\" " +
+      "storefiles=\"1\" stores=\"1\" totalCompactingKVs=\"1\" totalStaticBloomSizeKB=\"1\" " +
+      "totalStaticIndexSizeKB=\"1\" writeRequestsCount=\"2\"/></Node></LiveNodes></ClusterStatus>";
+
+    AS_PB =
+      "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB" +
+      "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0" +
+      "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8=";
+
+
+    //Using jackson will break json backward compatibility for this representation,
+    //but the original one was broken as it would only print one Node element,
+    //so the format itself was broken.
+    AS_JSON =
+      "{\"regions\":2,\"requests\":0,\"averageLoad\":1.0,\"LiveNodes\":[{\"name\":\"test1\"," +
+          "\"Region\":[{\"name\":\"aGJhc2U6cm9vdCwsMA==\",\"stores\":1,\"storefiles\":1," +
+          "\"storefileSizeMB\":0,\"memstoreSizeMB\":0,\"storefileIndexSizeMB\":0," +
+          "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," +
+          "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," +
+          "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245219839331," +
+          "\"heapSizeMB\":128,\"maxHeapSizeMB\":1024},{\"name\":\"test2\"," +
+          "\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\",\"stores\":1," +
+          "\"storefiles\":1,\"storefileSizeMB\":0,\"memstoreSizeMB\":0,\"storefileIndexSizeMB\":0," +
+          "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," +
+          "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," +
+          "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245239331198," +
+          "\"heapSizeMB\":512,\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}";
+  }
+
+  protected StorageClusterStatusModel buildTestModel() {
+    StorageClusterStatusModel model = new StorageClusterStatusModel();
+    model.setRegions(2);
+    model.setRequests(0);
+    model.setAverageLoad(1.0);
+    model.addLiveNode("test1", 1245219839331L, 128, 1024)
+      .addRegion(Bytes.toBytes("hbase:root,,0"), 1, 1, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1);
+    model.addLiveNode("test2", 1245239331198L, 512, 1024)
+      .addRegion(Bytes.toBytes(TableName.META_TABLE_NAME + ",,1246000043724"), 1, 1, 0, 0, 0,
+          1, 2, 1, 1, 1, 1, 1);
+    return model;
+  }
+
+  protected void checkModel(StorageClusterStatusModel model) {
+    assertEquals(model.getRegions(), 2);
+    assertEquals(model.getRequests(), 0);
+    assertEquals(model.getAverageLoad(), 1.0);
+    Iterator<StorageClusterStatusModel.Node> nodes =
+      model.getLiveNodes().iterator();
+    StorageClusterStatusModel.Node node = nodes.next();
+    assertEquals(node.getName(), "test1");
+    assertEquals(node.getStartCode(), 1245219839331L);
+    assertEquals(node.getHeapSizeMB(), 128);
+    assertEquals(node.getMaxHeapSizeMB(), 1024);
+    Iterator<StorageClusterStatusModel.Node.Region> regions = 
+      node.getRegions().iterator();
+    StorageClusterStatusModel.Node.Region region = regions.next();
+    assertTrue(Bytes.toString(region.getName()).equals(
+        "hbase:root,,0"));
+    assertEquals(region.getStores(), 1);
+    assertEquals(region.getStorefiles(), 1);
+    assertEquals(region.getStorefileSizeMB(), 0);
+    assertEquals(region.getMemstoreSizeMB(), 0);
+    assertEquals(region.getStorefileIndexSizeMB(), 0);
+    assertEquals(region.getReadRequestsCount(), 1);
+    assertEquals(region.getWriteRequestsCount(), 2);
+    assertEquals(region.getRootIndexSizeKB(), 1);
+    assertEquals(region.getTotalStaticIndexSizeKB(), 1);
+    assertEquals(region.getTotalStaticBloomSizeKB(), 1);
+    assertEquals(region.getTotalCompactingKVs(), 1);
+    assertEquals(region.getCurrentCompactedKVs(), 1);
+    assertFalse(regions.hasNext());
+    node = nodes.next();
+    assertEquals(node.getName(), "test2");
+    assertEquals(node.getStartCode(), 1245239331198L);
+    assertEquals(node.getHeapSizeMB(), 512);
+    assertEquals(node.getMaxHeapSizeMB(), 1024);
+    regions = node.getRegions().iterator();
+    region = regions.next();
+    assertEquals(Bytes.toString(region.getName()),
+        TableName.META_TABLE_NAME+",,1246000043724");
+    assertEquals(region.getStores(), 1);
+    assertEquals(region.getStorefiles(), 1);
+    assertEquals(region.getStorefileSizeMB(), 0);
+    assertEquals(region.getMemstoreSizeMB(), 0);
+    assertEquals(region.getStorefileIndexSizeMB(), 0);
+    assertEquals(region.getReadRequestsCount(), 1);
+    assertEquals(region.getWriteRequestsCount(), 2);
+    assertEquals(region.getRootIndexSizeKB(), 1);
+    assertEquals(region.getTotalStaticIndexSizeKB(), 1);
+    assertEquals(region.getTotalStaticBloomSizeKB(), 1);
+    assertEquals(region.getTotalCompactingKVs(), 1);
+    assertEquals(region.getCurrentCompactedKVs(), 1);
+    
+    assertFalse(regions.hasNext());
+    assertFalse(nodes.hasNext());
+  }
+}
+

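The AS_PB fixture above is the Base64 encoding of the model's serialized
protobuf form. A sketch of decoding it by hand; the generated class and the
getLiveNodesCount() accessor are assumptions based on the usual protobuf
codegen for the StorageClusterStatusMessage.proto file listed in the pom
changes at the end of this commit:

    // Assumed codegen output of StorageClusterStatusMessage.proto:
    import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;

    public class DecodeStatusFixture {
      public static void main(String[] args) throws Exception {
        String asPb =
            "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB"
            + "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0"
            + "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8=";
        byte[] raw = java.util.Base64.getDecoder().decode(asPb);
        // Parse the raw bytes back into the generated protobuf message.
        StorageClusterStatus status = StorageClusterStatus.parseFrom(raw);
        System.out.println(status.getLiveNodesCount());  // expected: 2, matching the two test nodes
      }
    }
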
http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java
new file mode 100644
index 0000000..bd4fa1f
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java
@@ -0,0 +1,60 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import junit.framework.TestCase;
+import org.apache.hadoop.hbase.SmallTests;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestStorageClusterVersionModel extends TestModelBase<StorageClusterVersionModel> {
+  private static final String VERSION = "0.0.1-testing";
+
+  public TestStorageClusterVersionModel() throws Exception {
+    super(StorageClusterVersionModel.class);
+    AS_XML =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"+
+      "<ClusterVersion>" + VERSION + "</ClusterVersion>";
+
+    AS_JSON = "\"0.0.1-testing\"";
+  }
+
+  protected StorageClusterVersionModel buildTestModel() {
+    StorageClusterVersionModel model = new StorageClusterVersionModel();
+    model.setVersion(VERSION);
+    return model;
+  }
+
+  protected void checkModel(StorageClusterVersionModel model) {
+    assertEquals(model.getVersion(), VERSION);
+  }
+
+  @Override
+  public void testFromPB() throws Exception {
+    //ignore test no pb
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java
new file mode 100644
index 0000000..dadb9ad
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java
@@ -0,0 +1,96 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestTableInfoModel extends TestModelBase<TableInfoModel> {
+  private static final String TABLE = "testtable";
+  private static final byte[] START_KEY = Bytes.toBytes("abracadbra");
+  private static final byte[] END_KEY = Bytes.toBytes("zzyzx");
+  private static final long ID = 8731042424L;
+  private static final String LOCATION = "testhost:9876";
+
+  public TestTableInfoModel() throws Exception {
+    super(TableInfoModel.class);
+    AS_XML =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><TableInfo " +
+      "name=\"testtable\"><Region endKey=\"enp5eng=\" id=\"8731042424\" " +
+      "location=\"testhost:9876\" " +
+      "name=\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\" " +
+      "startKey=\"YWJyYWNhZGJyYQ==\"/></TableInfo>";
+
+    AS_PB =
+      "Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" +
+      "YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY=";
+
+    AS_JSON =
+      "{\"name\":\"testtable\",\"Region\":[{\"endKey\":\"enp5eng=\",\"id\":8731042424," +
+      "\"location\":\"testhost:9876\",\"" +
+      "name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" +
+      "startKey\":\"YWJyYWNhZGJyYQ==\"}]}";
+  }
+
+  protected TableInfoModel buildTestModel() {
+    TableInfoModel model = new TableInfoModel();
+    model.setName(TABLE);
+    model.add(new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION));
+    return model;
+  }
+
+  protected void checkModel(TableInfoModel model) {
+    assertEquals(model.getName(), TABLE);
+    Iterator<TableRegionModel> regions = model.getRegions().iterator();
+    TableRegionModel region = regions.next();
+    assertTrue(Bytes.equals(region.getStartKey(), START_KEY));
+    assertTrue(Bytes.equals(region.getEndKey(), END_KEY));
+    assertEquals(region.getId(), ID);
+    assertEquals(region.getLocation(), LOCATION);
+    assertFalse(regions.hasNext());
+  }
+
+  public void testBuildModel() throws Exception {
+    checkModel(buildTestModel());
+  }
+
+  public void testFromXML() throws Exception {
+    checkModel(fromXML(AS_XML));
+  }
+
+  public void testFromPB() throws Exception {
+    checkModel(fromPB(AS_PB));
+  }
+
+}
+

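testFromXML above delegates to TestModelBase.fromXML, which is not part of
this diff. A minimal standalone approximation of the JAXB unmarshalling it
presumably wraps (FromXmlSketch is a hypothetical name, not the actual
helper):

    import java.io.StringReader;
    import javax.xml.bind.JAXBContext;

    public class FromXmlSketch {
      public static TableInfoModel fromXML(String xml) throws Exception {
        // Unmarshal the XML fixture back into the JAXB-annotated model class.
        JAXBContext ctx = JAXBContext.newInstance(TableInfoModel.class);
        return (TableInfoModel) ctx.createUnmarshaller().unmarshal(new StringReader(xml));
      }
    }
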
http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java
new file mode 100644
index 0000000..4cb9194
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java
@@ -0,0 +1,73 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Base64;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestTableListModel extends TestModelBase<TableListModel> {
+  private static final String TABLE1 = "table1";
+  private static final String TABLE2 = "table2";
+  private static final String TABLE3 = "table3";
+
+  public TestTableListModel() throws Exception {
+    super(TableListModel.class);
+    AS_XML =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><TableList><table " +
+          "name=\"table1\"/><table name=\"table2\"/><table name=\"table3\"/></TableList>";
+
+    AS_PB = "CgZ0YWJsZTEKBnRhYmxlMgoGdGFibGUz";
+
+    AS_JSON =
+      "{\"table\":[{\"name\":\"table1\"},{\"name\":\"table2\"},{\"name\":\"table3\"}]}";
+  }
+
+  protected TableListModel buildTestModel() {
+    TableListModel model = new TableListModel();
+    model.add(new TableModel(TABLE1));
+    model.add(new TableModel(TABLE2));
+    model.add(new TableModel(TABLE3));
+    return model;
+  }
+
+  protected void checkModel(TableListModel model) {
+    Iterator<TableModel> tables = model.getTables().iterator();
+    TableModel table = tables.next();
+    assertEquals(table.getName(), TABLE1);
+    table = tables.next();
+    assertEquals(table.getName(), TABLE2);
+    table = tables.next();
+    assertEquals(table.getName(), TABLE3);
+    assertFalse(tables.hasNext());
+  }
+}
+

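The JSON fixtures are likewise exercised through TestModelBase. A sketch of
the kind of Jackson 1.x round-trip involved (an assumption: the real helper
likely configures a JAXB-aware introspector so the models' annotations are
honored; JsonRoundTripSketch is a hypothetical name):

    import org.codehaus.jackson.map.ObjectMapper;

    public class JsonRoundTripSketch {
      public static void main(String[] args) throws Exception {
        String asJson =
            "{\"table\":[{\"name\":\"table1\"},{\"name\":\"table2\"},{\"name\":\"table3\"}]}";
        ObjectMapper mapper = new ObjectMapper();
        // Deserialize the fixture, then serialize it again; with stable field
        // order the two strings should be identical.
        TableListModel model = mapper.readValue(asJson, TableListModel.class);
        System.out.println(mapper.writeValueAsString(model));
      }
    }
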
http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
new file mode 100644
index 0000000..5c4b1a9
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
@@ -0,0 +1,93 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestTableRegionModel extends TestModelBase<TableRegionModel> {
+  private static final String TABLE = "testtable";
+  private static final byte[] START_KEY = Bytes.toBytes("abracadbra");
+  private static final byte[] END_KEY = Bytes.toBytes("zzyzx");
+  private static final long ID = 8731042424L;
+  private static final String LOCATION = "testhost:9876";
+
+  public TestTableRegionModel() throws Exception {
+    super(TableRegionModel.class);
+
+    AS_XML =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Region endKey=\"enp5eng=\" " +
+          "id=\"8731042424\" location=\"testhost:9876\" " +
+          "name=\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\" " +
+          "startKey=\"YWJyYWNhZGJyYQ==\"/>";
+
+    AS_JSON =
+      "{\"endKey\":\"enp5eng=\",\"id\":8731042424,\"location\":\"testhost:9876\"," +
+          "\"name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" +
+          "startKey\":\"YWJyYWNhZGJyYQ==\"}";
+  }
+
+  protected TableRegionModel buildTestModel() {
+    TableRegionModel model =
+      new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION);
+    return model;
+  }
+
+  protected void checkModel(TableRegionModel model) {
+    assertTrue(Bytes.equals(model.getStartKey(), START_KEY));
+    assertTrue(Bytes.equals(model.getEndKey(), END_KEY));
+    assertEquals(model.getId(), ID);
+    assertEquals(model.getLocation(), LOCATION);
+    assertEquals(model.getName(), 
+      TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) +
+      ".ad9860f031282c46ed431d7af8f94aca.");
+  }
+
+  public void testGetName() {
+    TableRegionModel model = buildTestModel();
+    String modelName = model.getName();
+    HRegionInfo hri = new HRegionInfo(TableName.valueOf(TABLE),
+      START_KEY, END_KEY, false, ID);
+    assertEquals(modelName, hri.getRegionNameAsString());
+  }
+
+  public void testSetName() {
+    TableRegionModel model = buildTestModel();
+    String name = model.getName();
+    model.setName(name);
+    assertEquals(name, model.getName());
+  }
+
+  @Override
+  public void testFromPB() throws Exception {
+    //no pb ignore
+  }
+}
+

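testGetName above checks the model against HRegionInfo's rendering of a
region name: "table,startKey,regionId." followed by the encoded region name
(an MD5-based hash of that prefix) and a trailing dot. A standalone sketch
using the same constructor the test uses (RegionNameSketch is a hypothetical
name):

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionNameSketch {
      public static void main(String[] args) {
        // Same arguments as TestTableRegionModel.testGetName().
        HRegionInfo hri = new HRegionInfo(TableName.valueOf("testtable"),
            Bytes.toBytes("abracadbra"), Bytes.toBytes("zzyzx"), false, 8731042424L);
        // expected: testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.
        System.out.println(hri.getRegionNameAsString());
      }
    }
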
http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java
new file mode 100644
index 0000000..b725f7b
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java
@@ -0,0 +1,117 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Base64;
+
+import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestTableSchemaModel extends TestModelBase<TableSchemaModel> {
+
+  public static final String TABLE_NAME = "testTable";
+  private static final boolean IS_META = false;
+  private static final boolean IS_ROOT = false;
+  private static final boolean READONLY = false;
+
+  TestColumnSchemaModel testColumnSchemaModel;
+
+  private JAXBContext context;
+
+  public TestTableSchemaModel() throws Exception {
+    super(TableSchemaModel.class);
+    testColumnSchemaModel = new TestColumnSchemaModel();
+
+    AS_XML =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
+      "<TableSchema name=\"testTable\" IS_META=\"false\" IS_ROOT=\"false\" READONLY=\"false\">" +
+      "<ColumnSchema name=\"testcolumn\" BLOCKSIZE=\"16384\" BLOOMFILTER=\"NONE\" " +
+      "BLOCKCACHE=\"true\" COMPRESSION=\"GZ\" VERSIONS=\"1\" TTL=\"86400\" IN_MEMORY=\"false\"/>" +
+      "</TableSchema>";
+
+    AS_PB =
+      "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" +
+      "TFkSBWZhbHNlGpcBCgp0ZXN0Y29sdW1uEhIKCUJMT0NLU0laRRIFMTYzODQSEwoLQkxPT01GSUxU" +
+      "RVISBE5PTkUSEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01QUkVTU0lPThICR1oSDQoIVkVSU0lP" +
+      "TlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZhbHNlGICjBSABKgJHWigA";
+
+    AS_JSON =
+      "{\"name\":\"testTable\",\"IS_META\":\"false\",\"IS_ROOT\":\"false\"," +
+      "\"READONLY\":\"false\",\"ColumnSchema\":[{\"name\":\"testcolumn\"," +
+      "\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\",\"BLOCKCACHE\":\"true\"," +
+      "\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\",\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}]}";
+  }
+
+  protected TableSchemaModel buildTestModel() {
+    return buildTestModel(TABLE_NAME);
+  }
+
+  public TableSchemaModel buildTestModel(String name) {
+    TableSchemaModel model = new TableSchemaModel();
+    model.setName(name);
+    model.__setIsMeta(IS_META);
+    model.__setIsRoot(IS_ROOT);
+    model.__setReadOnly(READONLY);
+    model.addColumnFamily(testColumnSchemaModel.buildTestModel());
+    return model;
+  }
+
+  protected void checkModel(TableSchemaModel model) {
+    checkModel(model, TABLE_NAME);
+  }
+
+  public void checkModel(TableSchemaModel model, String tableName) {
+    assertEquals(model.getName(), tableName);
+    assertEquals(model.__getIsMeta(), IS_META);
+    assertEquals(model.__getIsRoot(), IS_ROOT);
+    assertEquals(model.__getReadOnly(), READONLY);
+    Iterator<ColumnSchemaModel> families = model.getColumns().iterator();
+    assertTrue(families.hasNext());
+    ColumnSchemaModel family = families.next();
+    testColumnSchemaModel.checkModel(family);
+    assertFalse(families.hasNext());
+  }
+
+  public void testBuildModel() throws Exception {
+    checkModel(buildTestModel());
+  }
+
+  public void testFromXML() throws Exception {
+    checkModel(fromXML(AS_XML));
+  }
+
+  public void testFromPB() throws Exception {
+    checkModel(fromPB(AS_PB));
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java
new file mode 100644
index 0000000..553bb35
--- /dev/null
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java
@@ -0,0 +1,80 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Base64;
+
+import junit.framework.TestCase;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestVersionModel extends TestModelBase<VersionModel> {
+  private static final String REST_VERSION = "0.0.1";
+  private static final String OS_VERSION = 
+    "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64";
+  private static final String JVM_VERSION =
+    "Sun Microsystems Inc. 1.6.0_13-11.3-b02";
+  private static final String JETTY_VERSION = "6.1.14";
+  private static final String JERSEY_VERSION = "1.1.0-ea";
+  
+  public TestVersionModel() throws Exception {
+    super(VersionModel.class);
+    AS_XML =
+      "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Version JVM=\"Sun " +
+          "Microsystems Inc. 1.6.0_13-11.3-b02\" Jersey=\"1.1.0-ea\" " +
+          "OS=\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\" REST=\"0.0.1\" Server=\"6.1.14\"/>";
+
+    AS_PB =
+      "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" +
+      "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE=";
+
+    AS_JSON =
+      "{\"JVM\":\"Sun Microsystems Inc. 1.6.0_13-11.3-b02\",\"Jersey\":\"1.1.0-ea\"," +
+          "\"OS\":\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\",\"" +
+          "REST\":\"0.0.1\",\"Server\":\"6.1.14\"}";
+  }
+
+  protected VersionModel buildTestModel() {
+    VersionModel model = new VersionModel();
+    model.setRESTVersion(REST_VERSION);
+    model.setOSVersion(OS_VERSION);
+    model.setJVMVersion(JVM_VERSION);
+    model.setServerVersion(JETTY_VERSION);
+    model.setJerseyVersion(JERSEY_VERSION);
+    return model;
+  }
+
+  protected void checkModel(VersionModel model) {
+    assertEquals(model.getRESTVersion(), REST_VERSION);
+    assertEquals(model.getOSVersion(), OS_VERSION);
+    assertEquals(model.getJVMVersion(), JVM_VERSION);
+    assertEquals(model.getServerVersion(), JETTY_VERSION);
+    assertEquals(model.getJerseyVersion(), JERSEY_VERSION);
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/resources/hbase-site.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/hbase-site.xml b/hbase-rest/src/test/resources/hbase-site.xml
new file mode 100644
index 0000000..8c8312c
--- /dev/null
+++ b/hbase-rest/src/test/resources/hbase-site.xml
@@ -0,0 +1,150 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as hbase:meta scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.event.waiting.time</name>
+    <value>50</value>
+    <description>Time to sleep between checks to see if a table event took place.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>0</value>
+    <description>Always have masters and regionservers come up on port '0' so we don't clash over
+      default ports.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.port</name>
+    <value>0</value>
+    <description>Always have masters and regionservers come up on port '0' so we don't clash over
+      default ports.
+    </description>
+  </property>
+  <property>
+    <name>hbase.ipc.client.fallback-to-simple-auth-allowed</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+    <description>
+    Set to true to skip the 'hbase.defaults.for.version'.
+    Setting this to true can be useful in contexts other than
+    the other side of a maven generation; i.e. running in an
+    ide.  You'll want to set this boolean to true to avoid
+    seeing the RuntimeException complaint: "hbase-default.xml file
+    seems to be for an old version of HBase (@@@VERSION@@@), this
+    version is X.X.X-SNAPSHOT"
+    </description>
+  </property>
+  <property>
+    <name>hbase.table.sanity.checks</name>
+    <value>false</value>
+    <description>Skip sanity checks in tests
+    </description>
+  </property>
+</configuration>

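This hbase-site.xml sits on the test classpath; HBaseConfiguration.create()
layers it over hbase-default.xml, so every test in the module sees the
overrides above. A minimal sketch (ReadTestConfig is a hypothetical name):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ReadTestConfig {
      public static void main(String[] args) {
        // create() loads hbase-default.xml first, then hbase-site.xml overrides.
        Configuration conf = HBaseConfiguration.create();
        // With the file above on the classpath this prints 1000, not the shipped default.
        System.out.println(conf.getInt("hbase.regionserver.msginterval", -1));
      }
    }
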
http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/resources/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/hdfs-site.xml b/hbase-rest/src/test/resources/hdfs-site.xml
new file mode 100644
index 0000000..03be0c7
--- /dev/null
+++ b/hbase-rest/src/test/resources/hdfs-site.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <!-- hadoop-2.0.5+'s HDFS-4305 by default enforces a min block size
+       of 1024*1024.  Many unit tests that use the hlog use smaller
+       blocks.  Setting this config to 0 to have tests pass -->
+  <property>
+    <name>dfs.namenode.fs-limits.min-block-size</name>
+    <value>0</value>
+  </property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/log4j.properties b/hbase-rest/src/test/resources/log4j.properties
new file mode 100644
index 0000000..6ee91ef
--- /dev/null
+++ b/hbase-rest/src/test/resources/log4j.properties
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+# Debugging Pattern format
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+
+log4j.logger.org.apache.hadoop=WARN
+log4j.logger.org.apache.zookeeper=ERROR
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+
+#These two settings are workarounds against spurious logs from the minicluster.
+#See HBASE-4709
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR
+log4j.logger.org.apache.hadoop.metrics2.util.MBeans=ERROR
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/resources/mapred-queues.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/mapred-queues.xml b/hbase-rest/src/test/resources/mapred-queues.xml
new file mode 100644
index 0000000..43f3e2a
--- /dev/null
+++ b/hbase-rest/src/test/resources/mapred-queues.xml
@@ -0,0 +1,75 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<!-- This is the template for queue configuration. The format supports nesting of
+     queues within queues - a feature called hierarchical queues. All queues are
+     defined within the 'queues' tag which is the top level element for this
+     XML document.
+     The 'aclsEnabled' attribute should be set to true, if ACLs should be checked
+     on queue operations such as submitting jobs, killing jobs etc. -->
+<queues aclsEnabled="false">
+
+  <!-- Configuration for a queue is specified by defining a 'queue' element. -->
+  <queue>
+
+    <!-- Name of a queue. Queue name cannot contain a ':'  -->
+    <name>default</name>
+
+    <!-- properties for a queue, typically used by schedulers,
+    can be defined here -->
+    <properties>
+    </properties>
+
+       <!-- State of the queue. If running, the queue will accept new jobs.
+         If stopped, the queue will not accept new jobs. -->
+    <state>running</state>
+
+    <!-- Specifies the ACLs to check for submitting jobs to this queue.
+         If set to '*', it allows all users to submit jobs to the queue.
+         For specifying a list of users and groups the format to use is
+         user1,user2 group1,group2 -->
+    <acl-submit-job>*</acl-submit-job>
+
+    <!-- Specifies the ACLs to check for modifying jobs in this queue.
+         Modifications include killing jobs, tasks of jobs or changing
+         priorities.
+         If set to '*', it allows all users to submit jobs to the queue.
+         For specifying a list of users and groups the format to use is
+         user1,user2 group1,group2 -->
+    <acl-administer-jobs>*</acl-administer-jobs>
+  </queue>
+
+  <!-- Here is a sample of a hierarchical queue configuration
+       where q2 is a child of q1. In this example, q2 is a leaf level
+       queue as it has no queues configured within it. Currently, ACLs
+       and state are only supported for the leaf level queues.
+       Note also the usage of properties for the queue q2.
+  <queue>
+    <name>q1</name>
+    <queue>
+      <name>q2</name>
+      <properties>
+        <property key="capacity" value="20"/>
+        <property key="user-limit" value="30"/>
+      </properties>
+    </queue>
+  </queue>
+ -->
+</queues>

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/resources/mapred-site.xml
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/mapred-site.xml b/hbase-rest/src/test/resources/mapred-site.xml
new file mode 100644
index 0000000..787ffb7
--- /dev/null
+++ b/hbase-rest/src/test/resources/mapred-site.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+<property>
+  <name>mapred.map.child.java.opts</name>
+  <value>-Djava.awt.headless=true</value>
+</property>
+
+<property>
+  <name>mapred.reduce.child.java.opts</name>
+  <value>-Djava.awt.headless=true</value>
+</property>
+</configuration>
+

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-rest/src/test/resources/zoo.cfg
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/resources/zoo.cfg b/hbase-rest/src/test/resources/zoo.cfg
new file mode 100644
index 0000000..a7b8ec8
--- /dev/null
+++ b/hbase-rest/src/test/resources/zoo.cfg
@@ -0,0 +1,43 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+# do not use /tmp for storage; /tmp here is just
+# for example's sake.
+dataDir=/tmp/hbase-test-zookeeper-deleteme
+# the port at which the clients will connect
+clientPort=9999
+#
+# Be sure to read the maintenance section of the
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+autopurge.purgeInterval=1
+
+server.1=i-am-a-test-server:7999:8999

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 6d3697e..ce1efb1 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -127,8 +127,6 @@
                <jspcompiler uriroot="${src.webapps}/master" outputdir="${generated.sources}/java" package="org.apache.hadoop.hbase.generated.master" webxml="${build.webapps}/master/WEB-INF/web.xml"/>
                 <mkdir dir="${build.webapps}/regionserver/WEB-INF"/>
                <jspcompiler uriroot="${src.webapps}/regionserver" outputdir="${generated.sources}/java" package="org.apache.hadoop.hbase.generated.regionserver" webxml="${build.webapps}/regionserver/WEB-INF/web.xml"/>
-                <mkdir dir="${build.webapps}/rest/WEB-INF"/>
-                <jspcompiler uriroot="${src.webapps}/rest" outputdir="${generated.sources}/java" package="org.apache.hadoop.hbase.generated.rest" webxml="${build.webapps}/rest/WEB-INF/web.xml"/>
               </target>
             </configuration>
             <goals>
@@ -431,27 +429,6 @@
       <groupId>io.netty</groupId>
       <artifactId>netty-all</artifactId>
     </dependency>
-    <!-- REST dependencies -->
-    <dependency>
-      <groupId>com.google.protobuf</groupId>
-      <artifactId>protobuf-java</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-json</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-server</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
-    </dependency>
     <!-- tracing Dependencies -->
     <dependency>
       <groupId>org.htrace</groupId>
@@ -721,34 +698,6 @@
             <artifactId>hadoop-maven-plugins</artifactId>
             <executions>
               <execution>
-                <id>compile-protoc</id>
-                <phase>generate-sources</phase>
-                <goals>
-                  <goal>protoc</goal>
-                </goals>
-                <configuration>
-                  <imports>
-                    <param>${basedir}/src/main/resources/org/apache/hadoop/hbase/rest/protobuf</param>
-                  </imports>
-                  <source>
-                    <!-- These should be under src/main/protobuf -->
-                    <directory>${basedir}/src/main/resources/org/apache/hadoop/hbase/rest/protobuf</directory>
-                    <includes>
-                      <include>CellMessage.proto</include>
-                      <include>CellSetMessage.proto</include>
-                      <include>ColumnSchemaMessage.proto</include>
-                      <include>ScannerMessage.proto</include>
-                      <include>StorageClusterStatusMessage.proto</include>
-                      <include>TableInfoMessage.proto</include>
-                      <include>TableListMessage.proto</include>
-                      <include>TableSchemaMessage.proto</include>
-                      <include>VersionMessage.proto</include>
-                    </includes>
-                  </source>
-                 <output>${basedir}/src/main/java/</output>
-                </configuration>
-              </execution>
-              <execution>
                 <id>compile-test-protoc</id>
                 <phase>generate-test-sources</phase>
                 <goals>

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java
deleted file mode 100644
index 505dbb3..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/Constants.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * Common constants for org.apache.hadoop.hbase.rest
- */
[email protected]
[email protected]
-public interface Constants {
-  // All constants in a public interface are 'public static final'
-
-  String VERSION_STRING = "0.0.3";
-
-  int DEFAULT_MAX_AGE = 60 * 60 * 4;  // 4 hours
-
-  int DEFAULT_LISTEN_PORT = 8080;
-
-  String MIMETYPE_TEXT = "text/plain";
-  String MIMETYPE_HTML = "text/html";
-  String MIMETYPE_XML = "text/xml";
-  String MIMETYPE_BINARY = "application/octet-stream";
-  String MIMETYPE_PROTOBUF = "application/x-protobuf";
-  String MIMETYPE_PROTOBUF_IETF = "application/protobuf";
-  String MIMETYPE_JSON = "application/json";
-
-  String CRLF = "\r\n";
-
-  String REST_KEYTAB_FILE = "hbase.rest.keytab.file";
-  String REST_KERBEROS_PRINCIPAL = "hbase.rest.kerberos.principal";
-  String REST_AUTHENTICATION_TYPE = "hbase.rest.authentication.type";
-  String REST_AUTHENTICATION_PRINCIPAL = "hbase.rest.authentication.kerberos.principal";
-
-  String REST_SSL_ENABLED = "hbase.rest.ssl.enabled";
-  String REST_SSL_KEYSTORE_STORE = "hbase.rest.ssl.keystore.store";
-  String REST_SSL_KEYSTORE_PASSWORD = "hbase.rest.ssl.keystore.password";
-  String REST_SSL_KEYSTORE_KEYPASSWORD = "hbase.rest.ssl.keystore.keypassword";
-
-  String REST_DNS_NAMESERVER = "hbase.rest.dns.nameserver";
-  String REST_DNS_INTERFACE = "hbase.rest.dns.interface";
-
-  String FILTER_CLASSES = "hbase.rest.filter.classes";
-  String SCAN_START_ROW = "startrow";
-  String SCAN_END_ROW = "endrow";
-  String SCAN_COLUMN = "column";
-  String SCAN_START_TIME = "starttime";
-  String SCAN_END_TIME = "endtime";
-  String SCAN_MAX_VERSIONS = "maxversions";
-  String SCAN_BATCH_SIZE = "batchsize";
-  String SCAN_LIMIT = "limit";
-  String SCAN_FETCH_SIZE = "hbase.rest.scan.fetchsize";
-  String SCAN_FILTER = "filter"; 
-  String CUSTOM_FILTERS = "hbase.rest.custom.filters"; 
-
-  String ROW_KEYS_PARAM_NAME = "row";
-  /** If this query parameter is present when processing row or scanner resources,
-      it disables server side block caching */
-  String NOCACHE_PARAM_NAME = "nocache";
-}
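
For readers tracking the move: the SCAN_* constants above are the query-string parameter names the REST gateway accepts for stateless scans. A minimal sketch of composing such a URL from them; the host, port, table name, and the "/*" scan path are assumptions about a typical deployment, not part of this commit:

    // Sketch: build a stateless scan request URL from the parameter names above.
    public static String scanUrl() {
      return "http://resthost:8080/mytable/*"
          + "?startrow=row-000"      // Constants.SCAN_START_ROW
          + "&endrow=row-999"        // Constants.SCAN_END_ROW
          + "&maxversions=2"         // Constants.SCAN_MAX_VERSIONS
          + "&batchsize=100";        // Constants.SCAN_BATCH_SIZE
    }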

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
deleted file mode 100644
index 90b3302..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
[email protected]
-public class ExistsResource extends ResourceBase {
-
-  static CacheControl cacheControl;
-  static {
-    cacheControl = new CacheControl();
-    cacheControl.setNoCache(true);
-    cacheControl.setNoTransform(false);
-  }
-
-  TableResource tableResource;
-
-  /**
-   * Constructor
-   * @param tableResource
-   * @throws IOException
-   */
-  public ExistsResource(TableResource tableResource) throws IOException {
-    super();
-    this.tableResource = tableResource;
-  }
-
-  @GET
-  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
-    MIMETYPE_PROTOBUF_IETF, MIMETYPE_BINARY})
-  public Response get(final @Context UriInfo uriInfo) {
-    try {
-      if (!tableResource.exists()) {
-        return Response.status(Response.Status.NOT_FOUND)
-          .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
-          .build();
-      }
-    } catch (IOException e) {
-      return Response.status(Response.Status.SERVICE_UNAVAILABLE)
-        .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
-        .build();
-    }
-    ResponseBuilder response = Response.ok();
-    response.cacheControl(cacheControl);
-    return response.build();
-  }
-}
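
ExistsResource answers a bare GET with 200 when the table exists, 404 when it does not, and 503 when the existence check itself fails. A minimal client-side sketch using only java.net; the host and the /<table>/exists path are assumptions about how TableResource mounts this resource:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public static boolean tableExists(String table) throws Exception {
      URL url = new URL("http://resthost:8080/" + table + "/exists");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("GET");
      int code = conn.getResponseCode();   // 200, 404, or 503 per the resource above
      conn.disconnect();
      return code == HttpURLConnection.HTTP_OK;
    }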

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
deleted file mode 100644
index e31037a..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-
-import org.apache.hadoop.hbase.rest.MetricsRESTSource;
-
[email protected]
-public class MetricsREST {
-
-  public MetricsRESTSource getSource() {
-    return source;
-  }
-
-  private MetricsRESTSource source;
-
-  public MetricsREST() {
-     source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class);
-  }
-  
-  /**
-   * @param inc How much to add to requests.
-   */
-  public void incrementRequests(final int inc) {
-    source.incrementRequests(inc);
-  }
-  
-  /**
-   * @param inc How much to add to sucessfulGetCount.
-   */
-  public void incrementSucessfulGetRequests(final int inc) {
-    source.incrementSucessfulGetRequests(inc);
-  }
-  
-  /**
-   * @param inc How much to add to sucessfulPutCount.
-   */
-  public void incrementSucessfulPutRequests(final int inc) {
-    source.incrementSucessfulPutRequests(inc);
-  }
-
-  /**
-   * @param inc How much to add to failedPutCount.
-   */
-  public void incrementFailedPutRequests(final int inc) {
-    source.incrementFailedPutRequests(inc);
-  }
-  
-  /**
-   * @param inc How much to add to failedGetCount.
-   */
-  public void incrementFailedGetRequests(final int inc) {
-    source.incrementFailedGetRequests(inc);
-  }
-
-  /**
-   * @param inc How much to add to sucessfulDeleteCount.
-   */
-  public void incrementSucessfulDeleteRequests(final int inc) {
-    source.incrementSucessfulDeleteRequests(inc);
-  }
-
-  /**
-   * @param inc How much to add to failedDeleteCount.
-   */
-  public void incrementFailedDeleteRequests(final int inc) {
-    source.incrementFailedDeleteRequests(inc);
-  }
-
-  /**
-   * @param inc How much to add to sucessfulScanCount.
-   */
-  public synchronized void incrementSucessfulScanRequests(final int inc) {
-    source.incrementSucessfulScanRequests(inc);
-  }
-
-  /**
-   * @param inc How much to add to failedScanCount.
-   */
-  public void incrementFailedScanRequests(final int inc) {
-    source.incrementFailedScanRequests(inc);
-  }
-
-}
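
The class is a thin per-operation wrapper over MetricsRESTSource: resources bump one counter per incoming request, then a success or failure counter per outcome. A simplified sketch of that calling pattern (method names keep the class's own spelling):

    void doGet(MetricsREST metrics) {
      metrics.incrementRequests(1);
      try {
        // ... perform the read against the table ...
        metrics.incrementSucessfulGetRequests(1);
      } catch (Exception e) {
        metrics.incrementFailedGetRequests(1);
      }
    }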

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
deleted file mode 100644
index c88ac91..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-
[email protected]
-public class MultiRowResource extends ResourceBase implements Constants {
-  private static final Log LOG = LogFactory.getLog(MultiRowResource.class);
-
-  TableResource tableResource;
-  Integer versions = null;
-
-  /**
-   * Constructor
-   *
-   * @param tableResource
-   * @param versions
-   * @throws java.io.IOException
-   */
-  public MultiRowResource(TableResource tableResource, String versions) throws IOException {
-    super();
-    this.tableResource = tableResource;
-
-    if (versions != null) {
-      this.versions = Integer.valueOf(versions);
-
-    }
-  }
-
-  @GET
-  @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
-  public Response get(final @Context UriInfo uriInfo) {
-    MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
-
-    servlet.getMetrics().incrementRequests(1);
-    try {
-      CellSetModel model = new CellSetModel();
-      for (String rk : params.get(ROW_KEYS_PARAM_NAME)) {
-        RowSpec rowSpec = new RowSpec(rk);
-
-        if (this.versions != null) {
-          rowSpec.setMaxVersions(this.versions);
-        }
-        ResultGenerator generator =
-          ResultGenerator.fromRowSpec(this.tableResource.getName(), rowSpec, null,
-            !params.containsKey(NOCACHE_PARAM_NAME));
-        Cell value = null;
-        RowModel rowModel = new RowModel(rk);
-        if (generator.hasNext()) {
-          while ((value = generator.next()) != null) {
-            rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil
-                .cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value)));
-          }
-          model.addRow(rowModel);
-        } else {
-          LOG.trace("The row : " + rk + " not found in the table.");
-        }
-      }
-
-      if (model.getRows().size() == 0) {
-      //If no rows found.
-        servlet.getMetrics().incrementFailedGetRequests(1);
-        return Response.status(Response.Status.NOT_FOUND)
-            .type(MIMETYPE_TEXT).entity("No rows found." + CRLF)
-            .build();
-      } else {
-        servlet.getMetrics().incrementSucessfulGetRequests(1);
-        return Response.ok(model).build();
-      }
-    } catch (Exception e) {
-      servlet.getMetrics().incrementFailedGetRequests(1);
-      return processException(e);
-    }
-  }
-}
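
MultiRowResource collects one RowModel per matching key and returns 404 only when none of the requested keys exist, repeating the "row" query parameter (ROW_KEYS_PARAM_NAME) once per key. A sketch of the request shape; the host, table, /multiget mount point, and the "v" versions parameter are assumptions about how TableResource wires this resource up:

    // Fetch three rows in one round trip; v=2 would map to setMaxVersions(2).
    String url = "http://resthost:8080/mytable/multiget"
        + "?row=rk1&row=rk2&row=rk3&v=2";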

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
deleted file mode 100644
index bbaf1f7..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Common interface for models capable of supporting protobuf marshalling
- * and unmarshalling. Hooks up to the ProtobufMessageBodyConsumer and
- * ProtobufMessageBodyProducer adapters. 
- */
[email protected]
-public interface ProtobufMessageHandler {
-  /**
-   * @return the protobuf represention of the model
-   */
-  byte[] createProtobufOutput();
-
-  /**
-   * Initialize the model from a protobuf representation.
-   * @param message the raw bytes of the protobuf message
-   * @return reference to self for convenience
-   * @throws IOException
-   */
-  ProtobufMessageHandler getObjectFromMessage(byte[] message)
-    throws IOException;
-}
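
Any model gains protobuf support by implementing these two methods, after which the message-body adapters pick it up. A minimal sketch of an implementation; MyModel and MyMessage are purely illustrative stand-ins for a model class and its generated protobuf message:

    import java.io.IOException;

    public class MyModel implements ProtobufMessageHandler {
      private String value;

      @Override
      public byte[] createProtobufOutput() {
        // serialize through the generated protobuf builder
        return MyMessage.newBuilder().setValue(value).build().toByteArray();
      }

      @Override
      public ProtobufMessageHandler getObjectFromMessage(byte[] message)
          throws IOException {
        value = MyMessage.parseFrom(message).getValue();
        return this;   // contract: return self for convenience
      }
    }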

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
deleted file mode 100644
index 93bb940..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.rest;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.List;
-
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.StreamingOutput;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.rest.model.CellModel;
-import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-
-public class ProtobufStreamingUtil implements StreamingOutput {
-
-  private static final Log LOG = LogFactory.getLog(ProtobufStreamingUtil.class);
-  private String contentType;
-  private ResultScanner resultScanner;
-  private int limit;
-  private int fetchSize;
-
-  protected ProtobufStreamingUtil(ResultScanner scanner, String type, int limit, int fetchSize) {
-    this.resultScanner = scanner;
-    this.contentType = type;
-    this.limit = limit;
-    this.fetchSize = fetchSize;
-    LOG.debug("Created ScanStreamingUtil with content type = " + this.contentType + " user limit : "
-        + this.limit + " scan fetch size : " + this.fetchSize);
-  }
-
-  @Override
-  public void write(OutputStream outStream) throws IOException, WebApplicationException {
-    Result[] rowsToSend;
-    if(limit < fetchSize){
-      rowsToSend = this.resultScanner.next(limit);
-      writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream);
-    } else {
-      int count = limit;
-      while (count > 0) {
-        if (count < fetchSize) {
-          rowsToSend = this.resultScanner.next(count);
-        } else {
-          rowsToSend = this.resultScanner.next(this.fetchSize);
-        }
-        if(rowsToSend.length == 0){
-          break;
-        }
-        count = count - rowsToSend.length;
-        writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream);
-      }
-    }
-  }
-
-  private void writeToStream(CellSetModel model, String contentType, OutputStream outStream)
-      throws IOException {
-    byte[] objectBytes = model.createProtobufOutput();
-    outStream.write(Bytes.toBytes((short)objectBytes.length));
-    outStream.write(objectBytes);
-    outStream.flush();
-    LOG.trace("Wrote " + model.getRows().size() + " rows to stream successfully.");
-  }
-
-  private CellSetModel createModelFromResults(Result[] results) {
-    CellSetModel cellSetModel = new CellSetModel();
-    for (Result rs : results) {
-      byte[] rowKey = rs.getRow();
-      RowModel rModel = new RowModel(rowKey);
-      List<Cell> kvs = rs.listCells();
-      for (Cell kv : kvs) {
-        rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv
-            .getTimestamp(), CellUtil.cloneValue(kv)));
-      }
-      cellSetModel.addRow(rModel);
-    }
-    return cellSetModel;
-  }
-}
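
writeToStream() frames the response as repeated chunks: a 2-byte big-endian length prefix (Bytes.toBytes on a short, which also caps a chunk at 32767 bytes) followed by that many bytes of serialized CellSetModel. A sketch of the matching reader; the responseStream source is hypothetical and would be the HTTP body of a streaming scan:

    // Sketch: consume chunks framed by write() above.
    static void readCellSets(java.io.InputStream responseStream) throws java.io.IOException {
      java.io.DataInputStream in = new java.io.DataInputStream(responseStream);
      while (true) {
        short len;
        try {
          len = in.readShort();            // 2-byte big-endian length prefix
        } catch (java.io.EOFException eof) {
          break;                           // stream exhausted
        }
        byte[] chunk = new byte[len];
        in.readFully(chunk);
        CellSetModel model = new CellSetModel();
        model.getObjectFromMessage(chunk); // deserialize one batch of rows
        // ... consume model.getRows() ...
      }
    }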

http://git-wip-us.apache.org/repos/asf/hbase/blob/052a6f07/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
deleted file mode 100644
index 9f59b06..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.rest;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.http.InfoServer;
-import org.apache.hadoop.hbase.rest.filter.AuthFilter;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.util.HttpServerUtil;
-import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.hbase.util.VersionInfo;
-import org.apache.hadoop.net.DNS;
-import org.mortbay.jetty.Connector;
-import org.mortbay.jetty.Server;
-import org.mortbay.jetty.nio.SelectChannelConnector;
-import org.mortbay.jetty.security.SslSelectChannelConnector;
-import org.mortbay.jetty.servlet.Context;
-import org.mortbay.jetty.servlet.FilterHolder;
-import org.mortbay.jetty.servlet.ServletHolder;
-import org.mortbay.thread.QueuedThreadPool;
-
-import com.google.common.base.Preconditions;
-import com.sun.jersey.api.json.JSONConfiguration;
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-
-/**
- * Main class for launching REST gateway as a servlet hosted by Jetty.
- * <p>
- * The following options are supported:
- * <ul>
- * <li>-p --port : service port</li>
- * <li>-ro --readonly : server mode</li>
- * </ul>
- */
[email protected]
-public class RESTServer implements Constants {
-
-  private static void printUsageAndExit(Options options, int exitCode) {
-    HelpFormatter formatter = new HelpFormatter();
-    formatter.printHelp("bin/hbase rest start", "", options,
-      "\nTo run the REST server as a daemon, execute " +
-      "bin/hbase-daemon.sh start|stop rest [--infoport <port>] [-p <port>] [-ro]\n", true);
-    System.exit(exitCode);
-  }
-
-  /**
-   * The main method for the HBase rest server.
-   * @param args command-line arguments
-   * @throws Exception exception
-   */
-  public static void main(String[] args) throws Exception {
-    Log LOG = LogFactory.getLog("RESTServer");
-
-    VersionInfo.logVersion();
-    FilterHolder authFilter = null;
-    Configuration conf = HBaseConfiguration.create();
-    Class<? extends ServletContainer> containerClass = ServletContainer.class;
-    UserProvider userProvider = UserProvider.instantiate(conf);
-    // login the server principal (if using secure Hadoop)
-    if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) {
-      String machineName = Strings.domainNamePointerToHostName(
-        DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"),
-          conf.get(REST_DNS_NAMESERVER, "default")));
-      String keytabFilename = conf.get(REST_KEYTAB_FILE);
-      Preconditions.checkArgument(keytabFilename != null && !keytabFilename.isEmpty(),
-        REST_KEYTAB_FILE + " should be set if security is enabled");
-      String principalConfig = conf.get(REST_KERBEROS_PRINCIPAL);
-      Preconditions.checkArgument(principalConfig != null && !principalConfig.isEmpty(),
-        REST_KERBEROS_PRINCIPAL + " should be set if security is enabled");
-      userProvider.login(REST_KEYTAB_FILE, REST_KERBEROS_PRINCIPAL, machineName);
-      if (conf.get(REST_AUTHENTICATION_TYPE) != null) {
-        containerClass = RESTServletContainer.class;
-        authFilter = new FilterHolder();
-        authFilter.setClassName(AuthFilter.class.getName());
-        authFilter.setName("AuthenticationFilter");
-      }
-    }
-
-    RESTServlet servlet = RESTServlet.getInstance(conf, userProvider);
-
-    Options options = new Options();
-    options.addOption("p", "port", true, "Port to bind to [default: 8080]");
-    options.addOption("ro", "readonly", false, "Respond only to GET HTTP " +
-      "method requests [default: false]");
-    options.addOption(null, "infoport", true, "Port for web UI");
-
-    CommandLine commandLine = null;
-    try {
-      commandLine = new PosixParser().parse(options, args);
-    } catch (ParseException e) {
-      LOG.error("Could not parse: ", e);
-      printUsageAndExit(options, -1);
-    }
-
-    // check for user-defined port setting, if so override the conf
-    if (commandLine != null && commandLine.hasOption("port")) {
-      String val = commandLine.getOptionValue("port");
-      servlet.getConfiguration()
-          .setInt("hbase.rest.port", Integer.valueOf(val));
-      LOG.debug("port set to " + val);
-    }
-
-    // check if server should only process GET requests, if so override the conf
-    if (commandLine != null && commandLine.hasOption("readonly")) {
-      servlet.getConfiguration().setBoolean("hbase.rest.readonly", true);
-      LOG.debug("readonly set to true");
-    }
-
-    // check for user-defined info server port setting, if so override the conf
-    if (commandLine != null && commandLine.hasOption("infoport")) {
-      String val = commandLine.getOptionValue("infoport");
-      servlet.getConfiguration()
-          .setInt("hbase.rest.info.port", Integer.valueOf(val));
-      LOG.debug("Web UI port set to " + val);
-    }
-
-    @SuppressWarnings("unchecked")
-    List<String> remainingArgs = commandLine != null ?
-        commandLine.getArgList() : new ArrayList<String>();
-    if (remainingArgs.size() != 1) {
-      printUsageAndExit(options, 1);
-    }
-
-    String command = remainingArgs.get(0);
-    if ("start".equals(command)) {
-      // continue and start container
-    } else if ("stop".equals(command)) {
-      System.exit(1);
-    } else {
-      printUsageAndExit(options, 1);
-    }
-
-    // set up the Jersey servlet container for Jetty
-    ServletHolder sh = new ServletHolder(containerClass);
-    sh.setInitParameter(
-      "com.sun.jersey.config.property.resourceConfigClass",
-      ResourceConfig.class.getCanonicalName());
-    sh.setInitParameter("com.sun.jersey.config.property.packages",
-      "jetty");
-    // The servlet holder below is instantiated to only handle the case
-    // of the /status/cluster returning arrays of nodes (live/dead). Without
-    // this servlet holder, the problem is that the node arrays in the response
-    // are collapsed to single nodes. We want to be able to treat the
-    // node lists as POJO in the response to /status/cluster servlet call,
-    // but not change the behavior for any of the other servlets
-    // Hence we don't use the servlet holder for all servlets / paths
-    ServletHolder shPojoMap = new ServletHolder(containerClass);
-    @SuppressWarnings("unchecked")
-    Map<String, String> shInitMap = sh.getInitParameters();
-    for (Entry<String, String> e : shInitMap.entrySet()) {
-      shPojoMap.setInitParameter(e.getKey(), e.getValue());
-    }
-    shPojoMap.setInitParameter(JSONConfiguration.FEATURE_POJO_MAPPING, "true");
-
-    // set up Jetty and run the embedded server
-
-    Server server = new Server();
-
-    Connector connector = new SelectChannelConnector();
-    if(conf.getBoolean(REST_SSL_ENABLED, false)) {
-      SslSelectChannelConnector sslConnector = new SslSelectChannelConnector();
-      String keystore = conf.get(REST_SSL_KEYSTORE_STORE);
-      String password = HBaseConfiguration.getPassword(conf,
-        REST_SSL_KEYSTORE_PASSWORD, null);
-      String keyPassword = HBaseConfiguration.getPassword(conf,
-        REST_SSL_KEYSTORE_KEYPASSWORD, password);
-      sslConnector.setKeystore(keystore);
-      sslConnector.setPassword(password);
-      sslConnector.setKeyPassword(keyPassword);
-      connector = sslConnector;
-    }
-    connector.setPort(servlet.getConfiguration().getInt("hbase.rest.port", 8080));
-    connector.setHost(servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0"));
-
-    server.addConnector(connector);
-
-    // Set the default max thread number to 100 to limit
-    // the number of concurrent requests so that REST server doesn't OOM easily.
-    // Jetty set the default max thread number to 250, if we don't set it.
-    //
-    // Our default min thread number 2 is the same as that used by Jetty.
-    int maxThreads = servlet.getConfiguration().getInt("hbase.rest.threads.max", 100);
-    int minThreads = servlet.getConfiguration().getInt("hbase.rest.threads.min", 2);
-    QueuedThreadPool threadPool = new QueuedThreadPool(maxThreads);
-    threadPool.setMinThreads(minThreads);
-    server.setThreadPool(threadPool);
-
-    server.setSendServerVersion(false);
-    server.setSendDateHeader(false);
-    server.setStopAtShutdown(true);
-      // set up context
-    Context context = new Context(server, "/", Context.SESSIONS);
-    context.addServlet(shPojoMap, "/status/cluster");
-    context.addServlet(sh, "/*");
-    if (authFilter != null) {
-      context.addFilter(authFilter, "/*", 1);
-    }
-
-    // Load filters from configuration.
-    String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES,
-      ArrayUtils.EMPTY_STRING_ARRAY);
-    for (String filter : filterClasses) {
-      filter = filter.trim();
-      context.addFilter(Class.forName(filter), "/*", 0);
-    }
-    HttpServerUtil.constrainHttpMethods(context);
-
-    // Put up info server.
-    int port = conf.getInt("hbase.rest.info.port", 8085);
-    if (port >= 0) {
-      conf.setLong("startcode", System.currentTimeMillis());
-      String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
-      InfoServer infoServer = new InfoServer("rest", a, port, false, conf);
-      infoServer.setAttribute("hbase.conf", conf);
-      infoServer.start();
-    }
-
-    // start server
-    server.start();
-    server.join();
-  }
-}
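
Everything the launcher honors is plain Configuration: the command-line flags above only override the same keys before Jetty is built. A sketch of setting them programmatically instead; the values shown are examples, not defaults this commit changes:

    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.rest.port", 8080);              // -p / --port
    conf.setBoolean("hbase.rest.readonly", true);      // -ro / --readonly
    conf.setInt("hbase.rest.info.port", 8085);         // --infoport
    conf.setInt("hbase.rest.threads.max", 100);        // Jetty thread-pool cap
    conf.setBoolean("hbase.rest.ssl.enabled", false);  // REST_SSL_ENABLED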
