Repository: hbase
Updated Branches:
  refs/heads/master f8c3a5b15 -> 3cc5d1903


http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
index d50005b..25aeeed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -153,9 +154,9 @@ public class Merge extends Configured implements Tool {
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta);
     }
-    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
+    TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
       this.rootdir, this.tableName);
-    HRegion merged = merge(htd, meta, info1, info2);
+    HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2);
 
     LOG.info("Adding " + merged.getRegionInfo() + " to " +
         meta.getRegionInfo());
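
As a side note on this hunk: FSTableDescriptors.getTableDescriptorFromFs now returns a TableDescriptor that carries the HTableDescriptor together with the table state, so callers that still need the old type unwrap it. A minimal sketch of the pattern (conf, rootdir and tableName stand in for whatever the caller already has):

    // read the descriptor for a table directly from the filesystem
    FileSystem fs = FileSystem.get(conf);
    TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, tableName);
    // APIs that still take the old type get the wrapped HTableDescriptor;
    // the persisted state is available via td.getTableState()
    HTableDescriptor htd = td.getHTableDescriptor();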

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
index f773b06..f4c0c77 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
@@ -18,8 +18,11 @@
 package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
+import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -27,8 +30,12 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationStateZKBase;
@@ -151,8 +158,9 @@ public class ZKDataMigrator extends Configured implements Tool {
       }
       byte[] data = ZKUtil.getData(zkw, znode);
       if (ProtobufUtil.isPBMagicPrefix(data)) continue;
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      builder.setState(ZooKeeperProtos.Table.State.valueOf(Bytes.toString(data)));
+      ZooKeeperProtos.DeprecatedTableState.Builder builder =
+          ZooKeeperProtos.DeprecatedTableState.newBuilder();
+      builder.setState(ZooKeeperProtos.DeprecatedTableState.State.valueOf(Bytes.toString(data)));
       data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
       ZKUtil.setData(zkw, znode, data);
     }
@@ -247,6 +255,77 @@ public class ZKDataMigrator extends Configured implements Tool {
     }
   }
 
+  /**
+   * Method for table state migration: reads the table states from ZooKeeper,
+   * applies them to the internal (table descriptor based) state, and then
+   * deletes them from ZooKeeper.
+   * Used by the master to clean up the migration from ZooKeeper based
+   * table states to table descriptor based table states.
+   */
+  @Deprecated
+  public static Map<TableName, TableState.State> queryForTableStates(ZooKeeperWatcher zkw)
+      throws KeeperException, InterruptedException {
+    Map<TableName, TableState.State> rv = new HashMap<>();
+    List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
+    if (children == null)
+      return rv;
+    for (String child: children) {
+      TableName tableName = TableName.valueOf(child);
+      ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName);
+      TableState.State newState = TableState.State.ENABLED;
+      if (state != null) {
+        switch (state) {
+        case ENABLED:
+          newState = TableState.State.ENABLED;
+          break;
+        case DISABLED:
+          newState = TableState.State.DISABLED;
+          break;
+        case DISABLING:
+          newState = TableState.State.DISABLING;
+          break;
+        case ENABLING:
+          newState = TableState.State.ENABLING;
+          break;
+        default:
+        }
+      }
+      rv.put(tableName, newState);
+    }
+    return rv;
+  }
+
+  /**
+   * Gets table state from ZK.
+   * @param zkw ZooKeeperWatcher instance to use
+   * @param tableName table we're checking
+   * @return Null or {@link ZooKeeperProtos.DeprecatedTableState.State} found in znode.
+   * @throws KeeperException
+   */
+  @Deprecated
+  private static  ZooKeeperProtos.DeprecatedTableState.State getTableState(
+      final ZooKeeperWatcher zkw, final TableName tableName)
+      throws KeeperException, InterruptedException {
+    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
+    byte [] data = ZKUtil.getData(zkw, znode);
+    if (data == null || data.length <= 0) return null;
+    try {
+      ProtobufUtil.expectPBMagicPrefix(data);
+      ZooKeeperProtos.DeprecatedTableState.Builder builder =
+          ZooKeeperProtos.DeprecatedTableState.newBuilder();
+      int magicLen = ProtobufUtil.lengthOfPBMagic();
+      ZooKeeperProtos.DeprecatedTableState t = builder.mergeFrom(data,
+          magicLen, data.length - magicLen).build();
+      return t.getState();
+    } catch (InvalidProtocolBufferException e) {
+      KeeperException ke = new KeeperException.DataInconsistencyException();
+      ke.initCause(e);
+      throw ke;
+    } catch (DeserializationException e) {
+      throw ZKUtil.convert(e);
+    }
+  }
+
   public static void main(String args[]) throws Exception {
     System.exit(ToolRunner.run(HBaseConfiguration.create(), new ZKDataMigrator(), args));
   }
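
A minimal sketch of how queryForTableStates might be consumed during this migration; masterStateManager and the final cleanup call are illustrative assumptions, not part of the patch:

    // read the legacy znode based states (tables with no znode come back ENABLED)
    Map<TableName, TableState.State> legacy = ZKDataMigrator.queryForTableStates(zkw);
    for (Map.Entry<TableName, TableState.State> entry : legacy.entrySet()) {
      // re-apply each state through the new table descriptor based store
      masterStateManager.setTableState(entry.getKey(), entry.getValue());
    }
    // once applied, the old znodes under zkw.tableZNode are no longer needed
    ZKUtil.deleteChildrenRecursively(zkw, zkw.tableZNode);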

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
deleted file mode 100644
index 1aff12f..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
+++ /dev/null
@@ -1,330 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.zookeeper.KeeperException;
-
-import java.io.InterruptedIOException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Implementation of TableStateManager which reads, caches and sets state
- * up in ZooKeeper.  If multiple read/write clients, will make for confusion.
- * Code running on client side without consensus context should use
- * {@link ZKTableStateClientSideReader} instead.
- *
- * <p>To save on trips to the zookeeper ensemble, internally we cache table
- * state.
- */
-@InterfaceAudience.Private
-public class ZKTableStateManager implements TableStateManager {
-  // A znode will exist under the table directory if it is in any of the
-  // following states: {@link TableState#ENABLING} , {@link TableState#DISABLING},
-  // or {@link TableState#DISABLED}.  If {@link TableState#ENABLED}, there will
-  // be no entry for a table in zk.  Thats how it currently works.
-
-  private static final Log LOG = LogFactory.getLog(ZKTableStateManager.class);
-  private final ZooKeeperWatcher watcher;
-
-  /**
-   * Cache of what we found in zookeeper so we don't have to go to zk ensemble
-   * for every query.  Synchronize access rather than use concurrent Map because
-   * synchronization needs to span query of zk.
-   */
-  private final Map<TableName, ZooKeeperProtos.Table.State> cache =
-    new HashMap<TableName, ZooKeeperProtos.Table.State>();
-
-  public ZKTableStateManager(final ZooKeeperWatcher zkw) throws KeeperException,
-      InterruptedException {
-    super();
-    this.watcher = zkw;
-    populateTableStates();
-  }
-
-  /**
-   * Gets a list of all the tables set as disabled in zookeeper.
-   * @throws KeeperException, InterruptedException
-   */
-  private void populateTableStates() throws KeeperException, InterruptedException {
-    synchronized (this.cache) {
-      List<String> children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode);
-      if (children == null) return;
-      for (String child: children) {
-        TableName tableName = TableName.valueOf(child);
-        ZooKeeperProtos.Table.State state = getTableState(this.watcher, tableName);
-        if (state != null) this.cache.put(tableName, state);
-      }
-    }
-  }
-
-  /**
-   * Sets table state in ZK. Sets no watches.
-   *
-   * {@inheritDoc}
-   */
-  @Override
-  public void setTableState(TableName tableName, ZooKeeperProtos.Table.State state)
-  throws CoordinatedStateException {
-    synchronized (this.cache) {
-      LOG.warn("Moving table " + tableName + " state from " + 
this.cache.get(tableName)
-        + " to " + state);
-      try {
-        setTableStateInZK(tableName, state);
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-    }
-  }
-
-  /**
-   * Checks and sets table state in ZK. Sets no watches.
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean setTableStateIfInStates(TableName tableName,
-                                         ZooKeeperProtos.Table.State newState,
-                                         ZooKeeperProtos.Table.State... states)
-      throws CoordinatedStateException {
-    synchronized (this.cache) {
-      // Transition ENABLED->DISABLING has to be performed with a hack, because
-      // we treat empty state as enabled in this case because 0.92- clusters.
-      if (
-          (newState == ZooKeeperProtos.Table.State.DISABLING) &&
-               this.cache.get(tableName) != null && !isTableState(tableName, states) ||
-          (newState != ZooKeeperProtos.Table.State.DISABLING &&
-               !isTableState(tableName, states) )) {
-        return false;
-      }
-      try {
-        setTableStateInZK(tableName, newState);
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-      return true;
-    }
-  }
-
-  /**
-   * Checks and sets table state in ZK. Sets no watches.
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean setTableStateIfNotInStates(TableName tableName,
-                                            ZooKeeperProtos.Table.State newState,
-                                            ZooKeeperProtos.Table.State... states)
-    throws CoordinatedStateException {
-    synchronized (this.cache) {
-      if (isTableState(tableName, states)) {
-        return false;
-      }
-      try {
-        setTableStateInZK(tableName, newState);
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-      return true;
-    }
-  }
-
-  private void setTableStateInZK(final TableName tableName,
-                                 final ZooKeeperProtos.Table.State state)
-      throws KeeperException {
-    String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString());
-    if (ZKUtil.checkExists(this.watcher, znode) == -1) {
-      ZKUtil.createAndFailSilent(this.watcher, znode);
-    }
-    synchronized (this.cache) {
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      builder.setState(state);
-      byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
-      ZKUtil.setData(this.watcher, znode, data);
-      this.cache.put(tableName, state);
-    }
-  }
-
-  /**
-   * Checks if table is marked in specified state in ZK.
-   *
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean isTableState(final TableName tableName,
-      final ZooKeeperProtos.Table.State... states) {
-    synchronized (this.cache) {
-      ZooKeeperProtos.Table.State currentState = this.cache.get(tableName);
-      return isTableInState(Arrays.asList(states), currentState);
-    }
-  }
-
-  /**
-   * Deletes the table in zookeeper.  Fails silently if the
-   * table is not currently disabled in zookeeper.  Sets no watches.
-   *
-   * {@inheritDoc}
-   */
-  @Override
-  public void setDeletedTable(final TableName tableName)
-  throws CoordinatedStateException {
-    synchronized (this.cache) {
-      if (this.cache.remove(tableName) == null) {
-        LOG.warn("Moving table " + tableName + " state to deleted but was " +
-          "already deleted");
-      }
-      try {
-        ZKUtil.deleteNodeFailSilent(this.watcher,
-          ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()));
-      } catch (KeeperException e) {
-        throw new CoordinatedStateException(e);
-      }
-    }
-  }
-
-  /**
-   * check if table is present.
-   *
-   * @param tableName table we're working on
-   * @return true if the table is present
-   */
-  @Override
-  public boolean isTablePresent(final TableName tableName) {
-    synchronized (this.cache) {
-      ZooKeeperProtos.Table.State state = this.cache.get(tableName);
-      return !(state == null);
-    }
-  }
-
-  /**
-   * Gets a list of all the tables set as disabling in zookeeper.
-   * @return Set of disabling tables, empty Set if none
-   * @throws CoordinatedStateException if error happened in underlying coordination engine
-   */
-  @Override
-  public Set<TableName> getTablesInStates(ZooKeeperProtos.Table.State... states)
-    throws InterruptedIOException, CoordinatedStateException {
-    try {
-      return getAllTables(states);
-    } catch (KeeperException e) {
-      throw new CoordinatedStateException(e);
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states,
-                                       boolean deletePermanentState)
-      throws CoordinatedStateException {
-    synchronized (this.cache) {
-      if (isTableState(tableName, states)) {
-        this.cache.remove(tableName);
-        if (deletePermanentState) {
-          try {
-            ZKUtil.deleteNodeFailSilent(this.watcher,
-                ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()));
-          } catch (KeeperException e) {
-            throw new CoordinatedStateException(e);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Gets a list of all the tables of specified states in zookeeper.
-   * @return Set of tables of specified states, empty Set if none
-   * @throws KeeperException
-   */
-  Set<TableName> getAllTables(final ZooKeeperProtos.Table.State... states)
-      throws KeeperException, InterruptedIOException {
-
-    Set<TableName> allTables = new HashSet<TableName>();
-    List<String> children =
-      ZKUtil.listChildrenNoWatch(watcher, watcher.tableZNode);
-    if(children == null) return allTables;
-    for (String child: children) {
-      TableName tableName = TableName.valueOf(child);
-      ZooKeeperProtos.Table.State state;
-      try {
-        state = getTableState(watcher, tableName);
-      } catch (InterruptedException e) {
-        throw new InterruptedIOException();
-      }
-      for (ZooKeeperProtos.Table.State expectedState: states) {
-        if (state == expectedState) {
-          allTables.add(tableName);
-          break;
-        }
-      }
-    }
-    return allTables;
-  }
-
-  /**
-   * Gets table state from ZK.
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return Null or {@link ZooKeeperProtos.Table.State} found in znode.
-   * @throws KeeperException
-   */
-  private ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw,
-                                                   final TableName tableName)
-    throws KeeperException, InterruptedException {
-    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
-    byte [] data = ZKUtil.getData(zkw, znode);
-    if (data == null || data.length <= 0) return null;
-    try {
-      ProtobufUtil.expectPBMagicPrefix(data);
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      int magicLen = ProtobufUtil.lengthOfPBMagic();
-      ZooKeeperProtos.Table t = builder.mergeFrom(data, magicLen, data.length - magicLen).build();
-      return t.getState();
-    } catch (InvalidProtocolBufferException e) {
-      KeeperException ke = new KeeperException.DataInconsistencyException();
-      ke.initCause(e);
-      throw ke;
-    } catch (DeserializationException e) {
-      throw ZKUtil.convert(e);
-    }
-  }
-
-  /**
-   * @return true if current state isn't null and is contained
-   * in the list of expected states.
-   */
-  private boolean isTableInState(final List<ZooKeeperProtos.Table.State> expectedStates,
-                       final ZooKeeperProtos.Table.State currentState) {
-    return currentState != null && expectedStates.contains(currentState);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 45bc524..dd9384d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2818,6 +2818,48 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   }
 
   /**
+   * Waits for a table to be 'disabled'.  Disabled means that the table state has been set to 'disabled'.
+   * Will timeout after the default period (30 seconds).
+   * @param table Table to wait on.
+   * @throws InterruptedException
+   * @throws IOException
+   */
+  public void waitTableDisabled(byte[] table)
+          throws InterruptedException, IOException {
+    waitTableDisabled(getHBaseAdmin(), table, 30000);
+  }
+
+  public void waitTableDisabled(Admin admin, byte[] table)
+          throws InterruptedException, IOException {
+    waitTableDisabled(admin, table, 30000);
+  }
+
+  /**
+   * Waits for a table to be 'disabled'.  Disabled means that the table state has been set to 'disabled'.
+   * @see #waitTableAvailable(byte[])
+   * @param table Table to wait on.
+   * @param timeoutMillis Time to wait on it being marked disabled.
+   * @throws InterruptedException
+   * @throws IOException
+   */
+  public void waitTableDisabled(byte[] table, long timeoutMillis)
+          throws InterruptedException, IOException {
+    waitTableDisabled(getHBaseAdmin(), table, timeoutMillis);
+  }
+
+  public void waitTableDisabled(Admin admin, byte[] table, long timeoutMillis)
+          throws InterruptedException, IOException {
+    TableName tableName = TableName.valueOf(table);
+    long startWait = System.currentTimeMillis();
+    while (!admin.isTableDisabled(tableName)) {
+      assertTrue("Timed out waiting for table to become disabled " +
+                      Bytes.toStringBinary(table),
+              System.currentTimeMillis() - startWait < timeoutMillis);
+      Thread.sleep(200);
+    }
+  }
+
+  /**
    * Make sure that at least the specified number of region servers
    * are running
    * @param num minimum number of region servers that should be running
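
A small usage sketch for the new waitTableDisabled helpers, assuming a test that already has TEST_UTIL running a mini cluster (the table name is illustrative):

    TableName tableName = TableName.valueOf("testDisableWait");
    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
    TEST_UTIL.getHBaseAdmin().disableTable(tableName);
    // blocks until Admin.isTableDisabled(tableName) returns true,
    // or fails the assertion after the default 30 second timeout
    TEST_UTIL.waitTableDisabled(tableName.getName());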

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
index 26a8d2c..4fa945a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
@@ -148,8 +148,8 @@ public class TestHColumnDescriptorDefaultVersions {
     // Verify descriptor from HDFS
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
-    hcds = htd.getColumnFamilies();
+    TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
+    hcds = td.getHTableDescriptor().getColumnFamilies();
     verifyHColumnDescriptor(expected, hcds, tableName, families);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java
new file mode 100644
index 0000000..19c1136
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java
@@ -0,0 +1,57 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test setting values in the descriptor
+ */
+@Category(SmallTests.class)
+public class TestTableDescriptor {
+  final static Log LOG = LogFactory.getLog(TestTableDescriptor.class);
+
+  @Test
+  public void testPb() throws DeserializationException, IOException {
+    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC);
+    final int v = 123;
+    htd.setMaxFileSize(v);
+    htd.setDurability(Durability.ASYNC_WAL);
+    htd.setReadOnly(true);
+    htd.setRegionReplication(2);
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
+    byte[] bytes = td.toByteArray();
+    TableDescriptor deserializedTd = TableDescriptor.parseFrom(bytes);
+    assertEquals(td, deserializedTd);
+    assertEquals(td.getHTableDescriptor(), deserializedTd.getHTableDescriptor());
+    assertEquals(td.getTableState(), deserializedTd.getTableState());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
index b6b446d..6fe6ede 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
@@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.wal.HLogUtilsForTests;
@@ -66,7 +65,6 @@ import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -258,7 +256,7 @@ public class TestAdmin {
     this.admin.disableTable(ht.getName());
     assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster()
         
.getMaster().getAssignmentManager().getTableStateManager().isTableState(
-        ht.getName(), ZooKeeperProtos.Table.State.DISABLED));
+        ht.getName(), TableState.State.DISABLED));
 
     // Test that table is disabled
     get = new Get(row);
@@ -273,7 +271,7 @@ public class TestAdmin {
     this.admin.enableTable(table);
     assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster()
         
.getMaster().getAssignmentManager().getTableStateManager().isTableState(
-        ht.getName(), ZooKeeperProtos.Table.State.ENABLED));
+        ht.getName(), TableState.State.ENABLED));
 
     // Test that table is enabled
     try {
@@ -346,7 +344,7 @@ public class TestAdmin {
     assertEquals(numTables + 1, tables.length);
     assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster()
         
.getMaster().getAssignmentManager().getTableStateManager().isTableState(
-        TableName.valueOf("testCreateTable"), 
ZooKeeperProtos.Table.State.ENABLED));
+        TableName.valueOf("testCreateTable"), TableState.State.ENABLED));
   }
 
   @Test (timeout=300000)
@@ -1128,8 +1126,7 @@ public class TestAdmin {
     ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
     TableName tableName = TableName.valueOf("testMasterAdmin");
     TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
-    while (!ZKTableStateClientSideReader.isEnabledTable(zkw,
-      TableName.valueOf("testMasterAdmin"))) {
+    while (!this.admin.isTableEnabled(TableName.valueOf("testMasterAdmin"))) {
       Thread.sleep(10);
     }
     this.admin.disableTable(tableName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
index 4acece3..93f868c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
@@ -53,15 +53,14 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -127,7 +126,8 @@ public class TestAssignmentManagerOnCluster {
       }
       RegionState metaState =
         MetaTableLocator.getMetaRegionState(master.getZooKeeper());
-      assertEquals("Meta should be not in transition", metaState.getState(), 
State.OPEN);
+      assertEquals("Meta should be not in transition",
+          metaState.getState(), RegionState.State.OPEN);
       assertNotEquals("Meta should be moved off master",
         metaState.getServerName(), master.getServerName());
       assertEquals("Meta should be on the meta server",
@@ -153,7 +153,8 @@ public class TestAssignmentManagerOnCluster {
         regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO));
       // Now, make sure meta is registered in zk
       metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper());
-      assertEquals("Meta should be not in transition", metaState.getState(), 
State.OPEN);
+      assertEquals("Meta should be not in transition",
+          metaState.getState(), RegionState.State.OPEN);
       assertEquals("Meta should be assigned", metaState.getServerName(),
         
regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO));
       assertNotEquals("Meta should be assigned on a different server",
@@ -209,7 +210,8 @@ public class TestAssignmentManagerOnCluster {
     String table = "testAssignRegionOnRestartedServer";
     
TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts",
 20);
     TEST_UTIL.getMiniHBaseCluster().stopMaster(0);
-    TEST_UTIL.getMiniHBaseCluster().startMaster(); //restart the master so that conf take into affect
+    // restart the master so that the conf takes effect
+    TEST_UTIL.getMiniHBaseCluster().startMaster();
 
     ServerName deadServer = null;
     HMaster master = null;
@@ -619,9 +621,9 @@ public class TestAssignmentManagerOnCluster {
         }
       }
       am.regionOffline(hri);
-      am.getRegionStates().updateRegionState(hri, State.PENDING_OPEN, destServerName);
+      am.getRegionStates().updateRegionState(hri, RegionState.State.PENDING_OPEN, destServerName);
 
-      am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLING);
+      am.getTableStateManager().setTableState(table, TableState.State.DISABLING);
       List<HRegionInfo> toAssignRegions = am.processServerShutdown(destServerName);
       assertTrue("Regions to be assigned should be empty.", toAssignRegions.isEmpty());
       assertTrue("Regions to be assigned should be empty.", am.getRegionStates()
@@ -630,7 +632,7 @@ public class TestAssignmentManagerOnCluster {
       if (hri != null && serverName != null) {
         am.regionOnline(hri, serverName);
       }
-      am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLED);
+      am.getTableStateManager().setTableState(table, TableState.State.DISABLED);
       TEST_UTIL.deleteTable(table);
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 4d590b2..0103639 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
@@ -299,13 +301,18 @@ public class TestCatalogJanitor {
       return new TableDescriptors() {
         @Override
         public HTableDescriptor remove(TableName tablename) throws IOException {
-          // TODO Auto-generated method stub
+          // noop
           return null;
         }
 
         @Override
         public Map<String, HTableDescriptor> getAll() throws IOException {
-          // TODO Auto-generated method stub
+          // noop
+          return null;
+        }
+
+        @Override public Map<String, TableDescriptor> getAllDescriptors() throws IOException {
+          // noop
           return null;
         }
 
@@ -316,14 +323,24 @@ public class TestCatalogJanitor {
         }
 
         @Override
+        public TableDescriptor getDescriptor(TableName tablename)
+            throws IOException {
+          return createTableDescriptor();
+        }
+
+        @Override
         public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
           return null;
         }
 
         @Override
         public void add(HTableDescriptor htd) throws IOException {
-          // TODO Auto-generated method stub
+          // noop
+        }
 
+        @Override
+        public void add(TableDescriptor htd) throws IOException {
+          // noop
         }
       };
     }
@@ -408,6 +425,11 @@ public class TestCatalogJanitor {
     }
 
     @Override
+    public TableStateManager getTableStateManager() {
+      return null;
+    }
+
+    @Override
     public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b,
         boolean forcible) throws IOException {
     }
@@ -978,6 +1000,11 @@ public class TestCatalogJanitor {
     return htd;
   }
 
+  private TableDescriptor createTableDescriptor() {
+    TableDescriptor htd = new TableDescriptor(createHTableDescriptor(), TableState.State.ENABLED);
+    return htd;
+  }
+
   private MultiResponse buildMultiResponse(MultiRequest req) {
     MultiResponse.Builder builder = MultiResponse.newBuilder();
     RegionActionResult.Builder regionActionResultBuilder =

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index 9feb893..6129dd7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -82,7 +82,7 @@ public class TestMaster {
 
     HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME);
     assertTrue(m.assignmentManager.getTableStateManager().isTableState(TABLENAME,
-      ZooKeeperProtos.Table.State.ENABLED));
+      TableState.State.ENABLED));
     TEST_UTIL.loadTable(ht, FAMILYNAME, false);
     ht.close();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
index d04afdf..56961d5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -95,8 +95,8 @@ public class TestMasterRestartAfterDisablingTable {
 
     assertTrue("The table should not be in enabled state", cluster.getMaster()
         .getAssignmentManager().getTableStateManager().isTableState(
-        TableName.valueOf("tableRestart"), 
ZooKeeperProtos.Table.State.DISABLED,
-        ZooKeeperProtos.Table.State.DISABLING));
+        TableName.valueOf("tableRestart"), TableState.State.DISABLED,
+        TableState.State.DISABLING));
     log("Enabling table\n");
     // Need a new Admin, the previous one is on the old master
     Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
@@ -111,7 +111,7 @@ public class TestMasterRestartAfterDisablingTable {
           6, regions.size());
     assertTrue("The table should be in enabled state", cluster.getMaster()
         .getAssignmentManager().getTableStateManager()
-        .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED));
+        .isTableState(TableName.valueOf("tableRestart"), TableState.State.ENABLED));
     ht.close();
     TEST_UTIL.shutdownMiniCluster();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
index 376729b..5d24368 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -393,12 +394,14 @@ public class TestTableLockManager {
 
     alterThread.start();
     splitThread.start();
+    TEST_UTIL.waitTableEnabled(tableName.toBytes());
     while (true) {
       List<HRegionInfo> regions = admin.getTableRegions(tableName);
       LOG.info(String.format("Table #regions: %d regions: %s:", regions.size(), regions));
       assertEquals(admin.getTableDescriptor(tableName), desc);
       for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) {
-        assertEquals(desc, region.getTableDesc());
+        HTableDescriptor regionTableDesc = region.getTableDesc();
+        assertEquals(desc, regionTableDesc);
       }
       if (regions.size() >= 5) {
         break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
index 3fe3977..0d51875 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -154,8 +155,9 @@ public class TestTableDescriptorModification {
     // Verify descriptor from HDFS
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
-    verifyTableDescriptor(htd, tableName, families);
+    TableDescriptor td =
+        FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
+    verifyTableDescriptor(td.getHTableDescriptor(), tableName, families);
   }
 
   private void verifyTableDescriptor(final HTableDescriptor htd,

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index c147fd0..6a9c0dc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.client.Admin;
@@ -479,7 +480,8 @@ public class SnapshotTestingUtils {
         this.tableRegions = tableRegions;
         this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
         new FSTableDescriptors(conf)
-          .createTableDescriptorForTableDirectory(snapshotDir, htd, false);
+          .createTableDescriptorForTableDirectory(snapshotDir,
+              new TableDescriptor(htd), false);
       }
 
       public HTableDescriptor getTableDescriptor() {
@@ -574,7 +576,8 @@ public class SnapshotTestingUtils {
     private RegionData[] createTable(final HTableDescriptor htd, final int nregions)
         throws IOException {
       Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
-      new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false);
+      new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir,
+          new TableDescriptor(htd), false);
 
       assertTrue(nregions % 2 == 0);
       RegionData[] regions = new RegionData[nregions];

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index daf5593..839091c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -28,12 +28,13 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Comparator;
-
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -69,14 +70,15 @@ public class TestFSTableDescriptors {
   public void testCreateAndUpdate() throws IOException {
     Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate"));
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
-    assertTrue(fstd.createTableDescriptor(htd));
-    assertFalse(fstd.createTableDescriptor(htd));
+    assertTrue(fstd.createTableDescriptor(td));
+    assertFalse(fstd.createTableDescriptor(td));
     FileStatus [] statuses = fs.listStatus(testdir);
     assertTrue("statuses.length="+statuses.length, statuses.length == 1);
     for (int i = 0; i < 10; i++) {
-      fstd.updateTableDescriptor(htd);
+      fstd.updateTableDescriptor(td);
     }
     statuses = fs.listStatus(testdir);
     assertTrue(statuses.length == 1);
@@ -90,20 +92,29 @@ public class TestFSTableDescriptors {
     Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
     HTableDescriptor htd = new HTableDescriptor(
         TableName.valueOf("testSequenceidAdvancesOnTableInfo"));
+    TableDescriptor td = new TableDescriptor(htd);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
-    Path p0 = fstd.updateTableDescriptor(htd);
+    Path p0 = fstd.updateTableDescriptor(td);
     int i0 = FSTableDescriptors.getTableInfoSequenceId(p0);
-    Path p1 = fstd.updateTableDescriptor(htd);
+    Path p1 = fstd.updateTableDescriptor(td);
     // Assert we cleaned up the old file.
     assertTrue(!fs.exists(p0));
     int i1 = FSTableDescriptors.getTableInfoSequenceId(p1);
     assertTrue(i1 == i0 + 1);
-    Path p2 = fstd.updateTableDescriptor(htd);
+    Path p2 = fstd.updateTableDescriptor(td);
     // Assert we cleaned up the old file.
     assertTrue(!fs.exists(p1));
     int i2 = FSTableDescriptors.getTableInfoSequenceId(p2);
     assertTrue(i2 == i1 + 1);
+    td = new TableDescriptor(htd, TableState.State.DISABLED);
+    Path p3 = fstd.updateTableDescriptor(td);
+    // Assert we cleaned up the old file.
+    assertTrue(!fs.exists(p2));
+    int i3 = FSTableDescriptors.getTableInfoSequenceId(p3);
+    assertTrue(i3 == i2 + 1);
+    TableDescriptor descriptor = fstd.getDescriptor(htd.getTableName());
+    assertEquals(descriptor, td);
   }
 
   @Test
@@ -155,12 +166,13 @@ public class TestFSTableDescriptors {
     final String name = "testReadingHTDFromFS";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     Path rootdir = UTIL.getDataTestDir(name);
     FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
-    fstd.createTableDescriptor(htd);
-    HTableDescriptor htd2 =
+    fstd.createTableDescriptor(td);
+    TableDescriptor td2 =
       FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
-    assertTrue(htd.equals(htd2));
+    assertTrue(td.equals(td2));
   }
 
   @Test public void testHTableDescriptors()
@@ -180,7 +192,8 @@ public class TestFSTableDescriptors {
     final int count = 10;
     // Write out table infos.
     for (int i = 0; i < count; i++) {
-      HTableDescriptor htd = new HTableDescriptor(name + i);
+      TableDescriptor htd = new TableDescriptor(new HTableDescriptor(name + i),
+          TableState.State.ENABLED);
       htds.createTableDescriptor(htd);
     }
 
@@ -194,7 +207,7 @@ public class TestFSTableDescriptors {
     for (int i = 0; i < count; i++) {
       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
       htd.addFamily(new HColumnDescriptor("" + i));
-      htds.updateTableDescriptor(htd);
+      htds.updateTableDescriptor(new TableDescriptor(htd));
     }
     // Wait a while so mod time we write is for sure different.
     Thread.sleep(100);
@@ -277,18 +290,19 @@ public class TestFSTableDescriptors {
     Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(
         "testCreateTableDescriptorUpdatesIfThereExistsAlready"));
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
-    assertTrue(fstd.createTableDescriptor(htd));
-    assertFalse(fstd.createTableDescriptor(htd));
+    assertTrue(fstd.createTableDescriptor(td));
+    assertFalse(fstd.createTableDescriptor(td));
     htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
-    assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
+    assertTrue(fstd.createTableDescriptor(td)); //this will re-create
     Path tableDir = fstd.getTableDir(htd.getTableName());
     Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
     FileStatus[] statuses = fs.listStatus(tmpTableDir);
     assertTrue(statuses.length == 0);
 
-    assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
+    assertEquals(td, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
   }
 
 }
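
A condensed sketch of the round trip these tests exercise, under the same setup as above (fs, rootdir and the table name are placeholders): the table state is now persisted alongside the HTableDescriptor in the table info file and survives a write/read cycle.

    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("roundTrip"));
    // write the descriptor with an explicit state, then read it back
    fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.DISABLED));
    TableDescriptor readBack = fstd.getDescriptor(htd.getTableName());
    assertEquals(TableState.State.DISABLED, readBack.getTableState());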

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
index 11516de..38924e6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -146,7 +147,8 @@ public class TestMergeTool extends HBaseTestCase {
     try {
       // Create meta region
       createMetaRegion();
-      new FSTableDescriptors(this.fs, this.testDir).createTableDescriptor(this.desc);
+      new FSTableDescriptors(this.fs, this.testDir).createTableDescriptor(
+          new TableDescriptor(this.desc));
       /*
        * Create the regions we will merge
        */

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc5d190/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java
deleted file mode 100644
index bb3d3d3..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.zookeeper.KeeperException;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table;
-
-@Category({MiscTests.class, MediumTests.class})
-public class TestZKTableStateManager {
-  private static final Log LOG = LogFactory.getLog(TestZKTableStateManager.class);
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.startMiniZKCluster();
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniZKCluster();
-  }
-
-  @Test
-  public void testTableStates()
-      throws CoordinatedStateException, IOException, KeeperException, InterruptedException {
-    final TableName name =
-        TableName.valueOf("testDisabled");
-    Abortable abortable = new Abortable() {
-      @Override
-      public void abort(String why, Throwable e) {
-        LOG.info(why, e);
-      }
-
-      @Override
-      public boolean isAborted() {
-        return false;
-      }
-
-    };
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-      name.getNameAsString(), abortable, true);
-    TableStateManager zkt = new ZKTableStateManager(zkw);
-    assertFalse(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.DISABLING);
-    assertTrue(zkt.isTableState(name, Table.State.DISABLING));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.DISABLED);
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertTrue(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.ENABLING);
-    assertTrue(zkt.isTableState(name, Table.State.ENABLING));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.ENABLED);
-    assertTrue(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setDeletedTable(name);
-    assertFalse(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTablePresent(name));
-  }
-}
