Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 0f7712123 -> 3eb8021e3


HBASE-13469 Procedure v2 - Make procedure v2 configurable in branch-1.1 
(Stephen Yuan Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3eb8021e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3eb8021e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3eb8021e

Branch: refs/heads/branch-1.1
Commit: 3eb8021e38b9e92976f97e2d0c9577d3cff380f3
Parents: 0f77121
Author: Matteo Bertozzi <[email protected]>
Authored: Wed Apr 22 18:09:41 2015 +0100
Committer: Matteo Bertozzi <[email protected]>
Committed: Wed Apr 22 18:09:41 2015 +0100

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/master/HMaster.java | 193 ++++++++++++++-----
 .../hadoop/hbase/master/MasterRpcServices.java  |  28 ++-
 .../hadoop/hbase/master/MasterServices.java     |   5 +
 .../hbase/master/TableNamespaceManager.java     |  17 +-
 .../hadoop/hbase/quotas/MasterQuotaManager.java |  12 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java |   5 +
 .../hadoop/hbase/master/TestProcedureConf.java  |  93 +++++++++
 7 files changed, 300 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/3eb8021e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 530dcea..7ecfd8d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -90,7 +90,16 @@ import 
org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
+import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
+import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
+import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
 import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
+import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
+import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
+import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
+import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
+import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
+import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
 import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
@@ -293,6 +302,14 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
   //should we check encryption settings at master side, default true
   private final boolean masterCheckEncryption;
 
+  // This is for fallback to use the code from 1.0 release.
+  private enum ProcedureConf {
+    PROCEDURE_ENABLED, // default
+    HANDLER_USED, // handler code executed in DDL, procedure executor still 
starts
+    PROCEDURE_FULLY_DISABLED, // procedure fully disabled
+  }
+  private final ProcedureConf procedureConf;
+
   Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();
 
   // monitor for snapshot of hbase tables
@@ -370,6 +387,21 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
 
     this.metricsMaster = new MetricsMaster( new 
MetricsMasterWrapperImpl(this));
 
+    // Check configuration to see whether procedure is disabled (not executed 
at all),
+    // unused (not used to execute DDL, but executor starts to complete 
unfinished operations
+    // in procedure store), or enabled (default behavior).
+    String procedureConfString = conf.get("hbase.master.procedure.tableddl", 
"enabled");
+    if (procedureConfString.equalsIgnoreCase("disabled")) {
+      LOG.info("Master will use handler for new table DDL"
+        + " and all unfinished table DDLs in procedure store will be 
discarded.");
+      this.procedureConf = ProcedureConf.PROCEDURE_FULLY_DISABLED;
+    } else if (procedureConfString.equalsIgnoreCase("unused")) {
+      LOG.info("Master will use handler for new table DDL"
+        + " and all unfinished table DDLs in procedure store will continue to 
execute.");
+      this.procedureConf = ProcedureConf.HANDLER_USED;
+    } else {
+      this.procedureConf = ProcedureConf.PROCEDURE_ENABLED;
+    }
     // preload table descriptor at startup
     this.preLoadTableDescriptors = 
conf.getBoolean("hbase.master.preload.tabledescriptors", true);
 
@@ -1106,13 +1138,39 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
     if (this.mpmHost != null) this.mpmHost.stop("server shutting down.");
   }
 
+  /**
+   * Check whether the procedure executor is enabled
+   */
+  @Override
+  public boolean isMasterProcedureExecutorEnabled() {
+    return (this.procedureConf == ProcedureConf.PROCEDURE_ENABLED);
+  }
+
   private void startProcedureExecutor() throws IOException {
     final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
     final Path logDir = new Path(fileSystemManager.getRootDir(),
         MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
 
+    if (this.procedureConf == ProcedureConf.PROCEDURE_FULLY_DISABLED) {
+      // Clean up the procedure store so that we will be in a clean state when 
procedure
+      // is enabled later.
+      // Note: hbck might be needed for uncompleted procedures.
+      try {
+        fs.delete(logDir, true);
+        LOG.warn("Procedure executor is disabled from configuration. " +
+            "All the state logs from procedure store were removed. " +
+            "You should check the cluster state using HBCK.");
+      } catch (Exception e) {
+        // Ignore exception and move on.
+        LOG.error("Removing all the state logs from procedure store failed. " +
+            "You should check the cluster state using HBCK.");
+      }
+      return;
+    }
+
     procedureStore = new WALProcedureStore(conf, 
fileSystemManager.getFileSystem(), logDir,
         new MasterProcedureEnv.WALStoreLeaseRecovery(this));
+
     procedureStore.registerListener(new 
MasterProcedureEnv.MasterProcedureStoreListener(this));
     procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore,
         procEnv.getProcedureQueue());
@@ -1383,13 +1441,19 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
     }
     LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
 
-    // TODO: We can handle/merge duplicate requests, and differentiate the 
case of
-    //       TableExistsException by saying if the schema is the same or not.
-    ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
-    long procId = this.procedureExecutor.submitProcedure(
-      new CreateTableProcedure(procedureExecutor.getEnvironment(),
-        hTableDescriptor, newRegions, latch));
-    latch.await();
+    long procId = -1;
+    if (isMasterProcedureExecutorEnabled()) {
+      // TODO: We can handle/merge duplicate requests, and differentiate the 
case of
+      //       TableExistsException by saying if the schema is the same or not.
+      ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
+      procId = this.procedureExecutor.submitProcedure(
+        new CreateTableProcedure(procedureExecutor.getEnvironment(),
+          hTableDescriptor, newRegions, latch));
+      latch.await();
+    } else {
+      this.service.submit(new CreateTableHandler(this, this.fileSystemManager, 
hTableDescriptor,
+        conf, newRegions, this).prepare());
+    }
 
     if (cpHost != null) {
       cpHost.postCreateTable(hTableDescriptor, newRegions);
@@ -1623,11 +1687,16 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
     }
     LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
 
-    // TODO: We can handle/merge duplicate request
-    ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
-    long procId = this.procedureExecutor.submitProcedure(
+    long procId = -1;
+    if (isMasterProcedureExecutorEnabled()) {
+      // TODO: We can handle/merge duplicate request
+      ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
+      procId = this.procedureExecutor.submitProcedure(
         new DeleteTableProcedure(procedureExecutor.getEnvironment(), 
tableName, latch));
-    latch.await();
+      latch.await();
+    } else {
+      this.service.submit(new DeleteTableHandler(tableName, this, 
this).prepare());
+    }
 
     if (cpHost != null) {
       cpHost.postDeleteTable(tableName);
@@ -1644,9 +1713,16 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
     }
     LOG.info(getClientIdAuditPrefix() + " truncate " + tableName);
 
-    long procId = this.procedureExecutor.submitProcedure(
+    if (isMasterProcedureExecutorEnabled()) {
+      long procId = this.procedureExecutor.submitProcedure(
         new TruncateTableProcedure(procedureExecutor.getEnvironment(), 
tableName, preserveSplits));
-    ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+      ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+    } else {
+      TruncateTableHandler handler =
+          new TruncateTableHandler(tableName, this, this, preserveSplits);
+      handler.prepare();
+      handler.process();
+    }
 
     if (cpHost != null) {
       cpHost.postTruncateTable(tableName);
@@ -1664,11 +1740,17 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
         return;
       }
     }
-    // Execute the operation synchronously - wait for the operation to 
complete before continuing.
-    long procId =
+    LOG.info(getClientIdAuditPrefix() + " add " + columnDescriptor);
+
+    if (isMasterProcedureExecutorEnabled()) {
+      // Execute the operation synchronously - wait for the operation to 
complete before continuing.
+      long procId =
         this.procedureExecutor.submitProcedure(new 
AddColumnFamilyProcedure(procedureExecutor
             .getEnvironment(), tableName, columnDescriptor));
-    ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+      ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+    } else {
+      new TableAddFamilyHandler(tableName, columnDescriptor, this, 
this).prepare().process();
+    }
     if (cpHost != null) {
       cpHost.postAddColumn(tableName, columnDescriptor);
     }
@@ -1687,11 +1769,15 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
     }
     LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
 
-    // Execute the operation synchronously - wait for the operation to 
complete before continuing.
-    long procId =
+    if (isMasterProcedureExecutorEnabled()) {
+      // Execute the operation synchronously - wait for the operation to 
complete before continuing.
+      long procId =
         this.procedureExecutor.submitProcedure(new 
ModifyColumnFamilyProcedure(procedureExecutor
             .getEnvironment(), tableName, descriptor));
-    ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+      ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+    } else {
+      new TableModifyFamilyHandler(tableName, descriptor, this, 
this).prepare().process();
+    }
 
     if (cpHost != null) {
       cpHost.postModifyColumn(tableName, descriptor);
@@ -1709,11 +1795,15 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
     }
     LOG.info(getClientIdAuditPrefix() + " delete " + 
Bytes.toString(columnName));
 
-    // Execute the operation synchronously - wait for the operation to 
complete before continuing.
-    long procId =
+    if (isMasterProcedureExecutorEnabled()) {
+      // Execute the operation synchronously - wait for the operation to 
complete before continuing.
+      long procId =
         this.procedureExecutor.submitProcedure(new 
DeleteColumnFamilyProcedure(procedureExecutor
             .getEnvironment(), tableName, columnName));
-    ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+      ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+    } else {
+      new TableDeleteFamilyHandler(tableName, columnName, this, 
this).prepare().process();
+    }
 
     if (cpHost != null) {
       cpHost.postDeleteColumn(tableName, columnName);
@@ -1728,16 +1818,22 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
     }
     LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
 
-    // Execute the operation asynchronously - client will check the progress 
of the operation
-    final ProcedurePrepareLatch prepareLatch = 
ProcedurePrepareLatch.createLatch();
-    long procId =
-        this.procedureExecutor.submitProcedure(new 
EnableTableProcedure(procedureExecutor
+    long procId = -1;
+    if (isMasterProcedureExecutorEnabled()) {
+      // Execute the operation asynchronously - client will check the progress 
of the operation
+      final ProcedurePrepareLatch prepareLatch = 
ProcedurePrepareLatch.createLatch();
+      procId =
+          this.procedureExecutor.submitProcedure(new 
EnableTableProcedure(procedureExecutor
             .getEnvironment(), tableName, false, prepareLatch));
-    // Before returning to client, we want to make sure that the table is 
prepared to be
-    // enabled (the table is locked and the table state is set).
-    //
-    // Note: if the procedure throws exception, we will catch it and rethrow.
-    prepareLatch.await();
+      // Before returning to client, we want to make sure that the table is 
prepared to be
+      // enabled (the table is locked and the table state is set).
+      //
+      // Note: if the procedure throws exception, we will catch it and rethrow.
+      prepareLatch.await();
+    } else {
+      this.service.submit(new EnableTableHandler(this, tableName,
+        assignmentManager, tableLockManager, false).prepare());
+    }
 
     if (cpHost != null) {
       cpHost.postEnableTable(tableName);
@@ -1754,17 +1850,22 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
     }
     LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
 
-    // Execute the operation asynchronously - client will check the progress 
of the operation
-    final ProcedurePrepareLatch prepareLatch = 
ProcedurePrepareLatch.createLatch();
-    // Execute the operation asynchronously - client will check the progress 
of the operation
-    long procId =
-        this.procedureExecutor.submitProcedure(new 
DisableTableProcedure(procedureExecutor
+    long procId = -1;
+    if (isMasterProcedureExecutorEnabled()) {
+      // Execute the operation asynchronously - client will check the progress 
of the operation
+      final ProcedurePrepareLatch prepareLatch = 
ProcedurePrepareLatch.createLatch();
+      procId =
+          this.procedureExecutor.submitProcedure(new 
DisableTableProcedure(procedureExecutor
             .getEnvironment(), tableName, false, prepareLatch));
-    // Before returning to client, we want to make sure that the table is 
prepared to be
-    // enabled (the table is locked and the table state is set).
-    //
-    // Note: if the procedure throws exception, we will catch it and rethrow.
-    prepareLatch.await();
+      // Before returning to client, we want to make sure that the table is 
prepared to be
+      // enabled (the table is locked and the table state is set).
+      //
+      // Note: if the procedure throws exception, we will catch it and rethrow.
+      prepareLatch.await();
+    } else {
+      this.service.submit(new DisableTableHandler(this, tableName,
+        assignmentManager, tableLockManager, false).prepare());
+    }
 
     if (cpHost != null) {
       cpHost.postDisableTable(tableName);
@@ -1820,11 +1921,15 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
 
     LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
 
-    // Execute the operation synchronously - wait for the operation completes 
before continuing.
-    long procId = this.procedureExecutor.submitProcedure(
+    if (isMasterProcedureExecutorEnabled()) {
+      // Execute the operation synchronously - wait for the operation 
completes before continuing.
+      long procId = this.procedureExecutor.submitProcedure(
         new ModifyTableProcedure(procedureExecutor.getEnvironment(), 
descriptor));
 
-    ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+      ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+    } else {
+      new ModifyTableHandler(tableName, descriptor, this, 
this).prepare().process();
+    }
 
     if (cpHost != null) {
       cpHost.postModifyTable(tableName, descriptor);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3eb8021e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 4f09a9c..e9b5528 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -403,6 +403,10 @@ public class MasterRpcServices extends RSRpcServices
     }
   }
 
+  private boolean isValidProcId(final long procId) {
+    return (procId > 0);
+  }
+
   @Override
   public CreateTableResponse createTable(RpcController controller, 
CreateTableRequest req)
   throws ServiceException {
@@ -410,7 +414,11 @@ public class MasterRpcServices extends RSRpcServices
     byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
     try {
       long procId = master.createTable(hTableDescriptor, splitKeys);
-      return CreateTableResponse.newBuilder().setProcId(procId).build();
+      if (isValidProcId(procId)) {
+        return CreateTableResponse.newBuilder().setProcId(procId).build();
+      } else {
+        return CreateTableResponse.newBuilder().build();
+      }
     } catch (IOException ioe) {
       throw new ServiceException(ioe);
     }
@@ -466,7 +474,11 @@ public class MasterRpcServices extends RSRpcServices
       DeleteTableRequest request) throws ServiceException {
     try {
       long procId = 
master.deleteTable(ProtobufUtil.toTableName(request.getTableName()));
-      return DeleteTableResponse.newBuilder().setProcId(procId).build();
+      if (isValidProcId(procId)) {
+        return DeleteTableResponse.newBuilder().setProcId(procId).build();
+      } else {
+        return DeleteTableResponse.newBuilder().build();
+      }
     } catch (IOException ioe) {
       throw new ServiceException(ioe);
     }
@@ -489,7 +501,11 @@ public class MasterRpcServices extends RSRpcServices
       DisableTableRequest request) throws ServiceException {
     try {
       long procId = 
master.disableTable(ProtobufUtil.toTableName(request.getTableName()));
-      return DisableTableResponse.newBuilder().setProcId(procId).build();
+      if (isValidProcId(procId)) {
+        return DisableTableResponse.newBuilder().setProcId(procId).build();
+      } else {
+        return DisableTableResponse.newBuilder().build();
+      }
     } catch (IOException ioe) {
       throw new ServiceException(ioe);
     }
@@ -575,7 +591,11 @@ public class MasterRpcServices extends RSRpcServices
       EnableTableRequest request) throws ServiceException {
     try {
       long procId = 
master.enableTable(ProtobufUtil.toTableName(request.getTableName()));
-      return EnableTableResponse.newBuilder().setProcId(procId).build();
+      if (isValidProcId(procId)) {
+        return EnableTableResponse.newBuilder().setProcId(procId).build();
+      } else {
+        return EnableTableResponse.newBuilder().build();
+      }
     } catch (IOException ioe) {
       throw new ServiceException(ioe);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3eb8021e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 59a078e..2a5fb30 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -95,6 +95,11 @@ public interface MasterServices extends Server {
       throws IOException, TableNotFoundException, TableNotDisabledException;
 
   /**
+   * Check whether the procedure executor is enabled
+   */
+  boolean isMasterProcedureExecutorEnabled();
+
+  /**
    * Create a table using the given table definition.
    * @param desc The table definition
    * @param splitKeys Starting row keys for the initial table regions.  If null

http://git-wip-us.apache.org/repos/asf/hbase/blob/3eb8021e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 74d1339..cece2ad 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
@@ -239,12 +240,22 @@ public class TableNamespaceManager {
     HRegionInfo[] newRegions = new HRegionInfo[]{
         new HRegionInfo(HTableDescriptor.NAMESPACE_TABLEDESC.getTableName(), 
null, null)};
 
-    // we need to create the table this way to bypass checkInitialized
-    masterServices.getMasterProcedureExecutor()
-      .submitProcedure(new CreateTableProcedure(
+    if (masterServices.isMasterProcedureExecutorEnabled()) {
+      // we need to create the table this way to bypass checkInitialized
+      masterServices.getMasterProcedureExecutor()
+        .submitProcedure(new CreateTableProcedure(
           masterServices.getMasterProcedureExecutor().getEnvironment(),
           HTableDescriptor.NAMESPACE_TABLEDESC,
           newRegions));
+    } else {
+      masterServices.getExecutorService()
+          .submit(new CreateTableHandler(masterServices,
+              masterServices.getMasterFileSystem(),
+              HTableDescriptor.NAMESPACE_TABLEDESC,
+              masterServices.getConfiguration(),
+              newRegions,
+              masterServices).prepare());
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/3eb8021e/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index be1d4c5..3c1a4ad 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -25,6 +25,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.namespace.NamespaceAuditor;
+import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
@@ -462,11 +463,18 @@ public class MasterQuotaManager implements 
RegionStateListener {
   private void createQuotaTable() throws IOException {
     HRegionInfo[] newRegions = new HRegionInfo[] { new 
HRegionInfo(QuotaUtil.QUOTA_TABLE_NAME) };
 
-    masterServices.getMasterProcedureExecutor()
-      .submitProcedure(new CreateTableProcedure(
+    if (masterServices.isMasterProcedureExecutorEnabled()) {
+      masterServices.getMasterProcedureExecutor()
+        .submitProcedure(new CreateTableProcedure(
           masterServices.getMasterProcedureExecutor().getEnvironment(),
           QuotaUtil.QUOTA_TABLE_DESC,
           newRegions));
+    } else {
+      masterServices.getExecutorService().submit(
+        new CreateTableHandler(masterServices, 
masterServices.getMasterFileSystem(),
+          QuotaUtil.QUOTA_TABLE_DESC, masterServices.getConfiguration(), 
newRegions,
+          masterServices).prepare());
+    }
   }
 
   private static class NamedLock<T> {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3eb8021e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index dea5c3a..168a00d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -224,6 +224,11 @@ public class TestCatalogJanitor {
     }
 
     @Override
+    public boolean isMasterProcedureExecutorEnabled() {
+      return true;
+    }
+
+    @Override
     public long createTable(HTableDescriptor desc, byte[][] splitKeys)
         throws IOException {
       // no-op

http://git-wip-us.apache.org/repos/asf/hbase/blob/3eb8021e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestProcedureConf.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestProcedureConf.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestProcedureConf.java
new file mode 100644
index 0000000..efd549f
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestProcedureConf.java
@@ -0,0 +1,93 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestProcedureConf {
+  private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
+
+  @Test
+  public void testProcedureConfEnable() throws Exception {
+    TableName tableName = TableName.valueOf("testProcedureConfEnable");
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    MiniHBaseCluster cluster = null;
+
+    try {
+      TEST_UTIL.startMiniCluster();
+      cluster = TEST_UTIL.getHBaseCluster();
+      HMaster m = cluster.getMaster();
+      long procid = m.createTable(htd, null);
+      assertTrue(procid > 0);
+    } finally {
+      if (cluster != null) {
+        TEST_UTIL.shutdownMiniCluster();
+      }
+    }
+  }
+
+  @Test
+  public void testProcedureConfDisable() throws Exception {
+    TableName tableName = TableName.valueOf("testProcedureConfDisable");
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    MiniHBaseCluster cluster = null;
+
+    try {
+      TEST_UTIL.getConfiguration().set("hbase.master.procedure.tableddl", 
"disabled");
+      TEST_UTIL.startMiniCluster();
+      cluster = TEST_UTIL.getHBaseCluster();
+      HMaster m = cluster.getMaster();
+      long procid = m.createTable(htd, null);
+      assertTrue(procid < 0);
+    } finally {
+      if (cluster != null) {
+        TEST_UTIL.shutdownMiniCluster();
+      }
+    }
+  }
+
+  @Test
+  public void testProcedureConfUnused() throws Exception {
+    TableName tableName = TableName.valueOf("testProcedureConfUnused");
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    MiniHBaseCluster cluster = null;
+
+    try {
+      TEST_UTIL.getConfiguration().set("hbase.master.procedure.tableddl", 
"unused");
+      TEST_UTIL.startMiniCluster();
+      cluster = TEST_UTIL.getHBaseCluster();
+      HMaster m = cluster.getMaster();
+      long procid = m.createTable(htd, null);
+      assertTrue(procid < 0);
+    } finally {
+      if (cluster != null) {
+        TEST_UTIL.shutdownMiniCluster();
+      }
+    }
+  }
+}

Reply via email to