This is an automated email from the ASF dual-hosted git repository.

andor pushed a commit to branch HBASE-29081
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit aaa0c9ef5271edcb03208639f43f649be0071199
Author: Anuj Sharma <[email protected]>
AuthorDate: Tue Apr 22 21:58:45 2025 +0530

    HBASE-29083: Add global read-only mode to HBase (#6757)
    
    * HBASE-29083: Add global read-only mode to HBase
    
    Add hbase read-only property and ReadOnlyController
    
    (cherry picked from commit 49b678da90288bc645fcbfb8c0bbd27b33281c0f)
    
    * HBASE-29083. Allow test to update hbase:meta table
    
    * HBASE-29083. Spotless apply
    
    * Refactor code to have only passing tests
    
    * Apply spotless
    
    ---------
    
    Co-authored-by: Andor Molnar <[email protected]>
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |  10 +
 .../hbase/security/access/ReadOnlyController.java  | 393 +++++++++++++++++++++
 .../security/access/TestReadOnlyController.java    | 100 ++++++
 3 files changed, 503 insertions(+)
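
For context, the new flag is consumed by the coprocessor below; the test at the end of this
patch wires it up programmatically. A minimal sketch of an equivalent setup (illustrative only,
not part of the commit; it assumes the standard coprocessor configuration keys and that
ReadOnlyController is available on the server classpath):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ReadOnlyModeSetup {
      public static Configuration readOnlyConf() {
        Configuration conf = HBaseConfiguration.create();
        // Reject mutations, DDL, snapshot and bulk-load requests cluster-wide.
        conf.setBoolean("hbase.global.readonly.enabled", true);
        // Load the controller on every region server and on the master.
        conf.set("hbase.coprocessor.region.classes",
          "org.apache.hadoop.hbase.security.access.ReadOnlyController");
        conf.set("hbase.coprocessor.master.classes",
          "org.apache.hadoop.hbase.security.access.ReadOnlyController");
        return conf;
      }
    }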

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 3230d92b3e1..58c515d1ef9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1658,6 +1658,16 @@ public final class HConstants {
    */
   public final static String HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE = "";
 
+  /**
+   * Whether HBase should only serve read requests
+   */
+  public final static String HBASE_GLOBAL_READONLY_ENABLED_KEY = "hbase.global.readonly.enabled";
+
+  /**
+   * Default value of {@link #HBASE_GLOBAL_READONLY_ENABLED_KEY}
+   */
+  public final static boolean HBASE_GLOBAL_READONLY_ENABLED_DEFAULT = false;
+
   private HConstants() {
     // Can't be instantiated with this ctor.
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java
new file mode 100644
index 00000000000..90d154ebec5
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java
@@ -0,0 +1,393 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.security.access;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Optional;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
+import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
+import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.Message;
+import org.apache.hbase.thirdparty.com.google.protobuf.Service;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+
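+/**
+ * Coprocessor that enforces a cluster-wide read-only mode. When
+ * {@link HConstants#HBASE_GLOBAL_READONLY_ENABLED_KEY} is true, every write path intercepted
+ * below (mutations, DDL, namespace and snapshot operations, bulk loads, replication sink
+ * writes) is rejected with an IOException; read requests are not affected.
+ */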
+@CoreCoprocessor
[email protected](HBaseInterfaceAudience.CONFIG)
+public class ReadOnlyController
+  implements MasterCoprocessor, RegionCoprocessor, MasterObserver, RegionObserver,
+  RegionServerCoprocessor, RegionServerObserver, EndpointObserver, BulkLoadObserver {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyController.class);
+  private Configuration conf;
+
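+  /**
+   * Throws an IOException when {@link HConstants#HBASE_GLOBAL_READONLY_ENABLED_KEY} is enabled;
+   * otherwise a no-op. Shared by every pre-hook in this class.
+   */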
+  private void internalReadOnlyGuard() throws IOException {
+    if (
+      conf.getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
+        HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT)
+    ) {
+      // throw new FailedSanityCheckException("Operation not allowed in Read-Only Mode");
+      throw new IOException("Operation not allowed in Read-Only Mode");
+    }
+  }
+
+  @Override
+  public void start(CoprocessorEnvironment env) throws IOException {
+    conf = env.getConfiguration();
+  }
+
+  @Override
+  public void stop(CoprocessorEnvironment env) {
+  }
+
+  /* ---- RegionObserver Overrides ---- */
+  @Override
+  public Optional<RegionObserver> getRegionObserver() {
+    return Optional.of(this);
+  }
+
+  @Override
+  public void prePut(ObserverContext<? extends RegionCoprocessorEnvironment> c, Put put,
+    WALEdit edit) throws IOException {
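+    // hbase:meta edits (and empty edits) are skipped so that updates to hbase:meta still go
+    // through while the cluster is in read-only mode.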
+    if (edit.isMetaEdit() || edit.isEmpty()) {
+      return;
+    }
+    internalReadOnlyGuard();
+  }
+
+  @Override
+  public void preDelete(ObserverContext<? extends RegionCoprocessorEnvironment> c, Delete delete,
+    WALEdit edit) throws IOException {
+    internalReadOnlyGuard();
+  }
+
+  @Override
+  public void preBatchMutate(ObserverContext<? extends RegionCoprocessorEnvironment> c,
+    MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
+    for (int i = 0; i < miniBatchOp.size(); i++) {
+      WALEdit edit = miniBatchOp.getWalEdit(i);
+      if (edit == null || edit.isMetaEdit() || edit.isEmpty()) {
+        continue;
+      }
+      internalReadOnlyGuard();
+    }
+  }
+
+  @Override
+  public void preFlush(final ObserverContext<? extends RegionCoprocessorEnvironment> c,
+    FlushLifeCycleTracker tracker) throws IOException {
+    internalReadOnlyGuard();
+  }
+
+  @Override
+  public boolean preCheckAndPut(ObserverContext<? extends RegionCoprocessorEnvironment> c,
+    byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator,
+    Put put, boolean result) throws IOException {
+    internalReadOnlyGuard();
+    return RegionObserver.super.preCheckAndPut(c, row, family, qualifier, op, comparator, put,
+      result);
+  }
+
+  @Override
+  public boolean preCheckAndPut(ObserverContext<? extends RegionCoprocessorEnvironment> c,
+    byte[] row, Filter filter, Put put, boolean result) throws IOException {
+    internalReadOnlyGuard();
+    return RegionObserver.super.preCheckAndPut(c, row, filter, put, result);
+  }
+
+  @Override
+  public boolean preCheckAndPutAfterRowLock(
+    ObserverContext<? extends RegionCoprocessorEnvironment> c, byte[] row, byte[] family,
+    byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, boolean result)
+    throws IOException {
+    internalReadOnlyGuard();
+    return RegionObserver.super.preCheckAndPutAfterRowLock(c, row, family, qualifier, op,
+      comparator, put, result);
+  }
+
+  @Override
+  public boolean preCheckAndPutAfterRowLock(
+    ObserverContext<? extends RegionCoprocessorEnvironment> c, byte[] row, Filter filter, Put put,
+    boolean result) throws IOException {
+    internalReadOnlyGuard();
+    return RegionObserver.super.preCheckAndPutAfterRowLock(c, row, filter, put, result);
+  }
+
+  @Override
+  public boolean preCheckAndDelete(ObserverContext<? extends RegionCoprocessorEnvironment> c,
+    byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator,
+    Delete delete, boolean result) throws IOException {
+    internalReadOnlyGuard();
+    return RegionObserver.super.preCheckAndDelete(c, row, family, qualifier, op, comparator, delete,
+      result);
+  }
+
+  @Override
+  public boolean preCheckAndDelete(ObserverContext<? extends RegionCoprocessorEnvironment> c,
+    byte[] row, Filter filter, Delete delete, boolean result) throws IOException {
+    internalReadOnlyGuard();
+    return RegionObserver.super.preCheckAndDelete(c, row, filter, delete, result);
+  }
+
+  @Override
+  public boolean preCheckAndDeleteAfterRowLock(
+    ObserverContext<? extends RegionCoprocessorEnvironment> c, byte[] row, byte[] family,
+    byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Delete delete,
+    boolean result) throws IOException {
+    internalReadOnlyGuard();
+    return RegionObserver.super.preCheckAndDeleteAfterRowLock(c, row, family, qualifier, op,
+      comparator, delete, result);
+  }
+
+  @Override
+  public boolean preCheckAndDeleteAfterRowLock(
+    ObserverContext<? extends RegionCoprocessorEnvironment> c, byte[] row, Filter filter,
+    Delete delete, boolean result) throws IOException {
+    internalReadOnlyGuard();
+    return RegionObserver.super.preCheckAndDeleteAfterRowLock(c, row, filter, delete, result);
+  }
+
+  @Override
+  public Result preAppend(ObserverContext<? extends RegionCoprocessorEnvironment> c, Append append)
+    throws IOException {
+    internalReadOnlyGuard();
+    return RegionObserver.super.preAppend(c, append);
+  }
+
+  @Override
+  public Result preAppend(ObserverContext<? extends RegionCoprocessorEnvironment> c, Append append,
+    WALEdit edit) throws IOException {
+    internalReadOnlyGuard();
+    return RegionObserver.super.preAppend(c, append, edit);
+  }
+
+  @Override
+  public Result preAppendAfterRowLock(ObserverContext<? extends RegionCoprocessorEnvironment> c,
+    Append append) throws IOException {
+    internalReadOnlyGuard();
+    return RegionObserver.super.preAppendAfterRowLock(c, append);
+  }
+
+  @Override
+  public void preBulkLoadHFile(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
+    List<Pair<byte[], String>> familyPaths) throws IOException {
+    internalReadOnlyGuard();
+    RegionObserver.super.preBulkLoadHFile(ctx, familyPaths);
+  }
+
+  /* ---- MasterObserver Overrides ---- */
+  @Override
+  public Optional<MasterObserver> getMasterObserver() {
+    return Optional.of(this);
+  }
+
+  @Override
+  public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    TableDescriptor desc, RegionInfo[] regions) throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preCreateTable(ctx, desc, regions);
+  }
+
+  @Override
+  public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName)
+    throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preDeleteTable(ctx, tableName);
+  }
+
+  @Override
+  public void preDeleteTableAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    TableName tableName) throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preDeleteTableAction(ctx, tableName);
+  }
+
+  @Override
+  public void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    TableName tableName) throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preTruncateTable(ctx, tableName);
+  }
+
+  @Override
+  public void preTruncateTableAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    TableName tableName) throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preTruncateTableAction(ctx, tableName);
+  }
+
+  @Override
+  public TableDescriptor preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    TableName tableName, TableDescriptor currentDescriptor, TableDescriptor newDescriptor)
+    throws IOException {
+    internalReadOnlyGuard();
+    return MasterObserver.super.preModifyTable(ctx, tableName, currentDescriptor, newDescriptor);
+  }
+
+  @Override
+  public void preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preSnapshot(ctx, snapshot, tableDescriptor);
+  }
+
+  @Override
+  public void preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preCloneSnapshot(ctx, snapshot, tableDescriptor);
+  }
+
+  @Override
+  public void preRestoreSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preRestoreSnapshot(ctx, snapshot, tableDescriptor);
+  }
+
+  @Override
+  public void preDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    SnapshotDescription snapshot) throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preDeleteSnapshot(ctx, snapshot);
+  }
+
+  @Override
+  public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    NamespaceDescriptor ns) throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preCreateNamespace(ctx, ns);
+  }
+
+  @Override
+  public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor)
+    throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preModifyNamespace(ctx, currentNsDescriptor, newNsDescriptor);
+  }
+
+  @Override
+  public void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    String namespace) throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preDeleteNamespace(ctx, namespace);
+  }
+
+  @Override
+  public void preMergeRegionsAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    RegionInfo[] regionsToMerge) throws IOException {
+    internalReadOnlyGuard();
+    MasterObserver.super.preMergeRegionsAction(ctx, regionsToMerge);
+  }
+
+  /* ---- RegionServerObserver Overrides ---- */
+  @Override
+  public void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
+    throws IOException {
+    internalReadOnlyGuard();
+    RegionServerObserver.super.preRollWALWriterRequest(ctx);
+  }
+
+  @Override
+  public void preClearCompactionQueues(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
+    throws IOException {
+    internalReadOnlyGuard();
+    RegionServerObserver.super.preClearCompactionQueues(ctx);
+  }
+
+  @Override
+  public void preExecuteProcedures(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
+    throws IOException {
+    internalReadOnlyGuard();
+    RegionServerObserver.super.preExecuteProcedures(ctx);
+  }
+
+  @Override
+  public void preReplicationSinkBatchMutate(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
+    AdminProtos.WALEntry walEntry, Mutation mutation) throws IOException {
+    internalReadOnlyGuard();
+    RegionServerObserver.super.preReplicationSinkBatchMutate(ctx, walEntry, mutation);
+  }
+
+  @Override
+  public void preClearRegionBlockCache(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
+    throws IOException {
+    internalReadOnlyGuard();
+    RegionServerObserver.super.preClearRegionBlockCache(ctx);
+  }
+
+  /* ---- EndpointObserver Overrides ---- */
+  @Override
+  public Message preEndpointInvocation(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
+    Service service, String methodName, Message request) throws IOException {
+    internalReadOnlyGuard();
+    return EndpointObserver.super.preEndpointInvocation(ctx, service, methodName, request);
+  }
+
+  /* ---- BulkLoadObserver Overrides ---- */
+  @Override
+  public void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx)
+    throws IOException {
+    internalReadOnlyGuard();
+    BulkLoadObserver.super.prePrepareBulkLoad(ctx);
+  }
+
+  @Override
+  public void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> 
ctx)
+    throws IOException {
+    internalReadOnlyGuard();
+    BulkLoadObserver.super.preCleanupBulkLoad(ctx);
+  }
+}
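
With the controller loaded on the region servers and the flag enabled, an ordinary client write
is expected to fail at the prePut hook above. A rough client-side sketch (illustrative, not part
of the commit; the table name "mytable" and family "cf" are placeholders, and a running cluster
with this configuration is assumed):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadOnlyPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
          Table table = connection.getTable(TableName.valueOf("mytable"))) {
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          try {
            table.put(put);
          } catch (IOException e) {
            // Expected while hbase.global.readonly.enabled is true: the server-side guard
            // rejects the mutation with "Operation not allowed in Read-Only Mode".
            System.out.println("Write rejected: " + e.getMessage());
          }
        }
      }
    }
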
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyController.java
new file mode 100644
index 00000000000..1b286214e6d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyController.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.security.access;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.SecurityTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.ExpectedException;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category({ SecurityTests.class, LargeTests.class })
+@SuppressWarnings("deprecation")
+public class TestReadOnlyController {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestReadOnlyController.class);
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestReadOnlyController.class);
+  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+  private static TableName TEST_TABLE = TableName.valueOf("readonlytesttable");
+  private static byte[] TEST_FAMILY = Bytes.toBytes("readonlytablecolfam");
+  private static Configuration conf;
+  private static Connection connection;
+
+  private static RegionServerCoprocessorEnvironment RSCP_ENV;
+
+  private static Table TestTable;
+  @Rule
+  public TestName name = new TestName();
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    conf = TEST_UTIL.getConfiguration();
+    // Only try once so that a connection failure makes the test fail faster
+    conf.setInt("hbase.ipc.client.connect.max.retries", 1);
+    // Use a shorter ZK session timeout so that failures do not drag the test out
+    conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
+    // Enable ReadOnly mode for the cluster
+    conf.setBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY, true);
+    // Add the ReadOnlyController coprocessor on the region servers to reject any write operation
+    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, ReadOnlyController.class.getName());
+    // Add the ReadOnlyController coprocessor on the master to reject any write operation
+    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, ReadOnlyController.class.getName());
+    // Start the test cluster
+    TEST_UTIL.startMiniCluster(2);
+    // Get a connection to HBase
+    connection = ConnectionFactory.createConnection(conf);
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    if (connection != null) {
+      connection.close();
+    }
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test(expected = IOException.class)
+  public void testCreateTable() throws IOException {
+    TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY);
+  }
+}
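
A follow-up test (illustrative only, not part of this commit) could exercise the master-side
guard the same way, reusing the fixture above; the namespace name is arbitrary, and imports of
org.apache.hadoop.hbase.NamespaceDescriptor and org.apache.hadoop.hbase.client.Admin would be
needed:

      @Test(expected = IOException.class)
      public void testCreateNamespace() throws IOException {
        try (Admin admin = connection.getAdmin()) {
          // Expected to be rejected by ReadOnlyController#preCreateNamespace on the master.
          admin.createNamespace(NamespaceDescriptor.create("readonly_test_ns").build());
        }
      }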
