This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
     new a5748a227ff HBASE-28376 Column family ns does not exist in region during upgrade 3.0.0-beta-2 (#5697)
a5748a227ff is described below

commit a5748a227ff700b5e4de8fc9b84d5bd2ecec9f7e
Author: Duo Zhang <zhang...@apache.org>
AuthorDate: Sun Mar 10 16:39:26 2024 +0800

    HBASE-28376 Column family ns does not exist in region during upgrade 3.0.0-beta-2 (#5697)
    
    Signed-off-by: Bryan Beaudreault <bbeaudrea...@apache.org>
---
 .../protobuf/server/master/MasterProcedure.proto   |   9 ++
 .../hbase/master/ClusterSchemaServiceImpl.java     |   4 +-
 .../hadoop/hbase/master/TableNamespaceManager.java | 135 ++++++++++++++-----
 .../procedure/MigrateNamespaceTableProcedure.java  | 145 +++++++++++++++++++++
 .../hadoop/hbase/util/FSTableDescriptors.java      |  35 ++---
 .../hbase/master/TestMigrateNamespaceTable.java    | 133 ++++++++++++++++++-
 6 files changed, 402 insertions(+), 59 deletions(-)

diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto
index c562a4e5c2f..c9c9c635731 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto
@@ -771,3 +771,12 @@ enum MigrateReplicationQueueFromZkToTableState {
 message MigrateReplicationQueueFromZkToTableStateData {
   repeated string disabled_peer_id = 1;
 }
+
+enum MigrateNamespaceTableProcedureState {
+  MIGRATE_NAMESPACE_TABLE_ADD_FAMILY = 1;
+  MIGRATE_NAMESPACE_TABLE_MIGRATE_DATA = 2;
+  MIGRATE_NAMESPACE_TABLE_DISABLE_TABLE = 3;
+}
+
+message MigrateNamespaceTableProcedureStateData {
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
index 39d00d0908e..27a21a8d1c5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
@@ -59,8 +59,8 @@ class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaS
     try {
       notifyStarted();
       this.tableNamespaceManager.start();
-    } catch (IOException ioe) {
-      notifyFailed(ioe);
+    } catch (IOException | InterruptedException e) {
+      notifyFailed(e);
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index a3b5afe98d5..4d18b2ad8f4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -19,29 +19,29 @@ package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.MigrateNamespaceTableProcedure;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -66,61 +66,112 @@ public class TableNamespaceManager {
 
   private final MasterServices masterServices;
 
+  private volatile boolean migrationDone;
+
   TableNamespaceManager(MasterServices masterServices) {
     this.masterServices = masterServices;
   }
 
-  private void migrateNamespaceTable() throws IOException {
-    try (Table nsTable = masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
-      ResultScanner scanner = nsTable.getScanner(
-        new Scan().addFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES).readAllVersions());
-      BufferedMutator mutator =
-        masterServices.getConnection().getBufferedMutator(TableName.META_TABLE_NAME)) {
+  private void tryMigrateNamespaceTable() throws IOException, InterruptedException {
+    Optional<MigrateNamespaceTableProcedure> opt = masterServices.getProcedures().stream()
+      .filter(p -> p instanceof MigrateNamespaceTableProcedure)
+      .map(p -> (MigrateNamespaceTableProcedure) p).findAny();
+    if (!opt.isPresent()) {
+      // the procedure is not present, check whether the ns family already exists in the meta table
+      TableDescriptor metaTableDesc =
+        masterServices.getTableDescriptors().get(TableName.META_TABLE_NAME);
+      if (metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) {
+        // normal case: upgrading is done, or the cluster was created with 3.x code
+        migrationDone = true;
+      } else {
+        // submit the migration procedure
+        MigrateNamespaceTableProcedure proc = new MigrateNamespaceTableProcedure();
+        masterServices.getMasterProcedureExecutor().submitProcedure(proc);
+      }
+    } else {
+      if (opt.get().isFinished()) {
+        // the procedure is already done
+        migrationDone = true;
+      }
+      // we have already submitted the procedure, continue
+    }
+  }
+
+  private void addToCache(Result result, byte[] family, byte[] qualifier) throws IOException {
+    Cell cell = result.getColumnLatestCell(family, qualifier);
+    NamespaceDescriptor ns =
+      ProtobufUtil.toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.parseFrom(CodedInputStream
+        .newInstance(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())));
+    cache.put(ns.getName(), ns);
+  }
+
+  private void loadFromMeta() throws IOException {
+    try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME);
+      ResultScanner scanner = table.getScanner(HConstants.NAMESPACE_FAMILY)) {
       for (Result result;;) {
         result = scanner.next();
         if (result == null) {
           break;
         }
-        Put put = new Put(result.getRow());
-        result
-          .getColumnCells(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES,
-            TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES)
-          .forEach(c -> put.addColumn(HConstants.NAMESPACE_FAMILY,
-            HConstants.NAMESPACE_COL_DESC_QUALIFIER, c.getTimestamp(), CellUtil.cloneValue(c)));
-        mutator.mutate(put);
+        addToCache(result, HConstants.NAMESPACE_FAMILY, HConstants.NAMESPACE_COL_DESC_QUALIFIER);
       }
     }
-    // schedule a disable procedure instead of block waiting here, as when disabling a table we will
-    // wait until master is initialized, but we are part of the initialization...
-    masterServices.getMasterProcedureExecutor().submitProcedure(
-      new DisableTableProcedure(masterServices.getMasterProcedureExecutor().getEnvironment(),
-        TableName.NAMESPACE_TABLE_NAME, false));
   }
 
-  private void loadNamespaceIntoCache() throws IOException {
-    try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME);
-      ResultScanner scanner = table.getScanner(HConstants.NAMESPACE_FAMILY)) {
+  private void loadFromNamespace() throws IOException {
+    try (Table table = masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
+      ResultScanner scanner =
+        table.getScanner(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES)) {
       for (Result result;;) {
         result = scanner.next();
         if (result == null) {
           break;
         }
-        Cell cell = result.getColumnLatestCell(HConstants.NAMESPACE_FAMILY,
-          HConstants.NAMESPACE_COL_DESC_QUALIFIER);
-        NamespaceDescriptor ns = ProtobufUtil
-          .toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.parseFrom(CodedInputStream
-            .newInstance(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())));
-        cache.put(ns.getName(), ns);
+        addToCache(result, TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES,
+          TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES);
       }
     }
   }
 
-  public void start() throws IOException {
-    TableState nsTableState = MetaTableAccessor.getTableState(masterServices.getConnection(),
-      TableName.NAMESPACE_TABLE_NAME);
-    if (nsTableState != null && nsTableState.isEnabled()) {
-      migrateNamespaceTable();
+  private boolean shouldLoadFromMeta() throws IOException {
+    if (migrationDone) {
+      return true;
     }
+    // the implementation is a bit tricky
+    // if there is already a disable namespace table procedure or the namespace table is already
+    // disabled, we are safe to read from the meta table as the migration is already done. If not,
+    // since we are part of the master initialization work, we can be sure that when reaching here,
+    // the master has not been marked as initialized yet. And DisableTableProcedure can only be
+    // executed after the master is initialized, so here we are safe to read from the namespace
+    // table, without worrying that the namespace table gets disabled while we are reading and
+    // crashes the master startup.
+    if (
+      masterServices.getTableStateManager().isTableState(TableName.NAMESPACE_TABLE_NAME,
+        TableState.State.DISABLED)
+    ) {
+      return true;
+    }
+    if (
+      masterServices.getProcedures().stream().filter(p -> p instanceof DisableTableProcedure)
+        .anyMatch(
+          p -> ((DisableTableProcedure) p).getTableName().equals(TableName.NAMESPACE_TABLE_NAME))
+    ) {
+      return true;
+    }
+    return false;
+  }
+
+  private void loadNamespaceIntoCache() throws IOException {
+    if (shouldLoadFromMeta()) {
+      loadFromMeta();
+    } else {
+      loadFromNamespace();
+    }
+
+  }
+
+  public void start() throws IOException, InterruptedException {
+    tryMigrateNamespaceTable();
     loadNamespaceIntoCache();
   }
 
@@ -135,7 +186,14 @@ public class TableNamespaceManager {
     return cache.get(name);
   }
 
+  private void checkMigrationDone() throws IOException {
+    if (!migrationDone) {
+      throw new HBaseIOException("namespace migration is ongoing, modification is disallowed");
+    }
+  }
+
   public void addOrUpdateNamespace(NamespaceDescriptor ns) throws IOException {
+    checkMigrationDone();
     insertNamespaceToMeta(masterServices.getConnection(), ns);
     cache.put(ns.getName(), ns);
   }
@@ -152,6 +210,7 @@ public class TableNamespaceManager {
   }
 
   public void deleteNamespace(String namespaceName) throws IOException {
+    checkMigrationDone();
     Delete d = new Delete(Bytes.toBytes(namespaceName));
     try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) {
       table.delete(d);
@@ -174,6 +233,10 @@ public class TableNamespaceManager {
     }
   }
 
+  public void setMigrationDone() {
+    migrationDone = true;
+  }
+
   public static long getMaxTables(NamespaceDescriptor ns) throws IOException {
     String value = ns.getConfigurationValue(KEY_MAX_TABLES);
     long maxTables = 0;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java
new file mode 100644
index 00000000000..dc9eac4c879
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MigrateNamespaceTableProcedureState;
+
+/**
+ * Migrate the namespace data to meta table's namespace family while upgrading
+ */
+@InterfaceAudience.Private
+public class MigrateNamespaceTableProcedure
+  extends StateMachineProcedure<MasterProcedureEnv, MigrateNamespaceTableProcedureState>
+  implements GlobalProcedureInterface {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MigrateNamespaceTableProcedure.class);
+
+  private RetryCounter retryCounter;
+
+  @Override
+  public String getGlobalId() {
+    return getClass().getSimpleName();
+  }
+
+  private void migrate(MasterProcedureEnv env) throws IOException {
+    Connection conn = env.getMasterServices().getConnection();
+    try (Table nsTable = conn.getTable(TableName.NAMESPACE_TABLE_NAME);
+      ResultScanner scanner = nsTable.getScanner(
+        new Scan().addFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES).readAllVersions());
+      BufferedMutator mutator = conn.getBufferedMutator(TableName.META_TABLE_NAME)) {
+      for (Result result;;) {
+        result = scanner.next();
+        if (result == null) {
+          break;
+        }
+        Put put = new Put(result.getRow());
+        result
+          .getColumnCells(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES,
+            TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES)
+          .forEach(c -> put.addColumn(HConstants.NAMESPACE_FAMILY,
+            HConstants.NAMESPACE_COL_DESC_QUALIFIER, c.getTimestamp(), CellUtil.cloneValue(c)));
+        mutator.mutate(put);
+      }
+    }
+  }
+
+  @Override
+  protected Flow executeFromState(MasterProcedureEnv env, MigrateNamespaceTableProcedureState state)
+    throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
+    try {
+      switch (state) {
+        case MIGRATE_NAMESPACE_TABLE_ADD_FAMILY:
+          TableDescriptor metaTableDesc =
+            env.getMasterServices().getTableDescriptors().get(TableName.META_TABLE_NAME);
+          if (!metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) {
+            TableDescriptor newMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc)
+              .setColumnFamily(
+                FSTableDescriptors.getNamespaceFamilyDescForMeta(env.getMasterConfiguration()))
+              .build();
+            addChildProcedure(new ModifyTableProcedure(env, newMetaTableDesc));
+          }
+          setNextState(MigrateNamespaceTableProcedureState.MIGRATE_NAMESPACE_TABLE_MIGRATE_DATA);
+          return Flow.HAS_MORE_STATE;
+        case MIGRATE_NAMESPACE_TABLE_MIGRATE_DATA:
+          migrate(env);
+          setNextState(MigrateNamespaceTableProcedureState.MIGRATE_NAMESPACE_TABLE_DISABLE_TABLE);
+          return Flow.HAS_MORE_STATE;
+        case MIGRATE_NAMESPACE_TABLE_DISABLE_TABLE:
+          addChildProcedure(new DisableTableProcedure(env, TableName.NAMESPACE_TABLE_NAME, false));
+          return Flow.NO_MORE_STATE;
+        default:
+          throw new UnsupportedOperationException("Unhandled state=" + state);
+      }
+    } catch (IOException e) {
+      if (retryCounter == null) {
+        retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
+      }
+      long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
+      LOG.warn("Failed migrating namespace data, suspend {}secs {}", backoff / 1000, this, e);
+      throw suspend(Math.toIntExact(backoff), true);
+    }
+  }
+
+  @Override
+  protected void rollbackState(MasterProcedureEnv env, MigrateNamespaceTableProcedureState state)
+    throws IOException, InterruptedException {
+  }
+
+  @Override
+  protected MigrateNamespaceTableProcedureState getState(int stateId) {
+    return MigrateNamespaceTableProcedureState.forNumber(stateId);
+  }
+
+  @Override
+  protected int getStateId(MigrateNamespaceTableProcedureState state) {
+    return state.getNumber();
+  }
+
+  @Override
+  protected MigrateNamespaceTableProcedureState getInitialState() {
+    return MigrateNamespaceTableProcedureState.MIGRATE_NAMESPACE_TABLE_ADD_FAMILY;
+  }
+
+  @Override
+  protected void completionCleanup(MasterProcedureEnv env) {
+    env.getMasterServices().getClusterSchema().getTableNamespaceManager().setMigrationDone();
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 27d66d14ca7..75bf721ef41 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -168,16 +169,28 @@ public class FSTableDescriptors implements TableDescriptors {
       .setMaxVersions(
         conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
       .setInMemory(true).setBlocksize(8 * 1024).setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-      .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1)
-      .setBloomFilterType(BloomType.ROWCOL).build();
+      .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).setBloomFilterType(BloomType.ROWCOL)
+      .build();
   }
 
   public static ColumnFamilyDescriptor getReplBarrierFamilyDescForMeta() {
     return ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_BARRIER_FAMILY)
       .setMaxVersions(HConstants.ALL_VERSIONS).setInMemory(true)
       .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-      .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1)
-      .setBloomFilterType(BloomType.ROWCOL).build();
+      .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).setBloomFilterType(BloomType.ROWCOL)
+      .build();
+  }
+
+  public static ColumnFamilyDescriptor getNamespaceFamilyDescForMeta(Configuration conf) {
+    return ColumnFamilyDescriptorBuilder.newBuilder(HConstants.NAMESPACE_FAMILY)
+      .setMaxVersions(
+        conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
+      .setInMemory(true)
+      .setBlocksize(
+        conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+      .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+      .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).setBloomFilterType(BloomType.ROWCOL)
+      .build();
   }
 
   private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf)
@@ -193,20 +206,10 @@ public class FSTableDescriptors implements TableDescriptors {
         .setBlocksize(
           conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
         .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setBloomFilterType(BloomType.ROWCOL)
-        .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1)
-        .build())
+        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).build())
       .setColumnFamily(getTableFamilyDescForMeta(conf))
       .setColumnFamily(getReplBarrierFamilyDescForMeta())
-      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.NAMESPACE_FAMILY)
-        .setMaxVersions(
-          conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
-        .setInMemory(true)
-        .setBlocksize(
-          conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
-        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-        .setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.ROW_INDEX_V1)
-        .setBloomFilterType(BloomType.ROWCOL).build())
-      .setCoprocessor(
+      .setColumnFamily(getNamespaceFamilyDescForMeta(conf)).setCoprocessor(
         CoprocessorDescriptorBuilder.newBuilder(MultiRowMutationEndpoint.class.getName())
           .setPriority(Coprocessor.PRIORITY_SYSTEM).build());
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java
index b1130363675..30dd308c28f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java
@@ -19,20 +19,34 @@ package org.apache.hadoop.hbase.master;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThrows;
 
 import java.io.IOException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.StartTestingClusterOption;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineNamespaceProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -41,6 +55,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
 
 /**
  * Testcase for HBASE-21154.
@@ -54,6 +69,75 @@ public class TestMigrateNamespaceTable {
 
   private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
 
+  private static volatile boolean CONTINUE = false;
+
+  // used to halt the migration procedure
+  public static final class SuspendProcedure extends Procedure<MasterProcedureEnv>
+    implements TableProcedureInterface {
+
+    @Override
+    public TableName getTableName() {
+      return TableName.META_TABLE_NAME;
+    }
+
+    @Override
+    public TableOperationType getTableOperationType() {
+      return TableOperationType.CREATE;
+    }
+
+    @Override
+    protected LockState acquireLock(final MasterProcedureEnv env) {
+      if (env.getProcedureScheduler().waitTableExclusiveLock(this, getTableName())) {
+        return LockState.LOCK_EVENT_WAIT;
+      }
+      return LockState.LOCK_ACQUIRED;
+    }
+
+    @Override
+    protected void releaseLock(final MasterProcedureEnv env) {
+      env.getProcedureScheduler().wakeTableExclusiveLock(this, getTableName());
+    }
+
+    @Override
+    protected boolean holdLock(MasterProcedureEnv env) {
+      return true;
+    }
+
+    @Override
+    protected Procedure<MasterProcedureEnv>[] execute(MasterProcedureEnv env)
+      throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
+      if (CONTINUE) {
+        return null;
+      }
+      throw suspend(1000, true);
+    }
+
+    @Override
+    protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) {
+      setState(ProcedureProtos.ProcedureState.RUNNABLE);
+      env.getProcedureScheduler().addFront(this);
+      return false;
+    }
+
+    @Override
+    protected void rollback(MasterProcedureEnv env) throws IOException, InterruptedException {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    protected boolean abort(MasterProcedureEnv env) {
+      return true;
+    }
+
+    @Override
+    protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
+    }
+
+    @Override
+    protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
+    }
+  }
+
   @BeforeClass
   public static void setUp() throws Exception {
     StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(1)
@@ -66,6 +150,32 @@ public class TestMigrateNamespaceTable {
     UTIL.shutdownMiniCluster();
   }
 
+  // simulate the upgrade scenario, where the meta table does not yet have the ns family
+  private void removeNamespaceFamily() throws IOException {
+    FileSystem fs = UTIL.getTestFileSystem();
+    Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration());
+    Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
+    TableDescriptor metaTableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+    TableDescriptor noNsMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc)
+      .removeColumnFamily(HConstants.NAMESPACE_FAMILY).build();
+    FSTableDescriptors.createTableDescriptorForTableDirectory(fs, tableDir, noNsMetaTableDesc,
+      true);
+    for (FileStatus status : fs.listStatus(tableDir)) {
+      if (!status.isDirectory()) {
+        continue;
+      }
+      Path familyPath = new Path(status.getPath(), HConstants.NAMESPACE_FAMILY_STR);
+      fs.delete(familyPath, true);
+    }
+  }
+
+  private void addNamespace(Table table, NamespaceDescriptor nd) throws IOException {
+    table.put(new Put(Bytes.toBytes(nd.getName())).addColumn(
+      TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES,
+      TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES,
+      ProtobufUtil.toProtoNamespaceDescriptor(nd).toByteArray()));
+  }
+
   @Test
   public void testMigrate() throws IOException, InterruptedException {
     UTIL.getAdmin().createTable(TableDescriptorBuilder.NAMESPACE_TABLEDESC);
@@ -73,17 +183,21 @@ public class TestMigrateNamespaceTable {
       for (int i = 0; i < 5; i++) {
         NamespaceDescriptor nd = NamespaceDescriptor.create("Test-NS-" + i)
           .addConfiguration("key-" + i, "value-" + i).build();
-        table.put(new Put(Bytes.toBytes(nd.getName())).addColumn(
-          TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES,
-          TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES,
-          ProtobufUtil.toProtoNamespaceDescriptor(nd).toByteArray()));
+        addNamespace(table, nd);
         AbstractStateMachineNamespaceProcedure
           .createDirectory(UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(), nd);
       }
+      // add default and system
+      addNamespace(table, NamespaceDescriptor.DEFAULT_NAMESPACE);
+      addNamespace(table, NamespaceDescriptor.SYSTEM_NAMESPACE);
     }
     MasterThread masterThread = UTIL.getMiniHBaseCluster().getMasterThread();
+    masterThread.getMaster().getMasterProcedureExecutor().submitProcedure(new SuspendProcedure());
     masterThread.getMaster().stop("For testing");
     masterThread.join();
+
+    removeNamespaceFamily();
+
     UTIL.getMiniHBaseCluster().startMaster();
 
     // 5 + default and system('hbase')
@@ -94,7 +208,16 @@ public class TestMigrateNamespaceTable {
       assertEquals(1, nd.getConfiguration().size());
       assertEquals("value-" + i, nd.getConfigurationValue("key-" + i));
     }
+    // before the migration is done, modifying namespaces is not supported
+    TableNamespaceManager tableNsMgr =
+      UTIL.getMiniHBaseCluster().getMaster().getClusterSchema().getTableNamespaceManager();
+    assertThrows(IOException.class, () -> tableNsMgr.deleteNamespace("Test-NS-0"));
+    assertThrows(IOException.class,
+      () -> tableNsMgr.addOrUpdateNamespace(NamespaceDescriptor.create("NNN").build()));
+    CONTINUE = true;
     UTIL.waitFor(30000, () -> UTIL.getAdmin().isTableDisabled(TableName.NAMESPACE_TABLE_NAME));
+    // this time it is allowed to change namespaces again
+    UTIL.getAdmin().createNamespace(NamespaceDescriptor.create("NNN").build());
 
     masterThread = UTIL.getMiniHBaseCluster().getMasterThread();
     masterThread.getMaster().stop("For testing");
@@ -102,7 +225,7 @@ public class TestMigrateNamespaceTable {
     UTIL.getMiniHBaseCluster().startMaster();
 
     // make sure that we could still restart the cluster after disabling the namespace table.
-    assertEquals(7, UTIL.getAdmin().listNamespaceDescriptors().length);
+    assertEquals(8, UTIL.getAdmin().listNamespaceDescriptors().length);
 
     // let's delete the namespace table
     UTIL.getAdmin().deleteTable(TableName.NAMESPACE_TABLE_NAME);
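
For readers following the patch, below is a minimal standalone sketch of the three-state flow that the new MigrateNamespaceTableProcedure walks through; the state names are taken from the enum added to MasterProcedure.proto above, but the class itself is illustrative only and is not part of this commit or of the HBase API.

// Illustrative sketch: mirrors the state transitions of MigrateNamespaceTableProcedure
// (ADD_FAMILY -> MIGRATE_DATA -> DISABLE_TABLE). It does not touch HBase; the println
// calls merely describe what each real state does.
public final class MigrationFlowSketch {

  enum State {
    MIGRATE_NAMESPACE_TABLE_ADD_FAMILY,
    MIGRATE_NAMESPACE_TABLE_MIGRATE_DATA,
    MIGRATE_NAMESPACE_TABLE_DISABLE_TABLE
  }

  public static void main(String[] args) {
    State state = State.MIGRATE_NAMESPACE_TABLE_ADD_FAMILY;
    boolean done = false;
    while (!done) {
      switch (state) {
        case MIGRATE_NAMESPACE_TABLE_ADD_FAMILY:
          // add the 'ns' family to hbase:meta (via ModifyTableProcedure) if it is missing
          System.out.println("ensure hbase:meta has the ns family");
          state = State.MIGRATE_NAMESPACE_TABLE_MIGRATE_DATA;
          break;
        case MIGRATE_NAMESPACE_TABLE_MIGRATE_DATA:
          // copy every row of hbase:namespace into the ns family of hbase:meta
          System.out.println("copy hbase:namespace rows into hbase:meta");
          state = State.MIGRATE_NAMESPACE_TABLE_DISABLE_TABLE;
          break;
        case MIGRATE_NAMESPACE_TABLE_DISABLE_TABLE:
          // schedule DisableTableProcedure for hbase:namespace; completion then marks migration done
          System.out.println("disable hbase:namespace");
          done = true;
          break;
      }
    }
  }
}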
