HBASE-19965 Fix flaky TestAsyncRegionAdminApi; ADDENDUM do same to 
TestAsyncTableAdminAPI


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4622ffa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4622ffa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4622ffa

Branch: refs/heads/HBASE-19064
Commit: b4622ffad7011b3d29bce6376edb896de6df9f6e
Parents: cf57ea1
Author: Michael Stack <st...@apache.org>
Authored: Mon Feb 12 22:52:56 2018 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Mon Feb 12 22:52:56 2018 -0800

----------------------------------------------------------------------
 .../hbase/client/TestAsyncTableAdminApi.java    | 249 +--------------
 .../hbase/client/TestAsyncTableAdminApi2.java   | 299 +++++++++++++++++++
 2 files changed, 301 insertions(+), 247 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b4622ffa/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
index 597a3d7..a3129f8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
@@ -30,11 +30,9 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.Set;
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.ExecutionException;
 import java.util.regex.Pattern;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HConstants;
@@ -43,12 +41,9 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor;
 import org.apache.hadoop.hbase.master.LoadBalancer;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -58,6 +53,8 @@ import org.junit.runners.Parameterized;
 
 /**
  * Class to test asynchronous table admin operations.
+ * @see TestAsyncTableAdminApi2 This test used to be joined with TestAsyncTableAdminApi2; the 
combined run was taking longer than our
+ * ten minute timeout, so it was split in two.
  */
 @RunWith(Parameterized.class)
 @Category({ LargeTests.class, ClientTests.class })
@@ -591,161 +588,6 @@ public class TestAsyncTableAdminApi extends 
TestAsyncAdminBase {
   }
 
   @Test
-  public void testDisableCatalogTable() throws Exception {
-    try {
-      this.admin.disableTable(TableName.META_TABLE_NAME).join();
-      fail("Expected to throw ConstraintException");
-    } catch (Exception e) {
-    }
-    // Before the fix for HBASE-6146, the below table creation was failing as 
the hbase:meta table
-    // actually getting disabled by the disableTable() call.
-    createTableWithDefaultConf(tableName);
-  }
-
-  @Test
-  public void testAddColumnFamily() throws Exception {
-    // Create a table with two families
-    TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tableName);
-    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0));
-    admin.createTable(builder.build()).join();
-    admin.disableTable(tableName).join();
-    // Verify the table descriptor
-    verifyTableDescriptor(tableName, FAMILY_0);
-
-    // Modify the table removing one family and verify the descriptor
-    admin.addColumnFamily(tableName, 
ColumnFamilyDescriptorBuilder.of(FAMILY_1)).join();
-    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);
-  }
-
-  @Test
-  public void testAddSameColumnFamilyTwice() throws Exception {
-    // Create a table with one families
-    TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tableName);
-    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0));
-    admin.createTable(builder.build()).join();
-    admin.disableTable(tableName).join();
-    // Verify the table descriptor
-    verifyTableDescriptor(tableName, FAMILY_0);
-
-    // Modify the table removing one family and verify the descriptor
-    admin.addColumnFamily(tableName, 
ColumnFamilyDescriptorBuilder.of(FAMILY_1)).join();
-    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);
-
-    try {
-      // Add same column family again - expect failure
-      this.admin.addColumnFamily(tableName, 
ColumnFamilyDescriptorBuilder.of(FAMILY_1)).join();
-      Assert.fail("Delete a non-exist column family should fail");
-    } catch (Exception e) {
-      // Expected.
-    }
-  }
-
-  @Test
-  public void testModifyColumnFamily() throws Exception {
-    TableDescriptorBuilder tdBuilder = 
TableDescriptorBuilder.newBuilder(tableName);
-    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(FAMILY_0);
-    int blockSize = cfd.getBlocksize();
-    admin.createTable(tdBuilder.addColumnFamily(cfd).build()).join();
-    admin.disableTable(tableName).join();
-    // Verify the table descriptor
-    verifyTableDescriptor(tableName, FAMILY_0);
-
-    int newBlockSize = 2 * blockSize;
-    cfd = 
ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_0).setBlocksize(newBlockSize).build();
-    // Modify colymn family
-    admin.modifyColumnFamily(tableName, cfd).join();
-
-    TableDescriptor htd = admin.getDescriptor(tableName).get();
-    ColumnFamilyDescriptor hcfd = htd.getColumnFamily(FAMILY_0);
-    assertTrue(hcfd.getBlocksize() == newBlockSize);
-  }
-
-  @Test
-  public void testModifyNonExistingColumnFamily() throws Exception {
-    TableDescriptorBuilder tdBuilder = 
TableDescriptorBuilder.newBuilder(tableName);
-    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(FAMILY_0);
-    int blockSize = cfd.getBlocksize();
-    admin.createTable(tdBuilder.addColumnFamily(cfd).build()).join();
-    admin.disableTable(tableName).join();
-    // Verify the table descriptor
-    verifyTableDescriptor(tableName, FAMILY_0);
-
-    int newBlockSize = 2 * blockSize;
-    cfd = 
ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_1).setBlocksize(newBlockSize).build();
-
-    // Modify a column family that is not in the table.
-    try {
-      admin.modifyColumnFamily(tableName, cfd).join();
-      Assert.fail("Modify a non-exist column family should fail");
-    } catch (Exception e) {
-      // Expected.
-    }
-  }
-
-  @Test
-  public void testDeleteColumnFamily() throws Exception {
-    // Create a table with two families
-    TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tableName);
-    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
-        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1));
-    admin.createTable(builder.build()).join();
-    admin.disableTable(tableName).join();
-    // Verify the table descriptor
-    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);
-
-    // Modify the table removing one family and verify the descriptor
-    admin.deleteColumnFamily(tableName, FAMILY_1).join();
-    verifyTableDescriptor(tableName, FAMILY_0);
-  }
-
-  @Test
-  public void testDeleteSameColumnFamilyTwice() throws Exception {
-    // Create a table with two families
-    TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tableName);
-    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
-        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1));
-    admin.createTable(builder.build()).join();
-    admin.disableTable(tableName).join();
-    // Verify the table descriptor
-    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);
-
-    // Modify the table removing one family and verify the descriptor
-    admin.deleteColumnFamily(tableName, FAMILY_1).join();
-    verifyTableDescriptor(tableName, FAMILY_0);
-
-    try {
-      // Delete again - expect failure
-      admin.deleteColumnFamily(tableName, FAMILY_1).join();
-      Assert.fail("Delete a non-exist column family should fail");
-    } catch (Exception e) {
-      // Expected.
-    }
-  }
-
-  private void verifyTableDescriptor(final TableName tableName, final 
byte[]... families)
-      throws Exception {
-    // Verify descriptor from master
-    TableDescriptor htd = admin.getDescriptor(tableName).get();
-    verifyTableDescriptor(htd, tableName, families);
-
-    // Verify descriptor from HDFS
-    MasterFileSystem mfs = 
TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
-    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    TableDescriptor td = 
FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
-    verifyTableDescriptor(td, tableName, families);
-  }
-
-  private void verifyTableDescriptor(final TableDescriptor htd, final 
TableName tableName,
-      final byte[]... families) {
-    Set<byte[]> htdFamilies = htd.getColumnFamilyNames();
-    assertEquals(tableName, htd.getTableName());
-    assertEquals(families.length, htdFamilies.size());
-    for (byte[] familyName : families) {
-      assertTrue("Expected family " + Bytes.toString(familyName), 
htdFamilies.contains(familyName));
-    }
-  }
-
-  @Test
   public void testIsTableEnabledAndDisabled() throws Exception {
     createTableWithDefaultConf(tableName);
     assertTrue(admin.isTableEnabled(tableName).get());
@@ -754,91 +596,4 @@ public class TestAsyncTableAdminApi extends 
TestAsyncAdminBase {
     assertFalse(admin.isTableEnabled(tableName).get());
     assertTrue(admin.isTableDisabled(tableName).get());
   }
-
-  @Test
-  public void testTableAvailableWithRandomSplitKeys() throws Exception {
-    createTableWithDefaultConf(tableName);
-    byte[][] splitKeys = new byte[1][];
-    splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 } 
};
-    boolean tableAvailable = admin.isTableAvailable(tableName, 
splitKeys).get();
-    assertFalse("Table should be created with 1 row in META", tableAvailable);
-  }
-
-  @Test
-  public void testCompactionTimestamps() throws Exception {
-    createTableWithDefaultConf(tableName);
-    AsyncTable<?> table = ASYNC_CONN.getTable(tableName);
-    Optional<Long> ts = admin.getLastMajorCompactionTimestamp(tableName).get();
-    assertFalse(ts.isPresent());
-    Put p = new Put(Bytes.toBytes("row1"));
-    p.addColumn(FAMILY, Bytes.toBytes("q"), Bytes.toBytes("v"));
-    table.put(p).join();
-    ts = admin.getLastMajorCompactionTimestamp(tableName).get();
-    // no files written -> no data
-    assertFalse(ts.isPresent());
-
-    admin.flush(tableName).join();
-    ts = admin.getLastMajorCompactionTimestamp(tableName).get();
-    // still 0, we flushed a file, but no major compaction happened
-    assertFalse(ts.isPresent());
-
-    byte[] regionName = ASYNC_CONN.getRegionLocator(tableName)
-        
.getRegionLocation(Bytes.toBytes("row1")).get().getRegion().getRegionName();
-    Optional<Long> ts1 = 
admin.getLastMajorCompactionTimestampForRegion(regionName).get();
-    assertFalse(ts1.isPresent());
-    p = new Put(Bytes.toBytes("row2"));
-    p.addColumn(FAMILY, Bytes.toBytes("q"), Bytes.toBytes("v"));
-    table.put(p).join();
-    admin.flush(tableName).join();
-    ts1 = admin.getLastMajorCompactionTimestamp(tableName).get();
-    // make sure the region API returns the same value, as the old file is 
still around
-    assertFalse(ts1.isPresent());
-
-    for (int i = 0; i < 3; i++) {
-      table.put(p).join();
-      admin.flush(tableName).join();
-    }
-    admin.majorCompact(tableName).join();
-    long curt = System.currentTimeMillis();
-    long waitTime = 10000;
-    long endt = curt + waitTime;
-    CompactionState state = admin.getCompactionState(tableName).get();
-    LOG.info("Current compaction state 1 is " + state);
-    while (state == CompactionState.NONE && curt < endt) {
-      Thread.sleep(100);
-      state = admin.getCompactionState(tableName).get();
-      curt = System.currentTimeMillis();
-      LOG.info("Current compaction state 2 is " + state);
-    }
-    // Now, should have the right compaction state, let's wait until the 
compaction is done
-    if (state == CompactionState.MAJOR) {
-      state = admin.getCompactionState(tableName).get();
-      LOG.info("Current compaction state 3 is " + state);
-      while (state != CompactionState.NONE && curt < endt) {
-        Thread.sleep(10);
-        state = admin.getCompactionState(tableName).get();
-        LOG.info("Current compaction state 4 is " + state);
-      }
-    }
-    // Sleep to wait region server report
-    Thread
-        
.sleep(TEST_UTIL.getConfiguration().getInt("hbase.regionserver.msginterval", 3 
* 1000) * 2);
-
-    ts = admin.getLastMajorCompactionTimestamp(tableName).get();
-    // after a compaction our earliest timestamp will have progressed forward
-    assertTrue(ts.isPresent());
-    assertTrue(ts.get() > 0);
-    // region api still the same
-    ts1 = admin.getLastMajorCompactionTimestampForRegion(regionName).get();
-    assertTrue(ts1.isPresent());
-    assertEquals(ts.get(), ts1.get());
-    table.put(p).join();
-    admin.flush(tableName).join();
-    ts = admin.getLastMajorCompactionTimestamp(tableName).join();
-    assertTrue(ts.isPresent());
-    assertEquals(ts.get(), ts1.get());
-    ts1 = admin.getLastMajorCompactionTimestampForRegion(regionName).get();
-    assertTrue(ts1.isPresent());
-    assertEquals(ts.get(), ts1.get());
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4622ffa/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
----------------------------------------------------------------------
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
new file mode 100644
index 0000000..180f95b
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
@@ -0,0 +1,299 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.Assert;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.Optional;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Class to test asynchronous table admin operations
+ * @see TestAsyncTableAdminApi This test used to be joined with TestAsyncTableAdminApi; the 
combined run was taking longer than our
+ * ten minute timeout, so it was split in two.
+ */
+@RunWith(Parameterized.class)
+@Category({ LargeTests.class, ClientTests.class })
+public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestAsyncTableAdminApi2.class);
+
+  @Test
+  public void testDisableCatalogTable() throws Exception {
+    try {
+      this.admin.disableTable(TableName.META_TABLE_NAME).join();
+      fail("Expected to throw ConstraintException");
+    } catch (Exception e) {
+    }
+    // Before the fix for HBASE-6146, the below table creation was failing as 
the hbase:meta table
+    // actually getting disabled by the disableTable() call.
+    createTableWithDefaultConf(tableName);
+  }
+
+  @Test
+  public void testAddColumnFamily() throws Exception {
+    // Create a table with one family
+    TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tableName);
+    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0));
+    admin.createTable(builder.build()).join();
+    admin.disableTable(tableName).join();
+    // Verify the table descriptor
+    verifyTableDescriptor(tableName, FAMILY_0);
+
+    // Modify the table adding one family and verify the descriptor
+    admin.addColumnFamily(tableName, 
ColumnFamilyDescriptorBuilder.of(FAMILY_1)).join();
+    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);
+  }
+
+  @Test
+  public void testAddSameColumnFamilyTwice() throws Exception {
+    // Create a table with one family
+    TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tableName);
+    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0));
+    admin.createTable(builder.build()).join();
+    admin.disableTable(tableName).join();
+    // Verify the table descriptor
+    verifyTableDescriptor(tableName, FAMILY_0);
+
+    // Modify the table adding one family and verify the descriptor
+    admin.addColumnFamily(tableName, 
ColumnFamilyDescriptorBuilder.of(FAMILY_1)).join();
+    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);
+
+    try {
+      // Add same column family again - expect failure
+      this.admin.addColumnFamily(tableName, 
ColumnFamilyDescriptorBuilder.of(FAMILY_1)).join();
+      Assert.fail("Delete a non-exist column family should fail");
+    } catch (Exception e) {
+      // Expected.
+    }
+  }
+
+  @Test
+  public void testModifyColumnFamily() throws Exception {
+    TableDescriptorBuilder tdBuilder = 
TableDescriptorBuilder.newBuilder(tableName);
+    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(FAMILY_0);
+    int blockSize = cfd.getBlocksize();
+    admin.createTable(tdBuilder.addColumnFamily(cfd).build()).join();
+    admin.disableTable(tableName).join();
+    // Verify the table descriptor
+    verifyTableDescriptor(tableName, FAMILY_0);
+
+    int newBlockSize = 2 * blockSize;
+    cfd = 
ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_0).setBlocksize(newBlockSize).build();
+    // Modify column family
+    admin.modifyColumnFamily(tableName, cfd).join();
+
+    TableDescriptor htd = admin.getDescriptor(tableName).get();
+    ColumnFamilyDescriptor hcfd = htd.getColumnFamily(FAMILY_0);
+    assertTrue(hcfd.getBlocksize() == newBlockSize);
+  }
+
+  @Test
+  public void testModifyNonExistingColumnFamily() throws Exception {
+    TableDescriptorBuilder tdBuilder = 
TableDescriptorBuilder.newBuilder(tableName);
+    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(FAMILY_0);
+    int blockSize = cfd.getBlocksize();
+    admin.createTable(tdBuilder.addColumnFamily(cfd).build()).join();
+    admin.disableTable(tableName).join();
+    // Verify the table descriptor
+    verifyTableDescriptor(tableName, FAMILY_0);
+
+    int newBlockSize = 2 * blockSize;
+    cfd = 
ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_1).setBlocksize(newBlockSize).build();
+
+    // Modify a column family that is not in the table.
+    try {
+      admin.modifyColumnFamily(tableName, cfd).join();
+      Assert.fail("Modify a non-exist column family should fail");
+    } catch (Exception e) {
+      // Expected.
+    }
+  }
+
+  @Test
+  public void testDeleteColumnFamily() throws Exception {
+    // Create a table with two families
+    TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tableName);
+    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1));
+    admin.createTable(builder.build()).join();
+    admin.disableTable(tableName).join();
+    // Verify the table descriptor
+    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);
+
+    // Modify the table removing one family and verify the descriptor
+    admin.deleteColumnFamily(tableName, FAMILY_1).join();
+    verifyTableDescriptor(tableName, FAMILY_0);
+  }
+
+  @Test
+  public void testDeleteSameColumnFamilyTwice() throws Exception {
+    // Create a table with two families
+    TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tableName);
+    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1));
+    admin.createTable(builder.build()).join();
+    admin.disableTable(tableName).join();
+    // Verify the table descriptor
+    verifyTableDescriptor(tableName, FAMILY_0, FAMILY_1);
+
+    // Modify the table removing one family and verify the descriptor
+    admin.deleteColumnFamily(tableName, FAMILY_1).join();
+    verifyTableDescriptor(tableName, FAMILY_0);
+
+    try {
+      // Delete again - expect failure
+      admin.deleteColumnFamily(tableName, FAMILY_1).join();
+      Assert.fail("Delete a non-exist column family should fail");
+    } catch (Exception e) {
+      // Expected.
+    }
+  }
+
+  private void verifyTableDescriptor(final TableName tableName, final 
byte[]... families)
+      throws Exception {
+    // Verify descriptor from master
+    TableDescriptor htd = admin.getDescriptor(tableName).get();
+    verifyTableDescriptor(htd, tableName, families);
+
+    // Verify descriptor from HDFS
+    MasterFileSystem mfs = 
TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
+    TableDescriptor td = 
FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
+    verifyTableDescriptor(td, tableName, families);
+  }
+
+  private void verifyTableDescriptor(final TableDescriptor htd, final 
TableName tableName,
+      final byte[]... families) {
+    Set<byte[]> htdFamilies = htd.getColumnFamilyNames();
+    assertEquals(tableName, htd.getTableName());
+    assertEquals(families.length, htdFamilies.size());
+    for (byte[] familyName : families) {
+      assertTrue("Expected family " + Bytes.toString(familyName), 
htdFamilies.contains(familyName));
+    }
+  }
+
+
+  @Test
+  public void testTableAvailableWithRandomSplitKeys() throws Exception {
+    createTableWithDefaultConf(tableName);
+    byte[][] splitKeys = new byte[1][];
+    splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 } 
};
+    boolean tableAvailable = admin.isTableAvailable(tableName, 
splitKeys).get();
+    assertFalse("Table should be created with 1 row in META", tableAvailable);
+  }
+
+  @Test
+  public void testCompactionTimestamps() throws Exception {
+    createTableWithDefaultConf(tableName);
+    AsyncTable<?> table = ASYNC_CONN.getTable(tableName);
+    Optional<Long> ts = admin.getLastMajorCompactionTimestamp(tableName).get();
+    assertFalse(ts.isPresent());
+    Put p = new Put(Bytes.toBytes("row1"));
+    p.addColumn(FAMILY, Bytes.toBytes("q"), Bytes.toBytes("v"));
+    table.put(p).join();
+    ts = admin.getLastMajorCompactionTimestamp(tableName).get();
+    // no files written -> no data
+    assertFalse(ts.isPresent());
+
+    admin.flush(tableName).join();
+    ts = admin.getLastMajorCompactionTimestamp(tableName).get();
+    // still 0, we flushed a file, but no major compaction happened
+    assertFalse(ts.isPresent());
+
+    byte[] regionName = ASYNC_CONN.getRegionLocator(tableName)
+        
.getRegionLocation(Bytes.toBytes("row1")).get().getRegion().getRegionName();
+    Optional<Long> ts1 = 
admin.getLastMajorCompactionTimestampForRegion(regionName).get();
+    assertFalse(ts1.isPresent());
+    p = new Put(Bytes.toBytes("row2"));
+    p.addColumn(FAMILY, Bytes.toBytes("q"), Bytes.toBytes("v"));
+    table.put(p).join();
+    admin.flush(tableName).join();
+    ts1 = admin.getLastMajorCompactionTimestamp(tableName).get();
+    // make sure the region API returns the same value, as the old file is 
still around
+    assertFalse(ts1.isPresent());
+
+    for (int i = 0; i < 3; i++) {
+      table.put(p).join();
+      admin.flush(tableName).join();
+    }
+    admin.majorCompact(tableName).join();
+    long curt = System.currentTimeMillis();
+    long waitTime = 10000;
+    long endt = curt + waitTime;
+    CompactionState state = admin.getCompactionState(tableName).get();
+    LOG.info("Current compaction state 1 is " + state);
+    while (state == CompactionState.NONE && curt < endt) {
+      Thread.sleep(100);
+      state = admin.getCompactionState(tableName).get();
+      curt = System.currentTimeMillis();
+      LOG.info("Current compaction state 2 is " + state);
+    }
+    // Now, should have the right compaction state, let's wait until the 
compaction is done
+    if (state == CompactionState.MAJOR) {
+      state = admin.getCompactionState(tableName).get();
+      LOG.info("Current compaction state 3 is " + state);
+      while (state != CompactionState.NONE && curt < endt) {
+        Thread.sleep(10);
+        state = admin.getCompactionState(tableName).get();
+        LOG.info("Current compaction state 4 is " + state);
+      }
+    }
+    // Sleep to wait region server report
+    Thread
+        
.sleep(TEST_UTIL.getConfiguration().getInt("hbase.regionserver.msginterval", 3 
* 1000) * 2);
+
+    ts = admin.getLastMajorCompactionTimestamp(tableName).get();
+    // after a compaction our earliest timestamp will have progressed forward
+    assertTrue(ts.isPresent());
+    assertTrue(ts.get() > 0);
+    // region api still the same
+    ts1 = admin.getLastMajorCompactionTimestampForRegion(regionName).get();
+    assertTrue(ts1.isPresent());
+    assertEquals(ts.get(), ts1.get());
+    table.put(p).join();
+    admin.flush(tableName).join();
+    ts = admin.getLastMajorCompactionTimestamp(tableName).join();
+    assertTrue(ts.isPresent());
+    assertEquals(ts.get(), ts1.get());
+    ts1 = admin.getLastMajorCompactionTimestampForRegion(regionName).get();
+    assertTrue(ts1.isPresent());
+    assertEquals(ts.get(), ts1.get());
+  }
+}

Reply via email to