http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java
index 9480d38..1ec4636 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.exec;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
 import java.util.ArrayList;
@@ -27,16 +28,22 @@ import java.util.Set;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.CheckResult.PartitionResult;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.Msck;
+import org.apache.hadoop.hive.metastore.PartitionDropOptions;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetastoreException;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.RetryUtilities;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
-import org.apache.hadoop.hive.ql.metadata.CheckResult.PartitionResult;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.stats.StatsUtils;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hive.common.util.RetryUtilities.RetryException;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -47,57 +54,71 @@ import org.mockito.Mockito;
 
 /**
  * Unit test for function dropPartitionsInBatches in DDLTask.
- *
  **/
 public class TestMsckDropPartitionsInBatches {
   private static HiveConf hiveConf;
-  private static DDLTask ddlTask;
+  private static Msck msck;
+  private final String catName = "hive";
+  private final String dbName = "default";
   private final String tableName = "test_msck_batch";
-  private static Hive db;
+  private static IMetaStoreClient db;
   private List<String> repairOutput;
   private Table table;
 
   @BeforeClass
-  public static void setupClass() throws HiveException {
+  public static void setupClass() throws Exception {
     hiveConf = new HiveConf(TestMsckCreatePartitionsInBatches.class);
     hiveConf.setIntVar(ConfVars.HIVE_MSCK_REPAIR_BATCH_SIZE, 5);
     hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
-        "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
+      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
     SessionState.start(hiveConf);
-    db = Hive.get(hiveConf);
-    ddlTask = new DDLTask();
+    db = new HiveMetaStoreClient(hiveConf);
+    msck = new Msck(false, false);
+    msck.init(hiveConf);
   }
 
   @Before
   public void before() throws Exception {
-    createPartitionedTable("default", tableName);
-    table = db.getTable(tableName);
+    createPartitionedTable(catName, dbName, tableName);
+    table = db.getTable(catName, dbName, tableName);
     repairOutput = new ArrayList<String>();
   }
 
   @After
   public void after() throws Exception {
-    cleanUpTableQuietly("default", tableName);
+    cleanUpTableQuietly(catName, dbName, tableName);
   }
 
-  private Table createPartitionedTable(String dbName, String tableName) throws Exception {
+  private Table createPartitionedTable(String catName, String dbName, String tableName) throws Exception {
     try {
-      db.dropTable(dbName, tableName);
-      db.createTable(tableName, Arrays.asList("key", "value"), // Data columns.
-          Arrays.asList("city"), // Partition columns.
-          TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class);
-      return db.getTable(dbName, tableName);
+      db.dropTable(catName, dbName, tableName);
+      Table table = new Table();
+      table.setCatName(catName);
+      table.setDbName(dbName);
+      table.setTableName(tableName);
+      FieldSchema col1 = new FieldSchema("key", "string", "");
+      FieldSchema col2 = new FieldSchema("value", "int", "");
+      FieldSchema col3 = new FieldSchema("city", "string", "");
+      StorageDescriptor sd = new StorageDescriptor();
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.setInputFormat(TextInputFormat.class.getCanonicalName());
+      sd.setOutputFormat(HiveIgnoreKeyTextOutputFormat.class.getCanonicalName());
+      sd.setCols(Arrays.asList(col1, col2));
+      table.setPartitionKeys(Arrays.asList(col3));
+      table.setSd(sd);
+      db.createTable(table);
+      return db.getTable(catName, dbName, tableName);
     } catch (Exception exception) {
       fail("Unable to drop and create table " + StatsUtils
-          .getFullyQualifiedTableName(dbName, tableName) + " because " + StringUtils
-          .stringifyException(exception));
+        .getFullyQualifiedTableName(dbName, tableName) + " because " + StringUtils
+        .stringifyException(exception));
       throw exception;
     }
   }
 
-  private void cleanUpTableQuietly(String dbName, String tableName) {
+  private void cleanUpTableQuietly(String catName, String dbName, String tableName) {
     try {
-      db.dropTable(dbName, tableName, true, true, true);
+      db.dropTable(catName, dbName, tableName, true, true, true);
     } catch (Exception exception) {
       fail("Unexpected exception: " + 
StringUtils.stringifyException(exception));
     }
@@ -142,9 +163,10 @@ public class TestMsckDropPartitionsInBatches {
   private final int noException = 1;
   private final int oneException = 2;
   private final int allException = 3;
+
  private void runDropPartitions(int partCount, int batchSize, int maxRetries, int exceptionStatus)
-      throws Exception {
-    Hive spyDb = Mockito.spy(db);
+    throws Exception {
+    IMetaStoreClient spyDb = Mockito.spy(db);
 
     // create partCount dummy partitions
     Set<PartitionResult> partsNotInFs = dropPartsNotInFs(partCount);
@@ -163,13 +185,13 @@ public class TestMsckDropPartitionsInBatches {
 
     if (exceptionStatus == oneException) {
       // After one exception everything is expected to run
-      actualBatchSize = batchSize/2;
+      actualBatchSize = batchSize / 2;
     }
 
     if (exceptionStatus != allException) {
-      expectedCallCount = partCount/actualBatchSize;
+      expectedCallCount = partCount / actualBatchSize;
 
-      if (expectedCallCount*actualBatchSize < partCount) {
+      if (expectedCallCount * actualBatchSize < partCount) {
        // partCount not equally divided into batches.  last batch size will be less than batch size
         lastBatchSize = partCount - (expectedCallCount * actualBatchSize);
 
@@ -182,9 +204,10 @@ public class TestMsckDropPartitionsInBatches {
         expectedCallCount++;
 
         // only first call throws exception
-        Mockito.doThrow(HiveException.class).doCallRealMethod().doCallRealMethod().when(spyDb)
-            .dropPartitions(Mockito.eq(table), Mockito.any(List.class), Mockito.eq(false),
-                Mockito.eq(true));
+        Mockito.doThrow(MetastoreException.class).doCallRealMethod().doCallRealMethod().when(spyDb)
+          .dropPartitions(Mockito.eq(table.getCatName()), Mockito.eq(table.getDbName()),
+            Mockito.eq(table.getTableName()),
+            Mockito.any(List.class), Mockito.any(PartitionDropOptions.class));
       }
 
       expectedBatchSizes = new int[expectedCallCount];
@@ -195,15 +218,15 @@ public class TestMsckDropPartitionsInBatches {
       // second batch to last but one batch will be actualBatchSize
       // actualBatchSize is same as batchSize when no exceptions are expected
       // actualBatchSize is half of batchSize when 1 exception is expected
-      for (int i = 1; i < expectedCallCount-1; i++) {
+      for (int i = 1; i < expectedCallCount - 1; i++) {
         expectedBatchSizes[i] = Integer.min(partCount, actualBatchSize);
       }
 
-      expectedBatchSizes[expectedCallCount-1] = lastBatchSize;
+      expectedBatchSizes[expectedCallCount - 1] = lastBatchSize;
 
       // batch size from input and decaying factor of 2
-      ddlTask.dropPartitionsInBatches(spyDb, repairOutput, partsNotInFs, table, batchSize, 2,
-          maxRetries);
+      msck.dropPartitionsInBatches(spyDb, repairOutput, partsNotInFs, null, table, batchSize, 2,
+        maxRetries);
     } else {
       if (maxRetries == 0) {
        // Retries will be done till decaying factor reduces to 0.  Decaying Factor is 2.
@@ -219,35 +242,37 @@ public class TestMsckDropPartitionsInBatches {
         expectedBatchSizes[i] = Integer.min(partCount, actualBatchSize);
       }
       // all calls fail
-      Mockito.doThrow(HiveException.class).when(spyDb)
-          .dropPartitions(Mockito.eq(table), Mockito.any(List.class), Mockito.eq(false),
-              Mockito.eq(true));
+      Mockito.doThrow(MetastoreException.class).when(spyDb)
+        .dropPartitions(Mockito.eq(table.getCatName()), Mockito.eq(table.getDbName()), Mockito.eq(table.getTableName()),
+          Mockito.any(List.class), Mockito.any(PartitionDropOptions.class));
 
       Exception ex = null;
       try {
-        ddlTask.dropPartitionsInBatches(spyDb, repairOutput, partsNotInFs, table, batchSize, 2,
-            maxRetries);
+        msck.dropPartitionsInBatches(spyDb, repairOutput, partsNotInFs, null, table, batchSize, 2,
+          maxRetries);
       } catch (Exception retryEx) {
         ex = retryEx;
       }
       Assert.assertFalse("Exception was expected but was not thrown", ex == 
null);
-      Assert.assertTrue("Unexpected class of exception thrown", ex instanceof 
RetryException);
+      Assert.assertTrue("Unexpected class of exception thrown", ex instanceof 
RetryUtilities.RetryException);
     }
 
    // there should be expectedCallCount calls to drop partitions with each batch size of
     // actualBatchSize
     ArgumentCaptor<List> argument = ArgumentCaptor.forClass(List.class);
     Mockito.verify(spyDb, Mockito.times(expectedCallCount))
-        .dropPartitions(Mockito.eq(table), argument.capture(), Mockito.eq(false), Mockito.eq(true));
+      .dropPartitions(Mockito.eq(table.getCatName()), Mockito.eq(table.getDbName()), Mockito.eq(table.getTableName()),
+        argument.capture(), Mockito.any(PartitionDropOptions.class));
 
     // confirm the batch sizes were as expected
     List<List> droppedParts = argument.getAllValues();
 
+    assertEquals(expectedCallCount, droppedParts.size());
     for (int i = 0; i < expectedCallCount; i++) {
       Assert.assertEquals(
-          String.format("Unexpected batch size in attempt %d.  Expected: %d.  Found: %d", i + 1,
-              expectedBatchSizes[i], droppedParts.get(i).size()),
-          expectedBatchSizes[i], droppedParts.get(i).size());
+        String.format("Unexpected batch size in attempt %d.  Expected: %d.  Found: %d", i + 1,
+          expectedBatchSizes[i], droppedParts.get(i).size()),
+        expectedBatchSizes[i], droppedParts.get(i).size());
     }
   }
 
@@ -301,7 +326,7 @@ public class TestMsckDropPartitionsInBatches {
 
   /**
   * Tests the number of calls to dropPartitions and the respective batch sizes when first call to
-   * dropPartitions throws HiveException. The batch size should be reduced once by the
+   * dropPartitions throws MetastoreException. The batch size should be reduced once by the
    * decayingFactor 2, iow after batch size is halved.
    *
    * @throws Exception
@@ -313,7 +338,7 @@ public class TestMsckDropPartitionsInBatches {
 
   /**
   * Tests the retries exhausted case when Hive.DropPartitions method call always keep throwing
-   * HiveException. The batch sizes should exponentially decreased based on the decaying factor and
+   * MetastoreException. The batch sizes should decrease exponentially based on the decaying factor and
    * ultimately give up when it reaches 0.
    *
    * @throws Exception
@@ -325,6 +350,7 @@ public class TestMsckDropPartitionsInBatches {
 
   /**
    * Tests the maximum retry attempt is set to 2.
+   *
    * @throws Exception
    */
   @Test
@@ -334,6 +360,7 @@ public class TestMsckDropPartitionsInBatches {
 
   /**
    * Tests when max number of retries is set to 1.
+   *
    * @throws Exception
    */
   @Test

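The schedule that runDropPartitions asserts can be reproduced outside the Mockito harness. Below is a minimal standalone sketch in plain Java with hypothetical names; it mimics only the halve-on-failure arithmetic of the metastore's RetryUtilities, not its real API. Batches are issued at the requested size, a failure divides the size by the decaying factor before the retry, and a size of 0 (or exceeding maxRetries) exhausts the retries:

import java.util.ArrayList;
import java.util.List;

public class DecayingBatchSketch {

  interface BatchWork {
    boolean run(int batchSize); // returning false simulates a thrown exception
  }

  // Returns the size of every attempted call, successful or not.
  static List<Integer> drive(int totalItems, int batchSize, int decayingFactor,
      int maxRetries, BatchWork work) {
    List<Integer> attempted = new ArrayList<>();
    int remaining = totalItems;
    int size = batchSize;
    int failures = 0;
    while (remaining > 0) {
      int take = Math.min(size, remaining);
      attempted.add(take);
      if (work.run(take)) {
        remaining -= take;            // batch committed
      } else {
        failures++;
        size = size / decayingFactor; // decay the batch size, then retry
        if (size == 0 || (maxRetries > 0 && failures >= maxRetries)) {
          throw new IllegalStateException("retries exhausted");
        }
      }
    }
    return attempted;
  }

  public static void main(String[] args) {
    // no failures: [5, 5, 5, 2], i.e. partCount / batchSize calls plus a remainder batch
    System.out.println(drive(17, 5, 2, 0, b -> true));

    // first call fails once: [5, 2, 2, 2, 2, 2, 2, 2, 2, 1], matching the
    // test's oneException expectation of one extra call at half the batch size
    boolean[] failedOnce = {false};
    System.out.println(drive(17, 5, 2, 2, b -> {
      if (!failedOnce[0]) {
        failedOnce[0] = true;
        return false;
      }
      return true;
    }));
  }
}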
http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java
index a2a0583..434d82a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hive.ql.metadata;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -29,11 +31,14 @@ import java.util.Map;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.CheckResult;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreChecker;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.MetastoreException;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.mapred.TextInputFormat;
@@ -52,9 +57,11 @@ import com.google.common.collect.Lists;
 public class TestHiveMetaStoreChecker {
 
   private Hive hive;
+  private IMetaStoreClient msc;
   private FileSystem fs;
   private HiveMetaStoreChecker checker = null;
 
+  private final String catName = "hive";
   private final String dbName = "testhivemetastorechecker_db";
   private final String tableName = "testhivemetastorechecker_table";
 
@@ -69,7 +76,8 @@ public class TestHiveMetaStoreChecker {
     hive = Hive.get();
     hive.getConf().setIntVar(HiveConf.ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT, 15);
     hive.getConf().set(HiveConf.ConfVars.HIVE_MSCK_PATH_VALIDATION.varname, "throw");
-    checker = new HiveMetaStoreChecker(hive);
+    msc = new HiveMetaStoreClient(hive.getConf());
+    checker = new HiveMetaStoreChecker(msc, hive.getConf());
 
     partCols = new ArrayList<FieldSchema>();
     partCols.add(new FieldSchema(partDateName, serdeConstants.STRING_TYPE_NAME, ""));
@@ -92,11 +100,9 @@ public class TestHiveMetaStoreChecker {
   private void dropDbTable()  {
     // cleanup
     try {
-      hive.dropTable(dbName, tableName, true, true);
-      hive.dropDatabase(dbName, true, true, true);
-    } catch (NoSuchObjectException e) {
-      // ignore
-    } catch (HiveException e) {
+      msc.dropTable(catName, dbName, tableName, true, true);
+      msc.dropDatabase(catName, dbName, true, true, true);
+    } catch (TException e) {
       // ignore
     }
   }
@@ -108,28 +114,28 @@ public class TestHiveMetaStoreChecker {
   }
 
   @Test
-  public void testTableCheck() throws HiveException, MetaException,
-      IOException, TException, AlreadyExistsException {
+  public void testTableCheck() throws HiveException, IOException, TException, MetastoreException {
     CheckResult result = new CheckResult();
-    checker.checkMetastore(dbName, null, null, result);
+    checker.checkMetastore(catName, dbName, null, null, result);
     // we haven't added anything so should return an all ok
     assertEquals(Collections.<String>emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String>emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotInMs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
 
     // check table only, should not exist in ms
     result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
     assertEquals(1, result.getTablesNotInMs().size());
     assertEquals(tableName, result.getTablesNotInMs().iterator().next());
     assertEquals(Collections.<String>emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotInMs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
 
     Database db = new Database();
+    db.setCatalogName(catName);
     db.setName(dbName);
-    hive.createDatabase(db);
+    msc.createDatabase(db);
 
     Table table = new Table(dbName, tableName);
     table.setDbName(dbName);
@@ -142,19 +148,19 @@ public class TestHiveMetaStoreChecker {
     // now we've got a table, check that it works
     // first check all (1) tables
     result = new CheckResult();
-    checker.checkMetastore(dbName, null, null, result);
+    checker.checkMetastore(catName, dbName, null, null, result);
     assertEquals(Collections.<String>emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String>emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotInMs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
 
     // then let's check the one we know about
     result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
     assertEquals(Collections.<String>emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String>emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotInMs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
 
     // remove the table folder
     fs = table.getPath().getFileSystem(hive.getConf());
@@ -162,12 +168,12 @@ public class TestHiveMetaStoreChecker {
 
     // now this shouldn't find the path on the fs
     result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
-    assertEquals(Collections.<String>emptySet(), result.getTablesNotInMs());;
+    checker.checkMetastore(catName, dbName, tableName, null, result);
+    assertEquals(Collections.<String>emptySet(), result.getTablesNotInMs());
     assertEquals(1, result.getTablesNotOnFs().size());
     assertEquals(tableName, result.getTablesNotOnFs().iterator().next());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotInMs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
 
     // put it back and one additional table
     fs.mkdirs(table.getPath());
@@ -178,12 +184,12 @@ public class TestHiveMetaStoreChecker {
 
     // find the extra table
     result = new CheckResult();
-    checker.checkMetastore(dbName, null, null, result);
+    checker.checkMetastore(catName, dbName, null, null, result);
     assertEquals(1, result.getTablesNotInMs().size());
     assertEquals(fakeTable.getName(), Lists.newArrayList(result.getTablesNotInMs()).get(0));
     assertEquals(Collections.<String>emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotInMs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
 
     // create a new external table
     hive.dropTable(dbName, tableName);
@@ -192,11 +198,11 @@ public class TestHiveMetaStoreChecker {
 
     // should return all ok
     result = new CheckResult();
-    checker.checkMetastore(dbName, null, null, result);
+    checker.checkMetastore(catName, dbName, null, null, result);
     assertEquals(Collections.<String>emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String>emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotInMs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
   }
 
   /*
@@ -205,7 +211,7 @@ public class TestHiveMetaStoreChecker {
    */
   @Test
   public void testAdditionalPartitionDirs()
-      throws HiveException, AlreadyExistsException, IOException {
+    throws HiveException, AlreadyExistsException, IOException, MetastoreException {
     Table table = createTestTable();
     List<Partition> partitions = hive.getPartitions(table);
     assertEquals(2, partitions.size());
@@ -216,16 +222,17 @@ public class TestHiveMetaStoreChecker {
     fs.mkdirs(fakePart);
     fs.deleteOnExit(fakePart);
     CheckResult result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
     assertEquals(Collections.<String> emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String> emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String> emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult> emptySet(), result.getPartitionsNotOnFs());
     //fakePart path partition is added since the defined partition keys are valid
     assertEquals(1, result.getPartitionsNotInMs().size());
   }
 
-  @Test(expected = HiveException.class)
-  public void testInvalidPartitionKeyName() throws HiveException, AlreadyExistsException, IOException {
+  @Test(expected = MetastoreException.class)
+  public void testInvalidPartitionKeyName()
+    throws HiveException, AlreadyExistsException, IOException, MetastoreException {
     Table table = createTestTable();
     List<Partition> partitions = hive.getPartitions(table);
     assertEquals(2, partitions.size());
@@ -235,7 +242,7 @@ public class TestHiveMetaStoreChecker {
         "fakedate=2009-01-01/fakecity=sanjose");
     fs.mkdirs(fakePart);
     fs.deleteOnExit(fakePart);
-    checker.checkMetastore(dbName, tableName, null, new CheckResult());
+    checker.checkMetastore(catName, dbName, tableName, null, new CheckResult());
   }
 
   /*
@@ -244,9 +251,9 @@ public class TestHiveMetaStoreChecker {
    */
   @Test
   public void testSkipInvalidPartitionKeyName()
-      throws HiveException, AlreadyExistsException, IOException {
+    throws HiveException, AlreadyExistsException, IOException, MetastoreException {
     hive.getConf().set(HiveConf.ConfVars.HIVE_MSCK_PATH_VALIDATION.varname, "skip");
-    checker = new HiveMetaStoreChecker(hive);
+    checker = new HiveMetaStoreChecker(msc, hive.getConf());
     Table table = createTestTable();
     List<Partition> partitions = hive.getPartitions(table);
     assertEquals(2, partitions.size());
@@ -258,18 +265,18 @@ public class TestHiveMetaStoreChecker {
     fs.deleteOnExit(fakePart);
     createPartitionsDirectoriesOnFS(table, 2);
     CheckResult result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
     assertEquals(Collections.<String> emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String> emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String> emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult> emptySet(), result.getPartitionsNotOnFs());
     // only 2 valid partitions should be added
     assertEquals(2, result.getPartitionsNotInMs().size());
   }
 
-  private Table createTestTable() throws AlreadyExistsException, HiveException {
+  private Table createTestTable() throws HiveException, AlreadyExistsException {
     Database db = new Database();
     db.setName(dbName);
-    hive.createDatabase(db);
+    hive.createDatabase(db, true);
 
     Table table = new Table(dbName, tableName);
     table.setDbName(dbName);
@@ -289,17 +296,17 @@ public class TestHiveMetaStoreChecker {
   }
 
   @Test
-  public void testPartitionsCheck() throws HiveException, MetaException,
-      IOException, TException, AlreadyExistsException {
+  public void testPartitionsCheck() throws HiveException,
+    IOException, TException, MetastoreException {
     Table table = createTestTable();
 
     CheckResult result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
     // all is well
     assertEquals(Collections.<String>emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String>emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotInMs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
 
     List<Partition> partitions = hive.getPartitions(table);
     assertEquals(2, partitions.size());
@@ -313,7 +320,7 @@ public class TestHiveMetaStoreChecker {
     fs.delete(partToRemovePath, true);
 
     result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
     // missing one partition on fs
     assertEquals(Collections.<String>emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String>emptySet(), result.getTablesNotOnFs());
@@ -322,17 +329,17 @@ public class TestHiveMetaStoreChecker {
         .getPartitionName());
     assertEquals(partToRemove.getTable().getTableName(),
         result.getPartitionsNotOnFs().iterator().next().getTableName());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotInMs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
 
     List<Map<String, String>> partsCopy = new ArrayList<Map<String, String>>();
     partsCopy.add(partitions.get(1).getSpec());
     // check only the partition that exists, all should be well
     result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, partsCopy, result);
+    checker.checkMetastore(catName, dbName, tableName, partsCopy, result);
     assertEquals(Collections.<String>emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String>emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotInMs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
 
     // old test is moved to msck_repair_2.q
 
@@ -340,17 +347,17 @@ public class TestHiveMetaStoreChecker {
     hive.dropTable(dbName, tableName, true, true);
     hive.createTable(table);
     result = new CheckResult();
-    checker.checkMetastore(dbName, null, null, result);
+    checker.checkMetastore(catName, dbName, null, null, result);
     assertEquals(Collections.<String>emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String>emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotInMs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotInMs());
     System.err.println("Test completed - partition check");
   }
 
   @Test
-  public void testDataDeletion() throws HiveException, MetaException,
-      IOException, TException, AlreadyExistsException, NoSuchObjectException {
+  public void testDataDeletion() throws HiveException,
+    IOException, TException {
 
     Database db = new Database();
     db.setName(dbName);
@@ -386,15 +393,15 @@ public class TestHiveMetaStoreChecker {
   * Test multi-threaded implementation of checker to find out missing partitions
    */
   @Test
-  public void testPartitionsNotInMs() throws HiveException, AlreadyExistsException, IOException {
+  public void testPartitionsNotInMs() throws HiveException, AlreadyExistsException, IOException, MetastoreException {
     Table testTable = createPartitionedTestTable(dbName, tableName, 2, 0);
     // add 10 partitions on the filesystem
     createPartitionsDirectoriesOnFS(testTable, 10);
     CheckResult result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
     assertEquals(Collections.<String>emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String>emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String>emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult>emptySet(), result.getPartitionsNotOnFs());
     assertEquals(10, result.getPartitionsNotInMs().size());
   }
 
@@ -403,17 +410,17 @@ public class TestHiveMetaStoreChecker {
    */
   @Test
   public void testSingleThreadedCheckMetastore()
-      throws HiveException, AlreadyExistsException, IOException {
+    throws HiveException, AlreadyExistsException, IOException, MetastoreException {
     // set num of threads to 0 so that single-threaded checkMetastore is called
     hive.getConf().setIntVar(HiveConf.ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT, 0);
     Table testTable = createPartitionedTestTable(dbName, tableName, 2, 0);
     // add 10 partitions on the filesystem
     createPartitionsDirectoriesOnFS(testTable, 10);
     CheckResult result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
     assertEquals(Collections.<String> emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String> emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String> emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult> emptySet(), result.getPartitionsNotOnFs());
     assertEquals(10, result.getPartitionsNotInMs().size());
   }
 
@@ -426,7 +433,7 @@ public class TestHiveMetaStoreChecker {
    */
   @Test
   public void testSingleThreadedDeeplyNestedTables()
-      throws HiveException, AlreadyExistsException, IOException {
+    throws HiveException, AlreadyExistsException, IOException, MetastoreException {
     // set num of threads to 0 so that single-threaded checkMetastore is called
     hive.getConf().setIntVar(HiveConf.ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT, 0);
     int poolSize = 2;
@@ -435,10 +442,10 @@ public class TestHiveMetaStoreChecker {
     // add 10 partitions on the filesystem
     createPartitionsDirectoriesOnFS(testTable, 10);
     CheckResult result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
     assertEquals(Collections.<String> emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String> emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String> emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult> emptySet(), result.getPartitionsNotOnFs());
     assertEquals(10, result.getPartitionsNotInMs().size());
   }
 
@@ -451,7 +458,7 @@ public class TestHiveMetaStoreChecker {
    */
   @Test
   public void testDeeplyNestedPartitionedTables()
-      throws HiveException, AlreadyExistsException, IOException {
+    throws HiveException, AlreadyExistsException, IOException, MetastoreException {
     hive.getConf().setIntVar(HiveConf.ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT, 2);
     int poolSize = 2;
     // create a deeply nested table which has more partition keys than the pool size
@@ -459,10 +466,10 @@ public class TestHiveMetaStoreChecker {
     // add 10 partitions on the filesystem
     createPartitionsDirectoriesOnFS(testTable, 10);
     CheckResult result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
     assertEquals(Collections.<String> emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String> emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String> emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult> emptySet(), result.getPartitionsNotOnFs());
     assertEquals(10, result.getPartitionsNotInMs().size());
   }
 
@@ -487,20 +494,20 @@ public class TestHiveMetaStoreChecker {
     CheckResult result = new CheckResult();
     Exception exception = null;
     try {
-      checker.checkMetastore(dbName, tableName, null, result);
+      checker.checkMetastore(catName, dbName, tableName, null, result);
     } catch (Exception e) {
       exception = e;
     }
-    assertTrue("Expected HiveException", exception!=null && exception 
instanceof HiveException);
+    assertTrue("Expected MetastoreException", exception!=null && exception 
instanceof MetastoreException);
     createFile(sb.toString(), "dummyFile");
     result = new CheckResult();
     exception = null;
     try {
-      checker.checkMetastore(dbName, tableName, null, result);
+      checker.checkMetastore(catName, dbName, tableName, null, result);
     } catch (Exception e) {
       exception = e;
     }
-    assertTrue("Expected HiveException", exception!=null && exception 
instanceof HiveException);
+    assertTrue("Expected MetastoreException", exception!=null && exception 
instanceof MetastoreException);
   }
 
   /**
@@ -511,14 +518,14 @@ public class TestHiveMetaStoreChecker {
    * @throws HiveException
    * @throws IOException
    */
-  @Test(expected = HiveException.class)
+  @Test(expected = MetastoreException.class)
   public void testInvalidOrderForPartitionKeysOnFS()
-      throws AlreadyExistsException, HiveException, IOException {
+    throws AlreadyExistsException, HiveException, IOException, MetastoreException {
     Table testTable = createPartitionedTestTable(dbName, tableName, 2, 0);
     // add 10 partitions on the filesystem
     createInvalidPartitionDirsOnFS(testTable, 10);
     CheckResult result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
   }
 
   /*
@@ -527,19 +534,19 @@ public class TestHiveMetaStoreChecker {
    */
   @Test
   public void testSkipInvalidOrderForPartitionKeysOnFS()
-      throws AlreadyExistsException, HiveException, IOException {
+    throws AlreadyExistsException, HiveException, IOException, MetastoreException {
     hive.getConf().set(HiveConf.ConfVars.HIVE_MSCK_PATH_VALIDATION.varname, "skip");
-    checker = new HiveMetaStoreChecker(hive);
+    checker = new HiveMetaStoreChecker(msc, hive.getConf());
     Table testTable = createPartitionedTestTable(dbName, tableName, 2, 0);
     // add 10 partitions on the filesystem
     createInvalidPartitionDirsOnFS(testTable, 2);
     // add 10 partitions on the filesystem
     createPartitionsDirectoriesOnFS(testTable, 2);
     CheckResult result = new CheckResult();
-    checker.checkMetastore(dbName, tableName, null, result);
+    checker.checkMetastore(catName, dbName, tableName, null, result);
     assertEquals(Collections.<String> emptySet(), result.getTablesNotInMs());
     assertEquals(Collections.<String> emptySet(), result.getTablesNotOnFs());
-    assertEquals(Collections.<String> emptySet(), result.getPartitionsNotOnFs());
+    assertEquals(Collections.<CheckResult.PartitionResult> emptySet(), result.getPartitionsNotOnFs());
     // only 2 valid partitions should be added
     assertEquals(2, result.getPartitionsNotInMs().size());
   }
@@ -565,20 +572,20 @@ public class TestHiveMetaStoreChecker {
     CheckResult result = new CheckResult();
     Exception exception = null;
     try {
-      checker.checkMetastore(dbName, tableName, null, result);
+      checker.checkMetastore(catName, dbName, tableName, null, result);
     } catch (Exception e) {
       exception = e;
     }
-    assertTrue("Expected HiveException", exception!=null && exception 
instanceof HiveException);
+    assertTrue("Expected MetastoreException", exception!=null && exception 
instanceof MetastoreException);
     createFile(sb.toString(), "dummyFile");
     result = new CheckResult();
     exception = null;
     try {
-      checker.checkMetastore(dbName, tableName, null, result);
+      checker.checkMetastore(catName, dbName, tableName, null, result);
     } catch (Exception e) {
       exception = e;
     }
-    assertTrue("Expected HiveException", exception!=null && exception 
instanceof HiveException);
+    assertTrue("Expected MetastoreException", exception!=null && exception 
instanceof MetastoreException);
   }
   /**
   * Creates a test partitioned table with the required level of nested partitions and number of
@@ -597,7 +604,7 @@ public class TestHiveMetaStoreChecker {
       int valuesPerPartition) throws AlreadyExistsException, HiveException {
     Database db = new Database();
     db.setName(dbName);
-    hive.createDatabase(db);
+    hive.createDatabase(db, true);
 
     Table table = new Table(dbName, tableName);
     table.setDbName(dbName);
@@ -611,7 +618,7 @@ public class TestHiveMetaStoreChecker {
     }
     table.setPartCols(partKeys);
     // create table
-    hive.createTable(table);
+    hive.createTable(table, true);
     table = hive.getTable(dbName, tableName);
     if (valuesPerPartition == 0) {
       return table;

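Taken together, the changes above reduce the checker's new wiring to a few lines. The following is a minimal usage sketch assembled only from the constructor and checkMetastore signature visible in this diff; the reachable-metastore setup and the "hive"/"default" literals (mirroring the test's catName/dbName fields) are assumptions:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.CheckResult;
import org.apache.hadoop.hive.metastore.HiveMetaStoreChecker;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;

public class CheckerUsageSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    IMetaStoreClient msc = new HiveMetaStoreClient(conf);
    // The checker is now built from a metastore client plus a conf
    // instead of a ql-layer Hive object.
    HiveMetaStoreChecker checker = new HiveMetaStoreChecker(msc, conf);

    // checkMetastore now takes the catalog name as its first argument;
    // a null table name means "check every table in the database".
    CheckResult result = new CheckResult();
    checker.checkMetastore("hive", "default", null, null, result);
    System.out.println("partitions missing from metastore: "
        + result.getPartitionsNotInMs());
  }
}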
http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/queries/clientpositive/msck_repair_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/msck_repair_acid.q b/ql/src/test/queries/clientpositive/msck_repair_acid.q
new file mode 100644
index 0000000..369095d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/msck_repair_acid.q
@@ -0,0 +1,34 @@
+set hive.msck.repair.batch.size=1;
+set hive.mv.files.thread=0;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+DROP TABLE IF EXISTS repairtable_n6;
+
+CREATE TABLE repairtable_n6(col STRING) PARTITIONED BY (p1 STRING, p2 STRING) STORED AS ORC tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+
+EXPLAIN LOCKS MSCK TABLE repairtable_n6;
+MSCK TABLE repairtable_n6;
+
+show partitions repairtable_n6;
+
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n6/p1=a/p2=b/;
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n6/p1=c/p2=d/;
+dfs -touchz ${system:test.warehouse.dir}/repairtable_n6/p1=a/p2=b/datafile;
+dfs -touchz ${system:test.warehouse.dir}/repairtable_n6/p1=c/p2=d/datafile;
+
+EXPLAIN LOCKS MSCK REPAIR TABLE default.repairtable_n6;
+MSCK REPAIR TABLE default.repairtable_n6;
+
+show partitions default.repairtable_n6;
+
+set hive.mapred.mode=strict;
+
+dfs -rmr ${system:test.warehouse.dir}/repairtable_n6/p1=c;
+
+EXPLAIN LOCKS MSCK REPAIR TABLE default.repairtable_n6 DROP PARTITIONS;
+MSCK REPAIR TABLE default.repairtable_n6 DROP PARTITIONS;
+
+show partitions default.repairtable_n6;
+
+DROP TABLE default.repairtable_n6;

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/queries/clientpositive/partition_discovery.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_discovery.q b/ql/src/test/queries/clientpositive/partition_discovery.q
new file mode 100644
index 0000000..2f0ff87
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/partition_discovery.q
@@ -0,0 +1,77 @@
+set hive.msck.repair.batch.size=1;
+set hive.mv.files.thread=0;
+
+DROP TABLE IF EXISTS repairtable_n7;
+DROP TABLE IF EXISTS repairtable_n8;
+DROP TABLE IF EXISTS repairtable_n9;
+DROP TABLE IF EXISTS repairtable_n10;
+
+CREATE EXTERNAL TABLE repairtable_n7(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+LOCATION '${system:test.warehouse.dir}/repairtable_n7';
+
+describe formatted repairtable_n7;
+
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n7/p1=a/p2=b/;
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n7/p1=c/p2=d/;
+dfs -touchz ${system:test.warehouse.dir}/repairtable_n7/p1=a/p2=b/datafile;
+dfs -touchz ${system:test.warehouse.dir}/repairtable_n7/p1=c/p2=d/datafile;
+
+MSCK REPAIR TABLE default.repairtable_n7;
+show partitions default.repairtable_n7;
+
+CREATE EXTERNAL TABLE repairtable_n8 LIKE repairtable_n7
+LOCATION '${system:test.warehouse.dir}/repairtable_n8';
+
+describe formatted repairtable_n8;
+
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n8/p1=a/p2=b/;
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n8/p1=c/p2=d/;
+dfs -touchz ${system:test.warehouse.dir}/repairtable_n8/p1=a/p2=b/datafile;
+dfs -touchz ${system:test.warehouse.dir}/repairtable_n8/p1=c/p2=d/datafile;
+
+MSCK REPAIR TABLE default.repairtable_n8;
+show partitions default.repairtable_n8;
+
+CREATE EXTERNAL TABLE repairtable_n9(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+LOCATION '${system:test.warehouse.dir}/repairtable_n9' tblproperties ("partition.retention.period"="10s");
+
+describe formatted repairtable_n9;
+
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n9/p1=a/p2=b/;
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n9/p1=c/p2=d/;
+dfs -touchz ${system:test.warehouse.dir}/repairtable_n9/p1=a/p2=b/datafile;
+dfs -touchz ${system:test.warehouse.dir}/repairtable_n9/p1=c/p2=d/datafile;
+
+set msck.repair.enable.partition.retention=false;
+MSCK REPAIR TABLE default.repairtable_n9;
+show partitions default.repairtable_n9;
+
+!sleep 12;
+
+set msck.repair.enable.partition.retention=true;
+-- msck does not drop partitions, so this still should be a no-op
+MSCK REPAIR TABLE default.repairtable_n9;
+show partitions default.repairtable_n9;
+
+-- this will drop old partitions
+MSCK REPAIR TABLE default.repairtable_n9 SYNC PARTITIONS;
+show partitions default.repairtable_n9;
+
+CREATE EXTERNAL TABLE repairtable_n10 PARTITIONED BY(p1,p2) STORED AS ORC AS SELECT * FROM repairtable_n9;
+describe formatted repairtable_n10;
+
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n10/p1=a/p2=b/;
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable_n10/p1=c/p2=d/;
+dfs -touchz ${system:test.warehouse.dir}/repairtable_n10/p1=a/p2=b/datafile;
+dfs -touchz ${system:test.warehouse.dir}/repairtable_n10/p1=c/p2=d/datafile;
+
+set msck.repair.enable.partition.retention=false;
+!sleep 12;
+MSCK REPAIR TABLE default.repairtable_n10;
+show partitions default.repairtable_n10;
+
+
+DROP TABLE default.repairtable_n7;
+DROP TABLE default.repairtable_n8;
+DROP TABLE default.repairtable_n9;
+DROP TABLE default.repairtable_n10;

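The retention steps at the end of partition_discovery.q come down to a simple age comparison. A hedged sketch of that arithmetic, with illustrative names rather than Hive's internal code: a partition becomes a drop candidate once its age exceeds partition.retention.period, which is why the 12-second sleep pushes the partitions past the table's 10s setting before MSCK REPAIR ... SYNC PARTITIONS drops them.

import java.util.concurrent.TimeUnit;

public class RetentionSketch {
  // True when a partition is older than the configured retention period.
  static boolean isExpired(long partitionCreateTimeMs, long retentionMs, long nowMs) {
    return retentionMs > 0 && nowMs - partitionCreateTimeMs > retentionMs;
  }

  public static void main(String[] args) {
    long retentionMs = TimeUnit.SECONDS.toMillis(10);  // "10s" in the q-file
    long created = System.currentTimeMillis() - TimeUnit.SECONDS.toMillis(12);
    // After the q-file's 12-second sleep the partition is past retention,
    // so a repair run with partition retention enabled will drop it:
    System.out.println(isExpired(created, retentionMs, System.currentTimeMillis())); // true
  }
}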
http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/create_like.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_like.q.out b/ql/src/test/results/clientpositive/create_like.q.out
index f4a5ed5..6d4e14a 100644
--- a/ql/src/test/results/clientpositive/create_like.q.out
+++ b/ql/src/test/results/clientpositive/create_like.q.out
@@ -118,6 +118,7 @@ Table Type:                 EXTERNAL_TABLE
 Table Parameters:               
	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}}
        EXTERNAL                TRUE                
+       discover.partitions     true                
        numFiles                0                   
        numRows                 0                   
        rawDataSize             0                   

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/create_like_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_like_view.q.out b/ql/src/test/results/clientpositive/create_like_view.q.out
index 870f280..7e33e50 100644
--- a/ql/src/test/results/clientpositive/create_like_view.q.out
+++ b/ql/src/test/results/clientpositive/create_like_view.q.out
@@ -172,6 +172,7 @@ Table Parameters:
	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}}
        EXTERNAL                TRUE                
        bucketing_version       2                   
+       discover.partitions     true                
        numFiles                0                   
        numRows                 0                   
        rawDataSize             0                   

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/default_file_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/default_file_format.q.out b/ql/src/test/results/clientpositive/default_file_format.q.out
index 0adf5ae..beef419 100644
--- a/ql/src/test/results/clientpositive/default_file_format.q.out
+++ b/ql/src/test/results/clientpositive/default_file_format.q.out
@@ -172,6 +172,7 @@ Table Type:                 EXTERNAL_TABLE
 Table Parameters:               
        EXTERNAL                TRUE                
        bucketing_version       2                   
+       discover.partitions     true                
 #### A masked pattern was here ####
                 
 # Storage Information           
@@ -236,6 +237,7 @@ Table Type:                 EXTERNAL_TABLE
 Table Parameters:               
        EXTERNAL                TRUE                
        bucketing_version       2                   
+       discover.partitions     true                
        numFiles                0                   
        totalSize               0                   
 #### A masked pattern was here ####
@@ -472,6 +474,7 @@ Table Type:                 EXTERNAL_TABLE
 Table Parameters:               
        EXTERNAL                TRUE                
        bucketing_version       2                   
+       discover.partitions     true                
        numFiles                0                   
        totalSize               0                   
 #### A masked pattern was here ####
@@ -538,6 +541,7 @@ Table Type:                 EXTERNAL_TABLE
 Table Parameters:               
        EXTERNAL                TRUE                
        bucketing_version       2                   
+       discover.partitions     true                
        numFiles                0                   
        totalSize               0                   
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out b/ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out
index 883994c..14522fb 100644
--- a/ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidkafkamini_basic.q.out
@@ -355,6 +355,7 @@ STAGE PLANS:
                     columns __time,page,user,language,added,deleted
                     columns.comments 
                     columns.types timestamp:string:string:string:int:int
+                    discover.partitions true
                     druid.datasource default.druid_kafka_test
                     druid.fieldNames language,user
                     druid.fieldTypes string,string
@@ -396,6 +397,7 @@ STAGE PLANS:
                       columns __time,page,user,language,added,deleted
                       columns.comments 
                       columns.types timestamp:string:string:string:int:int
+                      discover.partitions true
                       druid.datasource default.druid_kafka_test
                       druid.fieldNames language,user
                       druid.fieldTypes string,string

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out b/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
index 9c9af44..b07ed52 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
@@ -245,6 +245,7 @@ STAGE PLANS:
                     columns __time,cstring1,cstring2,cdouble,cfloat,ctinyint,csmallint,cint,cbigint,cboolean1,cboolean2
                     columns.comments 
                     columns.types timestamp with local time zone:string:string:double:float:tinyint:smallint:int:bigint:boolean:boolean
+                    discover.partitions true
                     druid.datasource default.druid_table_alltypesorc
                     druid.fieldNames vc
                     druid.fieldTypes int
@@ -277,6 +278,7 @@ STAGE PLANS:
                       columns __time,cstring1,cstring2,cdouble,cfloat,ctinyint,csmallint,cint,cbigint,cboolean1,cboolean2
                       columns.comments 
                       columns.types timestamp with local time zone:string:string:double:float:tinyint:smallint:int:bigint:boolean:boolean
+                      discover.partitions true
                       druid.datasource default.druid_table_alltypesorc
                       druid.fieldNames vc
                       druid.fieldTypes int

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/druid_topn.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_topn.q.out b/ql/src/test/results/clientpositive/druid_topn.q.out
index 179902a..755e977 100644
--- a/ql/src/test/results/clientpositive/druid_topn.q.out
+++ b/ql/src/test/results/clientpositive/druid_topn.q.out
@@ -42,6 +42,7 @@ Table Parameters:
	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"__time\":\"true\",\"added\":\"true\",\"anonymous\":\"true\",\"count\":\"true\",\"deleted\":\"true\",\"delta\":\"true\",\"language\":\"true\",\"namespace\":\"true\",\"newpage\":\"true\",\"page\":\"true\",\"robot\":\"true\",\"unpatrolled\":\"true\",\"user\":\"true\",\"variation\":\"true\"}}
        EXTERNAL                TRUE                
        bucketing_version       2                   
+       discover.partitions     true                
        druid.datasource        wikipedia           
        numFiles                0                   
        numRows                 0                   

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/explain_locks.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/explain_locks.q.out b/ql/src/test/results/clientpositive/explain_locks.q.out
index ed7f1e8..3183533 100644
--- a/ql/src/test/results/clientpositive/explain_locks.q.out
+++ b/ql/src/test/results/clientpositive/explain_locks.q.out
@@ -2,6 +2,7 @@ PREHOOK: query: explain locks drop table test_explain_locks
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: explain locks drop table test_explain_locks
 POSTHOOK: type: DROPTABLE
+LOCK INFORMATION:
 PREHOOK: query: explain locks create table test_explain_locks (a int, b int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/llap/external_table_purge.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/external_table_purge.q.out b/ql/src/test/results/clientpositive/llap/external_table_purge.q.out
index 24c778e..4e2f6a3 100644
--- a/ql/src/test/results/clientpositive/llap/external_table_purge.q.out
+++ b/ql/src/test/results/clientpositive/llap/external_table_purge.q.out
@@ -119,6 +119,7 @@ LOCATION
   'hdfs://### HDFS PATH ###'
 TBLPROPERTIES (
   'bucketing_version'='2', 
+  'discover.partitions'='true', 
   'external.table.purge'='false', 
 #### A masked pattern was here ####
 test.comment=Table should have data
@@ -168,6 +169,7 @@ LOCATION
   'hdfs://### HDFS PATH ###'
 TBLPROPERTIES (
   'bucketing_version'='2', 
+  'discover.partitions'='true', 
   'external.table.purge'='true', 
 #### A masked pattern was here ####
 test.comment=Table should have data
@@ -451,6 +453,7 @@ LOCATION
   'hdfs://### HDFS PATH ###'
 TBLPROPERTIES (
   'bucketing_version'='2', 
+  'discover.partitions'='true', 
   'external.table.purge'='false', 
 #### A masked pattern was here ####
 PREHOOK: query: alter table etp_2 add partition (p1='part1')
@@ -520,6 +523,7 @@ LOCATION
   'hdfs://### HDFS PATH ###'
 TBLPROPERTIES (
   'bucketing_version'='2', 
+  'discover.partitions'='true', 
   'external.table.purge'='true', 
 #### A masked pattern was here ####
 PREHOOK: query: alter table etp_2 add partition (p1='part1')

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/llap/mm_exim.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mm_exim.q.out b/ql/src/test/results/clientpositive/llap/mm_exim.q.out
index ee6cf06..868d107 100644
--- a/ql/src/test/results/clientpositive/llap/mm_exim.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_exim.q.out
@@ -643,6 +643,7 @@ Table Type:                 EXTERNAL_TABLE
 Table Parameters:               
        EXTERNAL                TRUE                
        bucketing_version       2                   
+       discover.partitions     true                
        numFiles                3                   
        numRows                 6                   
        rawDataSize             37                  

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/llap/strict_managed_tables2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/strict_managed_tables2.q.out b/ql/src/test/results/clientpositive/llap/strict_managed_tables2.q.out
index f3b6152..348266c 100644
--- a/ql/src/test/results/clientpositive/llap/strict_managed_tables2.q.out
+++ b/ql/src/test/results/clientpositive/llap/strict_managed_tables2.q.out
@@ -49,6 +49,7 @@ LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
   'bucketing_version'='2', 
+  'discover.partitions'='true', 
 #### A masked pattern was here ####
 PREHOOK: query: create table smt2_tab2 (c1 string, c2 string)
 PREHOOK: type: CREATETABLE
@@ -137,6 +138,7 @@ LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
   'bucketing_version'='2', 
+  'discover.partitions'='true', 
 #### A masked pattern was here ####
 PREHOOK: query: create table smt2_tab5 (c1 string, c2 string)
 PREHOOK: type: CREATETABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/llap/table_nonprintable.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/table_nonprintable.q.out b/ql/src/test/results/clientpositive/llap/table_nonprintable.q.out
index 8221b8c..9dc8710 100644
--- a/ql/src/test/results/clientpositive/llap/table_nonprintable.q.out
+++ b/ql/src/test/results/clientpositive/llap/table_nonprintable.q.out
@@ -26,8 +26,8 @@ POSTHOOK: query: msck repair table table_external
 POSTHOOK: type: MSCK
 POSTHOOK: Output: default@table_external
 Partitions not in metastore:   table_external:day=¢Bar
-Repair: Cannot add partition table_external:day=Foo due to invalid characters in the name
 #### A masked pattern was here ####
+Repair: Cannot add partition table_external:day=Foo due to invalid characters in the name
 Found 2 items
 drwxr-xr-x   - ### USER ### ### GROUP ###          0 ### HDFS DATE ### hdfs://### HDFS PATH ###Foo
 drwxr-xr-x   - ### USER ### ### GROUP ###          0 ### HDFS DATE ### hdfs://### HDFS PATH ###¢Bar

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/llap/whroot_external1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/whroot_external1.q.out b/ql/src/test/results/clientpositive/llap/whroot_external1.q.out
index cac158c..4333bf4 100644
--- a/ql/src/test/results/clientpositive/llap/whroot_external1.q.out
+++ b/ql/src/test/results/clientpositive/llap/whroot_external1.q.out
@@ -72,6 +72,7 @@ LOCATION
   'hdfs://### HDFS PATH ###'
 TBLPROPERTIES (
   'bucketing_version'='2', 
+  'discover.partitions'='true', 
 #### A masked pattern was here ####
 PREHOOK: query: insert into table wre1_ext1 select * from src where key < 5
 PREHOOK: type: QUERY
@@ -157,6 +158,7 @@ LOCATION
   'hdfs://### HDFS PATH ###'
 TBLPROPERTIES (
   'bucketing_version'='2', 
+  'discover.partitions'='true', 
 #### A masked pattern was here ####
 PREHOOK: query: insert into table wre1_ext2 select * from src where key < 5
 PREHOOK: type: QUERY
@@ -246,6 +248,7 @@ LOCATION
   'hdfs://### HDFS PATH ###'
 TBLPROPERTIES (
   'bucketing_version'='2', 
+  'discover.partitions'='true', 
 #### A masked pattern was here ####
 PREHOOK: query: insert into table wre1_db.wre1_ext3 select * from src where key < 5
 PREHOOK: type: QUERY
@@ -331,6 +334,7 @@ LOCATION
   'hdfs://### HDFS PATH ###'
 TBLPROPERTIES (
   'bucketing_version'='2', 
+  'discover.partitions'='true', 
 #### A masked pattern was here ####
 PREHOOK: query: insert into table wre1_db.wre1_ext4 select * from src where key < 5
 PREHOOK: type: QUERY
@@ -413,6 +417,7 @@ OUTPUTFORMAT
 LOCATION
   'hdfs://### HDFS PATH ###'
 TBLPROPERTIES (
+  'discover.partitions'='true', 
 #### A masked pattern was here ####
 PREHOOK: query: insert into table wre1_ext5 select * from src where key < 5
 PREHOOK: type: QUERY
@@ -495,6 +500,7 @@ OUTPUTFORMAT
 LOCATION
   'hdfs://### HDFS PATH ###'
 TBLPROPERTIES (
+  'discover.partitions'='true', 
 #### A masked pattern was here ####
 PREHOOK: query: insert into table wre1_db.wre1_ext6 select * from src where key < 5
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/msck_repair_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/msck_repair_acid.q.out b/ql/src/test/results/clientpositive/msck_repair_acid.q.out
new file mode 100644
index 0000000..902a4b7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/msck_repair_acid.q.out
@@ -0,0 +1,88 @@
+PREHOOK: query: DROP TABLE IF EXISTS repairtable_n6
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS repairtable_n6
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE repairtable_n6(col STRING) PARTITIONED BY (p1 STRING, p2 STRING) STORED AS ORC tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@repairtable_n6
+POSTHOOK: query: CREATE TABLE repairtable_n6(col STRING) PARTITIONED BY (p1 STRING, p2 STRING) STORED AS ORC tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@repairtable_n6
+PREHOOK: query: EXPLAIN LOCKS MSCK TABLE repairtable_n6
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n6
+POSTHOOK: query: EXPLAIN LOCKS MSCK TABLE repairtable_n6
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n6
+LOCK INFORMATION:
+default.repairtable_n6 -> SHARED_READ
+PREHOOK: query: MSCK TABLE repairtable_n6
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n6
+POSTHOOK: query: MSCK TABLE repairtable_n6
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n6
+PREHOOK: query: show partitions repairtable_n6
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@repairtable_n6
+POSTHOOK: query: show partitions repairtable_n6
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@repairtable_n6
+PREHOOK: query: EXPLAIN LOCKS MSCK REPAIR TABLE default.repairtable_n6
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n6
+POSTHOOK: query: EXPLAIN LOCKS MSCK REPAIR TABLE default.repairtable_n6
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n6
+LOCK INFORMATION:
+default.repairtable_n6 -> EXCLUSIVE
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n6
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n6
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n6
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n6
+Partitions not in metastore:   repairtable_n6:p1=a/p2=b        repairtable_n6:p1=c/p2=d
+#### A masked pattern was here ####
+PREHOOK: query: show partitions default.repairtable_n6
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@repairtable_n6
+POSTHOOK: query: show partitions default.repairtable_n6
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@repairtable_n6
+p1=a/p2=b
+p1=c/p2=d
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN LOCKS MSCK REPAIR TABLE default.repairtable_n6 DROP PARTITIONS
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n6
+POSTHOOK: query: EXPLAIN LOCKS MSCK REPAIR TABLE default.repairtable_n6 DROP PARTITIONS
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n6
+LOCK INFORMATION:
+default.repairtable_n6 -> EXCLUSIVE
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n6 DROP PARTITIONS
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n6
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n6 DROP PARTITIONS
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n6
+Partitions missing from filesystem:    repairtable_n6:p1=c/p2=d
+Repair: Dropped partition from metastore hive.default.repairtable_n6:p1=c/p2=d
+PREHOOK: query: show partitions default.repairtable_n6
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@repairtable_n6
+POSTHOOK: query: show partitions default.repairtable_n6
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@repairtable_n6
+p1=a/p2=b
+PREHOOK: query: DROP TABLE default.repairtable_n6
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@repairtable_n6
+PREHOOK: Output: default@repairtable_n6
+POSTHOOK: query: DROP TABLE default.repairtable_n6
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@repairtable_n6
+POSTHOOK: Output: default@repairtable_n6

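Note: this new golden file pins down MSCK locking on a transactional (insert-only) table: a check-only MSCK takes SHARED_READ, while both REPAIR variants take EXCLUSIVE. The statements the test runs, condensed:

  CREATE TABLE repairtable_n6 (col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
    STORED AS ORC
    TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only');

  EXPLAIN LOCKS MSCK TABLE repairtable_n6;                                 -- SHARED_READ
  EXPLAIN LOCKS MSCK REPAIR TABLE default.repairtable_n6;                  -- EXCLUSIVE
  EXPLAIN LOCKS MSCK REPAIR TABLE default.repairtable_n6 DROP PARTITIONS;  -- EXCLUSIVE
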
http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/msck_repair_drop.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/msck_repair_drop.q.out b/ql/src/test/results/clientpositive/msck_repair_drop.q.out
index 2456734..27b718c 100644
--- a/ql/src/test/results/clientpositive/msck_repair_drop.q.out
+++ b/ql/src/test/results/clientpositive/msck_repair_drop.q.out
@@ -58,16 +58,16 @@ POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS
 POSTHOOK: type: MSCK
 POSTHOOK: Output: default@repairtable_n1
 Partitions missing from filesystem:    repairtable_n1:p1=2/p2=21       repairtable_n1:p1=2/p2=210      repairtable_n1:p1=2/p2=22       repairtable_n1:p1=2/p2=23       repairtable_n1:p1=2/p2=24       repairtable_n1:p1=2/p2=25       repairtable_n1:p1=2/p2=26       repairtable_n1:p1=2/p2=27       repairtable_n1:p1=2/p2=28       repairtable_n1:p1=2/p2=29
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=21
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=210
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=22
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=23
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=24
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=25
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=26
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=27
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=28
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=29
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=21
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=210
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=22
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=23
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=24
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=25
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=26
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=27
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=28
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=29
 PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@repairtable_n1
@@ -124,16 +124,16 @@ POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS
 POSTHOOK: type: MSCK
 POSTHOOK: Output: default@repairtable_n1
 Partitions missing from filesystem:    repairtable_n1:p1=2/p2=21       repairtable_n1:p1=2/p2=210      repairtable_n1:p1=2/p2=22       repairtable_n1:p1=2/p2=23       repairtable_n1:p1=2/p2=24       repairtable_n1:p1=2/p2=25       repairtable_n1:p1=2/p2=26       repairtable_n1:p1=2/p2=27       repairtable_n1:p1=2/p2=28       repairtable_n1:p1=2/p2=29
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=21
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=210
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=22
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=23
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=24
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=25
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=26
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=27
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=28
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=29
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=21
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=210
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=22
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=23
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=24
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=25
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=26
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=27
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=28
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=29
 PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@repairtable_n1
@@ -190,16 +190,16 @@ POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS
 POSTHOOK: type: MSCK
 POSTHOOK: Output: default@repairtable_n1
 Partitions missing from filesystem:    repairtable_n1:p1=2/p2=21       repairtable_n1:p1=2/p2=210      repairtable_n1:p1=2/p2=22       repairtable_n1:p1=2/p2=23       repairtable_n1:p1=2/p2=24       repairtable_n1:p1=2/p2=25       repairtable_n1:p1=2/p2=26       repairtable_n1:p1=2/p2=27       repairtable_n1:p1=2/p2=28       repairtable_n1:p1=2/p2=29
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=21
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=210
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=22
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=23
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=24
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=25
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=26
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=27
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=28
-Repair: Dropped partition from metastore default.repairtable_n1:p1=2/p2=29
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=21
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=210
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=22
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=23
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=24
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=25
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=26
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=27
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=28
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=29
 PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@repairtable_n1
@@ -279,8 +279,8 @@ POSTHOOK: type: MSCK
 POSTHOOK: Output: default@repairtable_n1
 Partitions not in metastore:   repairtable_n1:p1=5/p2=51       repairtable_n1:p1=5/p2=52
 Partitions missing from filesystem:    repairtable_n1:p1=3/p2=31       repairtable_n1:p1=3/p2=32
-Repair: Dropped partition from metastore default.repairtable_n1:p1=3/p2=31
-Repair: Dropped partition from metastore default.repairtable_n1:p1=3/p2=32
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=3/p2=31
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=3/p2=32
 PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@repairtable_n1
@@ -309,8 +309,8 @@ POSTHOOK: Output: default@repairtable_n1
 Partitions not in metastore:   repairtable_n1:p1=5/p2=51       repairtable_n1:p1=5/p2=52
 Partitions missing from filesystem:    repairtable_n1:p1=4/p2=41       repairtable_n1:p1=4/p2=42
 #### A masked pattern was here ####
-Repair: Dropped partition from metastore default.repairtable_n1:p1=4/p2=41
-Repair: Dropped partition from metastore default.repairtable_n1:p1=4/p2=42
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=4/p2=41
+Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=4/p2=42
 PREHOOK: query: show partitions default.repairtable_n1
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@repairtable_n1

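Note: the only change in this file is that the repair message now qualifies partitions with the catalog name ('hive.default.repairtable_n1' rather than 'default.repairtable_n1'), matching the Msck implementation moving onto the metastore client, which addresses tables by catalog, database, and table. For example:

  MSCK REPAIR TABLE default.repairtable_n1 DROP PARTITIONS;
  -- per-partition output now reads:
  -- Repair: Dropped partition from metastore hive.default.repairtable_n1:p1=2/p2=21
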
http://git-wip-us.apache.org/repos/asf/hive/blob/64bea035/ql/src/test/results/clientpositive/partition_discovery.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/partition_discovery.q.out b/ql/src/test/results/clientpositive/partition_discovery.q.out
new file mode 100644
index 0000000..9075136
--- /dev/null
+++ b/ql/src/test/results/clientpositive/partition_discovery.q.out
@@ -0,0 +1,357 @@
+PREHOOK: query: DROP TABLE IF EXISTS repairtable_n7
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS repairtable_n7
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE IF EXISTS repairtable_n8
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS repairtable_n8
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE IF EXISTS repairtable_n9
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS repairtable_n9
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE IF EXISTS repairtable_n10
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS repairtable_n10
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE EXTERNAL TABLE repairtable_n7(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@repairtable_n7
+POSTHOOK: query: CREATE EXTERNAL TABLE repairtable_n7(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+#### A masked pattern was here ####
+POSTHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@repairtable_n7
+PREHOOK: query: describe formatted repairtable_n7
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@repairtable_n7
+POSTHOOK: query: describe formatted repairtable_n7
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@repairtable_n7
+# col_name             data_type               comment             
+col                    string                                      
+                
+# Partition Information                 
+# col_name             data_type               comment             
+p1                     string                                      
+p2                     string                                      
+                
+# Detailed Table Information            
+Database:              default                  
+#### A masked pattern was here ####
+Retention:             0                        
+#### A masked pattern was here ####
+Table Type:            EXTERNAL_TABLE           
+Table Parameters:               
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
+       EXTERNAL                TRUE                
+       bucketing_version       2                   
+       discover.partitions     true                
+       numFiles                0                   
+       numPartitions           0                   
+       numRows                 0                   
+       rawDataSize             0                   
+       totalSize               0                   
+#### A masked pattern was here ####
+                
+# Storage Information           
+SerDe Library:         org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:           org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:          org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:            No                       
+Num Buckets:           -1                       
+Bucket Columns:        []                       
+Sort Columns:          []                       
+Storage Desc Params:            
+       serialization.format    1                   
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n7
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n7
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n7
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n7
+Partitions not in metastore:   repairtable_n7:p1=a/p2=b        repairtable_n7:p1=c/p2=d
+#### A masked pattern was here ####
+PREHOOK: query: show partitions default.repairtable_n7
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@repairtable_n7
+POSTHOOK: query: show partitions default.repairtable_n7
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@repairtable_n7
+p1=a/p2=b
+p1=c/p2=d
+PREHOOK: query: CREATE EXTERNAL TABLE repairtable_n8 LIKE repairtable_n7
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@repairtable_n8
+POSTHOOK: query: CREATE EXTERNAL TABLE repairtable_n8 LIKE repairtable_n7
+#### A masked pattern was here ####
+POSTHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@repairtable_n8
+PREHOOK: query: describe formatted repairtable_n8
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@repairtable_n8
+POSTHOOK: query: describe formatted repairtable_n8
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@repairtable_n8
+# col_name             data_type               comment             
+col                    string                                      
+                
+# Partition Information                 
+# col_name             data_type               comment             
+p1                     string                                      
+p2                     string                                      
+                
+# Detailed Table Information            
+Database:              default                  
+#### A masked pattern was here ####
+Retention:             0                        
+#### A masked pattern was here ####
+Table Type:            EXTERNAL_TABLE           
+Table Parameters:               
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
+       EXTERNAL                TRUE                
+       discover.partitions     true                
+       numFiles                0                   
+       numPartitions           0                   
+       numRows                 0                   
+       rawDataSize             0                   
+       totalSize               0                   
+#### A masked pattern was here ####
+                
+# Storage Information           
+SerDe Library:         org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:           org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:          org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:            No                       
+Num Buckets:           -1                       
+Bucket Columns:        []                       
+Sort Columns:          []                       
+Storage Desc Params:            
+       serialization.format    1                   
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n8
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n8
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n8
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n8
+Partitions not in metastore:   repairtable_n8:p1=a/p2=b        repairtable_n8:p1=c/p2=d
+#### A masked pattern was here ####
+PREHOOK: query: show partitions default.repairtable_n8
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@repairtable_n8
+POSTHOOK: query: show partitions default.repairtable_n8
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@repairtable_n8
+p1=a/p2=b
+p1=c/p2=d
+PREHOOK: query: CREATE EXTERNAL TABLE repairtable_n9(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@repairtable_n9
+POSTHOOK: query: CREATE EXTERNAL TABLE repairtable_n9(col STRING) PARTITIONED BY (p1 STRING, p2 STRING)
+#### A masked pattern was here ####
+POSTHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@repairtable_n9
+PREHOOK: query: describe formatted repairtable_n9
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@repairtable_n9
+POSTHOOK: query: describe formatted repairtable_n9
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@repairtable_n9
+# col_name             data_type               comment             
+col                    string                                      
+                
+# Partition Information                 
+# col_name             data_type               comment             
+p1                     string                                      
+p2                     string                                      
+                
+# Detailed Table Information            
+Database:              default                  
+#### A masked pattern was here ####
+Retention:             0                        
+#### A masked pattern was here ####
+Table Type:            EXTERNAL_TABLE           
+Table Parameters:               
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
+       EXTERNAL                TRUE                
+       bucketing_version       2                   
+       discover.partitions     true                
+       numFiles                0                   
+       numPartitions           0                   
+       numRows                 0                   
+       partition.retention.period      10s                 
+       rawDataSize             0                   
+       totalSize               0                   
+#### A masked pattern was here ####
+                
+# Storage Information           
+SerDe Library:         org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat:           org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:          org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:            No                       
+Num Buckets:           -1                       
+Bucket Columns:        []                       
+Sort Columns:          []                       
+Storage Desc Params:            
+       serialization.format    1                   
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n9
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n9
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n9
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n9
+Partitions not in metastore:   repairtable_n9:p1=a/p2=b        repairtable_n9:p1=c/p2=d
+#### A masked pattern was here ####
+PREHOOK: query: show partitions default.repairtable_n9
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@repairtable_n9
+POSTHOOK: query: show partitions default.repairtable_n9
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@repairtable_n9
+p1=a/p2=b
+p1=c/p2=d
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n9
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n9
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n9
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n9
+Expired partitions (retention period: 10s) :   repairtable_n9:p1=a/p2=b        repairtable_n9:p1=c/p2=d
+PREHOOK: query: show partitions default.repairtable_n9
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@repairtable_n9
+POSTHOOK: query: show partitions default.repairtable_n9
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@repairtable_n9
+p1=a/p2=b
+p1=c/p2=d
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n9 SYNC PARTITIONS
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n9
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n9 SYNC PARTITIONS
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n9
+Expired partitions (retention period: 10s) :   repairtable_n9:p1=a/p2=b        repairtable_n9:p1=c/p2=d
+Repair: Dropped partition from metastore hive.default.repairtable_n9:p1=a/p2=b
+Repair: Dropped partition from metastore hive.default.repairtable_n9:p1=c/p2=d
+PREHOOK: query: show partitions default.repairtable_n9
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@repairtable_n9
+POSTHOOK: query: show partitions default.repairtable_n9
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@repairtable_n9
+PREHOOK: query: CREATE EXTERNAL TABLE repairtable_n10 PARTITIONED BY(p1,p2) STORED AS ORC AS SELECT * FROM repairtable_n9
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@repairtable_n9
+PREHOOK: Output: database:default
+PREHOOK: Output: default@repairtable_n10
+PREHOOK: Output: default@repairtable_n10
+POSTHOOK: query: CREATE EXTERNAL TABLE repairtable_n10 PARTITIONED BY(p1,p2) STORED AS ORC AS SELECT * FROM repairtable_n9
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@repairtable_n9
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@repairtable_n10
+PREHOOK: query: describe formatted repairtable_n10
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@repairtable_n10
+POSTHOOK: query: describe formatted repairtable_n10
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@repairtable_n10
+# col_name             data_type               comment             
+col                    string                                      
+                
+# Partition Information                 
+# col_name             data_type               comment             
+p1                     string                                      
+p2                     string                                      
+                
+# Detailed Table Information            
+Database:              default                  
+#### A masked pattern was here ####
+Retention:             0                        
+#### A masked pattern was here ####
+Table Type:            EXTERNAL_TABLE           
+Table Parameters:               
+       COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
+       EXTERNAL                TRUE                
+       bucketing_version       2                   
+       discover.partitions     true                
+       numFiles                0                   
+       numPartitions           0                   
+       numRows                 0                   
+       rawDataSize             0                   
+       totalSize               0                   
+#### A masked pattern was here ####
+                
+# Storage Information           
+SerDe Library:         org.apache.hadoop.hive.ql.io.orc.OrcSerde        
+InputFormat:           org.apache.hadoop.hive.ql.io.orc.OrcInputFormat  
+OutputFormat:          org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed:            No                       
+Num Buckets:           -1                       
+Bucket Columns:        []                       
+Sort Columns:          []                       
+Storage Desc Params:            
+       serialization.format    1                   
+PREHOOK: query: MSCK REPAIR TABLE default.repairtable_n10
+PREHOOK: type: MSCK
+PREHOOK: Output: default@repairtable_n10
+POSTHOOK: query: MSCK REPAIR TABLE default.repairtable_n10
+POSTHOOK: type: MSCK
+POSTHOOK: Output: default@repairtable_n10
+Partitions not in metastore:   repairtable_n10:p1=a/p2=b       repairtable_n10:p1=c/p2=d
+#### A masked pattern was here ####
+PREHOOK: query: show partitions default.repairtable_n10
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@repairtable_n10
+POSTHOOK: query: show partitions default.repairtable_n10
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@repairtable_n10
+p1=a/p2=b
+p1=c/p2=d
+PREHOOK: query: DROP TABLE default.repairtable_n7
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@repairtable_n7
+PREHOOK: Output: default@repairtable_n7
+POSTHOOK: query: DROP TABLE default.repairtable_n7
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@repairtable_n7
+POSTHOOK: Output: default@repairtable_n7
+PREHOOK: query: DROP TABLE default.repairtable_n8
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@repairtable_n8
+PREHOOK: Output: default@repairtable_n8
+POSTHOOK: query: DROP TABLE default.repairtable_n8
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@repairtable_n8
+POSTHOOK: Output: default@repairtable_n8
+PREHOOK: query: DROP TABLE default.repairtable_n9
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@repairtable_n9
+PREHOOK: Output: default@repairtable_n9
+POSTHOOK: query: DROP TABLE default.repairtable_n9
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@repairtable_n9
+POSTHOOK: Output: default@repairtable_n9
+PREHOOK: query: DROP TABLE default.repairtable_n10
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@repairtable_n10
+PREHOOK: Output: default@repairtable_n10
+POSTHOOK: query: DROP TABLE default.repairtable_n10
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@repairtable_n10
+POSTHOOK: Output: default@repairtable_n10

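Note: partition_discovery.q.out is new and covers the retention path: with 'partition.retention.period' set on a table, a plain MSCK REPAIR only reports expired partitions, while SYNC PARTITIONS drops them from the metastore. Condensed from the test; how the property is set is masked in the golden file, so the ALTER TABLE below is one assumed way to reproduce it:

  ALTER TABLE repairtable_n9 SET TBLPROPERTIES ('partition.retention.period'='10s');

  MSCK REPAIR TABLE default.repairtable_n9;                  -- lists "Expired partitions", keeps them
  MSCK REPAIR TABLE default.repairtable_n9 SYNC PARTITIONS;  -- drops the expired partitions
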