This is an automated email from the ASF dual-hosted git repository.
dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new e401b5b3c6a HIVE-27097: Improve the retry strategy for MetaStore client and server (Wechar Yu, reviewed by Denys Kuzmenko, Sai Hemanth Gantasala)
e401b5b3c6a is described below
commit e401b5b3c6ac7064615dc9f28cc9d974b815b070
Author: Wechar Yu <[email protected]>
AuthorDate: Thu Mar 23 04:39:09 2023 +0800
HIVE-27097: Improve the retry strategy for MetaStore client and server (Wechar Yu, reviewed by Denys Kuzmenko, Sai Hemanth Gantasala)
Closes #4076
---
.../tools/schematool/TestSchemaToolCatalogOps.java | 245 ++++++++++++---------
.../hive/metastore/RetryingMetaStoreClient.java | 43 +---
.../hadoop/hive/metastore/RetryingHMSHandler.java | 21 +-
.../metastore/TestRetriesInRetryingHMSHandler.java | 56 ++++-
4 files changed, 214 insertions(+), 151 deletions(-)
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolCatalogOps.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolCatalogOps.java
index 9669cd4264f..40538bc0660 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolCatalogOps.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolCatalogOps.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder;
import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hive.com.google.common.io.Files;
import org.apache.thrift.TException;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -47,6 +48,7 @@ import java.io.IOException;
import java.io.PrintStream;
import java.util.Collections;
import java.util.HashSet;
+import java.util.List;
import java.util.Set;
import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
@@ -60,6 +62,7 @@ public class TestSchemaToolCatalogOps {
private static PrintStream errStream;
private static PrintStream outStream;
private static String argsBase;
+ private static File catalogLocation;
@BeforeClass
public static void initDb() throws HiveMetaException, IOException {
@@ -83,6 +86,7 @@ public class TestSchemaToolCatalogOps {
outStream = System.out;
argsBase = "-dbType derby -userName " + userName + " -passWord " + passWord + " ";
+ catalogLocation = Files.createTempDir();
execute(new SchemaToolTaskInit(), "-initSchema"); // Pre-install the database so all the tables are there.
}
@@ -92,6 +96,9 @@ public class TestSchemaToolCatalogOps {
if (metaStoreDir.exists()) {
FileUtils.forceDeleteOnExit(metaStoreDir);
}
+ if (catalogLocation.exists()) {
+ FileUtils.forceDeleteOnExit(catalogLocation);
+ }
System.setOut(outStream);
System.setErr(errStream);
}
@@ -205,92 +212,88 @@ public class TestSchemaToolCatalogOps {
String funcName = "movedbfunc";
String partVal = "moveDbKey";
- new CatalogBuilder()
- .setName(toCatName)
- .setLocation("file:///tmp")
- .create(client);
-
- Database db = new DatabaseBuilder()
- .setCatalogName(DEFAULT_CATALOG_NAME)
- .setName(dbName)
- .create(client, conf);
-
- new FunctionBuilder()
- .inDb(db)
- .setName(funcName)
- .setClass("org.apache.hive.myudf")
- .create(client, conf);
-
- Table table = new TableBuilder()
- .inDb(db)
- .setTableName(tableName)
- .addCol("a", "int")
- .addPartCol("p", "string")
- .create(client, conf);
-
- new PartitionBuilder()
- .inTable(table)
- .addValue(partVal)
- .addToTable(client, conf);
-
- String argsMoveDB = String.format("-moveDatabase %s -fromCatalog %s -toCatalog %s", dbName,
- DEFAULT_CATALOG_NAME, toCatName);
- execute(new SchemaToolTaskMoveDatabase(), argsMoveDB);
-
- Database fetchedDb = client.getDatabase(toCatName, dbName);
- Assert.assertNotNull(fetchedDb);
- Assert.assertEquals(toCatName.toLowerCase(), fetchedDb.getCatalogName());
-
- Function fetchedFunction = client.getFunction(toCatName, dbName, funcName);
- Assert.assertNotNull(fetchedFunction);
- Assert.assertEquals(toCatName.toLowerCase(), fetchedFunction.getCatName());
- Assert.assertEquals(dbName.toLowerCase(), fetchedFunction.getDbName());
-
- Table fetchedTable = client.getTable(toCatName, dbName, tableName);
- Assert.assertNotNull(fetchedTable);
- Assert.assertEquals(toCatName.toLowerCase(), fetchedTable.getCatName());
- Assert.assertEquals(dbName.toLowerCase(), fetchedTable.getDbName());
+ withCatalog(toCatName, catName -> {
+ Database db = new DatabaseBuilder()
+ .setCatalogName(DEFAULT_CATALOG_NAME)
+ .setName(dbName)
+ .create(client, conf);
+
+ new FunctionBuilder()
+ .inDb(db)
+ .setName(funcName)
+ .setClass("org.apache.hive.myudf")
+ .create(client, conf);
+
+ Table table = new TableBuilder()
+ .inDb(db)
+ .setTableName(tableName)
+ .addCol("a", "int")
+ .addPartCol("p", "string")
+ .create(client, conf);
+
+ new PartitionBuilder()
+ .inTable(table)
+ .addValue(partVal)
+ .addToTable(client, conf);
+
+ String argsMoveDB = String.format("-moveDatabase %s -fromCatalog %s -toCatalog %s", dbName,
+ DEFAULT_CATALOG_NAME, catName);
+ execute(new SchemaToolTaskMoveDatabase(), argsMoveDB);
- Partition fetchedPart =
- client.getPartition(toCatName, dbName, tableName, Collections.singletonList(partVal));
- Assert.assertNotNull(fetchedPart);
- Assert.assertEquals(toCatName.toLowerCase(), fetchedPart.getCatName());
- Assert.assertEquals(dbName.toLowerCase(), fetchedPart.getDbName());
- Assert.assertEquals(tableName.toLowerCase(), fetchedPart.getTableName());
+ Database fetchedDb = client.getDatabase(catName, dbName);
+ Assert.assertNotNull(fetchedDb);
+ Assert.assertEquals(catName.toLowerCase(), fetchedDb.getCatalogName());
+
+ Function fetchedFunction = client.getFunction(catName, dbName, funcName);
+ Assert.assertNotNull(fetchedFunction);
+ Assert.assertEquals(catName.toLowerCase(), fetchedFunction.getCatName());
+ Assert.assertEquals(dbName.toLowerCase(), fetchedFunction.getDbName());
+
+ Table fetchedTable = client.getTable(catName, dbName, tableName);
+ Assert.assertNotNull(fetchedTable);
+ Assert.assertEquals(catName.toLowerCase(), fetchedTable.getCatName());
+ Assert.assertEquals(dbName.toLowerCase(), fetchedTable.getDbName());
+
+ Partition fetchedPart =
+ client.getPartition(catName, dbName, tableName, Collections.singletonList(partVal));
+ Assert.assertNotNull(fetchedPart);
+ Assert.assertEquals(catName.toLowerCase(), fetchedPart.getCatName());
+ Assert.assertEquals(dbName.toLowerCase(), fetchedPart.getDbName());
+ Assert.assertEquals(tableName.toLowerCase(), fetchedPart.getTableName());
+
+ // drop the function
+ client.dropFunction(catName, dbName, funcName);
+ });
}
@Test
public void moveDatabaseWithExistingDbOfSameNameAlreadyInTargetCatalog()
throws TException, HiveMetaException {
String catName = "clobberCatalog";
- new CatalogBuilder()
- .setName(catName)
- .setLocation("file:///tmp")
- .create(client);
- try {
- String argsMoveDB = String.format("-moveDatabase %s -fromCatalog %s -toCatalog %s",
- DEFAULT_DATABASE_NAME, catName, DEFAULT_CATALOG_NAME);
- execute(new SchemaToolTaskMoveDatabase(), argsMoveDB);
- Assert.fail("Attempt to move default database should have failed.");
- } catch (HiveMetaException e) {
- // good
- }
-
- // Make sure nothing really moved
- Set<String> dbNames = new HashSet<>(client.getAllDatabases(DEFAULT_CATALOG_NAME));
- Assert.assertTrue(dbNames.contains(DEFAULT_DATABASE_NAME));
+ withCatalog(catName, name -> {
+ try {
+ String argsMoveDB = String.format("-moveDatabase %s -fromCatalog %s -toCatalog %s",
+ DEFAULT_DATABASE_NAME, catName, DEFAULT_CATALOG_NAME);
+ execute(new SchemaToolTaskMoveDatabase(), argsMoveDB);
+ Assert.fail("Attempt to move default database should have failed.");
+ } catch (HiveMetaException e) {
+ // good
+ }
+
+ // Make sure nothing really moved
+ Set<String> dbNames = new HashSet<>(client.getAllDatabases(DEFAULT_CATALOG_NAME));
+ Assert.assertTrue(dbNames.contains(DEFAULT_DATABASE_NAME));
+ });
}
@Test(expected = HiveMetaException.class)
public void moveNonExistentDatabase() throws TException, HiveMetaException {
String catName = "moveNonExistentDb";
- new CatalogBuilder()
- .setName(catName)
- .setLocation("file:///tmp")
- .create(client);
- String argsMoveDB = String.format("-moveDatabase nosuch -fromCatalog %s
-toCatalog %s",
- catName, DEFAULT_CATALOG_NAME);
- execute(new SchemaToolTaskMoveDatabase(), argsMoveDB);
+ withCatalog(catName, name -> {
+ String argsMoveDB = String.format("-moveDatabase nosuch -fromCatalog %s
-toCatalog %s",
+ name, DEFAULT_CATALOG_NAME);
+ execute(new SchemaToolTaskMoveDatabase(), argsMoveDB);
+ });
}
@Test
@@ -320,42 +323,39 @@ public class TestSchemaToolCatalogOps {
String tableName = "moveTableTable";
String partVal = "moveTableKey";
- new CatalogBuilder()
- .setName(toCatName)
- .setLocation("file:///tmp")
- .create(client);
-
- new DatabaseBuilder()
- .setCatalogName(toCatName)
- .setName(toDbName)
- .create(client, conf);
-
- Table table = new TableBuilder()
- .setTableName(tableName)
- .addCol("a", "int")
- .addPartCol("p", "string")
- .create(client, conf);
-
- new PartitionBuilder()
- .inTable(table)
- .addValue(partVal)
- .addToTable(client, conf);
-
- String argsMoveTable = String.format("-moveTable %s -fromCatalog %s -toCatalog %s -fromDatabase %s -toDatabase %s",
- tableName, DEFAULT_CATALOG_NAME, toCatName, DEFAULT_DATABASE_NAME, toDbName);
- execute(new SchemaToolTaskMoveTable(), argsMoveTable);
-
- Table fetchedTable = client.getTable(toCatName, toDbName, tableName);
- Assert.assertNotNull(fetchedTable);
- Assert.assertEquals(toCatName.toLowerCase(), fetchedTable.getCatName());
- Assert.assertEquals(toDbName.toLowerCase(), fetchedTable.getDbName());
+ withCatalog(toCatName, catName -> {
+ new DatabaseBuilder()
+ .setCatalogName(toCatName)
+ .setName(toDbName)
+ .create(client, conf);
+
+ Table table = new TableBuilder()
+ .setTableName(tableName)
+ .addCol("a", "int")
+ .addPartCol("p", "string")
+ .create(client, conf);
+
+ new PartitionBuilder()
+ .inTable(table)
+ .addValue(partVal)
+ .addToTable(client, conf);
+
+ String argsMoveTable = String.format("-moveTable %s -fromCatalog %s -toCatalog %s -fromDatabase %s -toDatabase %s",
+ tableName, DEFAULT_CATALOG_NAME, toCatName, DEFAULT_DATABASE_NAME, toDbName);
+ execute(new SchemaToolTaskMoveTable(), argsMoveTable);
- Partition fetchedPart =
- client.getPartition(toCatName, toDbName, tableName, Collections.singletonList(partVal));
- Assert.assertNotNull(fetchedPart);
- Assert.assertEquals(toCatName.toLowerCase(), fetchedPart.getCatName());
- Assert.assertEquals(toDbName.toLowerCase(), fetchedPart.getDbName());
- Assert.assertEquals(tableName.toLowerCase(), fetchedPart.getTableName());
+ Table fetchedTable = client.getTable(toCatName, toDbName, tableName);
+ Assert.assertNotNull(fetchedTable);
+ Assert.assertEquals(toCatName.toLowerCase(), fetchedTable.getCatName());
+ Assert.assertEquals(toDbName.toLowerCase(), fetchedTable.getDbName());
+
+ Partition fetchedPart =
+ client.getPartition(toCatName, toDbName, tableName, Collections.singletonList(partVal));
+ Assert.assertNotNull(fetchedPart);
+ Assert.assertEquals(toCatName.toLowerCase(), fetchedPart.getCatName());
+ Assert.assertEquals(toDbName.toLowerCase(), fetchedPart.getDbName());
+ Assert.assertEquals(tableName.toLowerCase(), fetchedPart.getTableName());
+ });
}
@Test
@@ -483,4 +483,31 @@ public class TestSchemaToolCatalogOps {
task.setHiveSchemaTool(schemaTool);
task.execute();
}
+
+ @FunctionalInterface
+ private interface CheckFunc<T> {
+ void apply(T t) throws HiveMetaException, TException;
+ }
+
+ // Function will build catalog itself and clean the catalog after the operation.
+ private void withCatalog(String catName, CheckFunc<String> f)
+ throws HiveMetaException, TException {
+ try {
+ new CatalogBuilder()
+ .setName(catName)
+ .setLocation(catalogLocation.toString())
+ .create(client);
+ f.apply(catName);
+ } finally {
+ List<String> dbNames = client.getAllDatabases(catName);
+ for (String dbName : dbNames) {
+ List<String> tblNames = client.getAllTables(catName, dbName);
+ for (String tblName : tblNames) {
+ client.dropTable(catName, dbName, tblName);
+ }
+ client.dropDatabase(catName, dbName);
+ }
+ client.dropCatalog(catName);
+ }
+ }
}
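The withCatalog helper added above is an execute-around (try/finally) wrapper: it creates the catalog, runs the test body, then unconditionally drops everything it created. A minimal self-contained sketch of the same pattern, assuming a plain in-memory stand-in for the metastore rather than Hive's CatalogBuilder and client:

import java.util.ArrayList;
import java.util.List;

// Execute-around sketch: create the resource, run the body, always clean up.
// The "catalog" here is just a list entry standing in for metastore state.
public class WithCatalogSketch {

  @FunctionalInterface
  interface CheckFunc<T> {
    void apply(T t) throws Exception;
  }

  private static final List<String> catalogs = new ArrayList<>();

  static void withCatalog(String catName, CheckFunc<String> f) throws Exception {
    try {
      catalogs.add(catName);     // setup (CatalogBuilder.create in the patch)
      f.apply(catName);          // test body runs against the catalog
    } finally {
      catalogs.remove(catName);  // cleanup runs even when the body throws
    }
  }

  public static void main(String[] args) throws Exception {
    withCatalog("demoCatalog", name -> System.out.println("using " + name));
    System.out.println("catalogs left: " + catalogs);  // prints: catalogs left: []
  }
}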
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
index 6eadf194628..c2fa9c99ad4 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
@@ -31,7 +31,6 @@ import java.util.Arrays;
import java.util.Iterator;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
-import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -45,9 +44,7 @@ import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.metastore.annotation.NoReconnect;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.thrift.TApplicationException;
import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TProtocolException;
import org.apache.thrift.transport.TTransportException;
import com.google.common.annotations.VisibleForTesting;
@@ -63,8 +60,6 @@ import com.google.common.annotations.VisibleForTesting;
public class RetryingMetaStoreClient implements InvocationHandler {
private static final Logger LOG =
LoggerFactory.getLogger(RetryingMetaStoreClient.class.getName());
- private static final Pattern IO_JDO_TRANSPORT_PROTOCOL_EXCEPTION_PATTERN =
- Pattern.compile("(?s).*(IO|JDO[a-zA-Z]*|TProtocol|TTransport)Exception.*");
private final IMetaStoreClient base;
private final UserGroupInformation ugi;
@@ -224,35 +219,14 @@ public class RetryingMetaStoreClient implements InvocationHandler {
throw e.getCause();
} catch (InvocationTargetException e) {
Throwable t = e.getCause();
- if (t instanceof TApplicationException) {
- TApplicationException tae = (TApplicationException)t;
- switch (tae.getType()) {
- case TApplicationException.UNSUPPORTED_CLIENT_TYPE:
- case TApplicationException.UNKNOWN_METHOD:
- case TApplicationException.WRONG_METHOD_NAME:
- case TApplicationException.INVALID_PROTOCOL:
- throw t;
- default:
- // TODO: most other options are probably unrecoverable... throw?
- caughtException = tae;
- }
- } else if ((t instanceof TProtocolException) || (t instanceof TTransportException)) {
- // TODO: most protocol exceptions are probably unrecoverable... throw?
- caughtException = (TException)t;
- } else if ((t instanceof MetaException) && isRecoverableMetaException((MetaException) t)) {
- caughtException = (MetaException)t;
+ // Metastore client needs retry for only TTransportException.
+ if (TTransportException.class.isAssignableFrom(t.getClass())) {
+ caughtException = (TTransportException) t;
} else {
throw t;
}
- } catch (MetaException e) {
- if (isRecoverableMetaException(e)) {
- caughtException = e;
- } else {
- throw e;
- }
}
-
if (retriesMade >= retryLimit || base.isLocalMetaStore() || !allowRetry) {
throw caughtException;
}
@@ -264,17 +238,6 @@ public class RetryingMetaStoreClient implements InvocationHandler {
return ret;
}
- private static boolean isRecoverableMetaException(MetaException e) {
- String m = e.getMessage();
- if (m == null) {
- return false;
- }
- if (m.contains("java.sql.SQLIntegrityConstraintViolationException")) {
- return false;
- }
- return IO_JDO_TRANSPORT_PROTOCOL_EXCEPTION_PATTERN.matcher(m).matches();
- }
-
/**
* Returns the UGI for the current user.
* @return the UGI for the current user.
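The net effect of the client-side hunks above is a much narrower retry policy: instead of matching exception messages against an IO/JDO/TProtocol/TTransport pattern, the client now retries only on TTransportException and rethrows everything else. A sketch of the resulting loop shape, assuming a hypothetical ThriftCall interface rather than the client's reflective dispatch:

import org.apache.thrift.transport.TTransportException;

// Retry sketch: only transport-level failures are retried (after a delay and,
// in the real client, a reconnect); any other exception, including server-side
// MetaExceptions, propagates to the caller immediately.
public class NarrowRetrySketch {

  interface ThriftCall<T> {
    T call() throws Exception;
  }

  static <T> T invokeWithRetry(ThriftCall<T> op, int retryLimit, long delayMs)
      throws Exception {
    int retriesMade = 0;
    while (true) {
      try {
        return op.call();
      } catch (TTransportException e) {
        if (retriesMade >= retryLimit) {
          throw e;               // retry budget exhausted
        }
        retriesMade++;
        Thread.sleep(delayMs);   // back off before reconnecting and retrying
      }
      // non-transport exceptions are not caught and so are never retried
    }
  }
}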
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
index 5aac50e8e30..7774dfe5d46 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
@@ -23,7 +23,12 @@ import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.lang.reflect.UndeclaredThrowableException;
+import java.sql.SQLException;
+import java.sql.SQLIntegrityConstraintViolationException;
import java.util.concurrent.TimeUnit;
+import java.util.stream.Stream;
+
+import javax.jdo.JDOException;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -44,6 +49,11 @@ public class RetryingHMSHandler implements InvocationHandler {
private static final Logger LOG = LoggerFactory.getLogger(RetryingHMSHandler.class);
private static final String CLASS_NAME = RetryingHMSHandler.class.getName();
+ private static final Class<SQLException>[] unrecoverableSqlExceptions = new Class[]{
+ // TODO: collect more unrecoverable SQLExceptions
+ SQLIntegrityConstraintViolationException.class
+ };
+
private static class Result {
private final Object result;
@@ -206,7 +216,7 @@ public class RetryingHMSHandler implements InvocationHandler {
Throwable rootCause = ExceptionUtils.getRootCause(caughtException);
String errorMessage = ExceptionUtils.getMessage(caughtException) +
(rootCause == null ? "" : ("\nRoot cause: " + rootCause));
- if (retryCount >= retryLimit) {
+ if (retryCount >= retryLimit || !isRecoverableException(caughtException)) {
LOG.error("HMSHandler Fatal error: " +
ExceptionUtils.getStackTrace(caughtException));
throw new MetaException(errorMessage);
}
@@ -227,6 +237,15 @@ public class RetryingHMSHandler implements InvocationHandler {
}
}
+ private boolean isRecoverableException(Throwable t) {
+ if (!(t instanceof JDOException || t instanceof NucleusException)) {
+ return false;
+ }
+
+ return Stream.of(unrecoverableSqlExceptions)
+ .allMatch(ex -> ExceptionUtils.indexOfType(t, ex) < 0);
+ }
+
public Configuration getActiveConf() {
return activeConf;
}
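Server-side, the new isRecoverableException gate retries a failed handler call only when the failure is a JDO/DataNucleus exception whose cause chain contains none of the listed unrecoverable SQLExceptions. A self-contained sketch of the same check, assuming javax.jdo, datanucleus-core, and commons-lang3 on the classpath:

import java.sql.SQLException;
import java.sql.SQLIntegrityConstraintViolationException;
import java.util.stream.Stream;
import javax.jdo.JDOException;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.datanucleus.exceptions.NucleusException;

// Recoverability sketch mirroring isRecoverableException in the patch: only
// persistence-layer failures are retried, and only if no known-unrecoverable
// SQLException appears anywhere in the cause chain.
public class RecoverabilitySketch {

  @SuppressWarnings("unchecked")
  private static final Class<? extends SQLException>[] UNRECOVERABLE = new Class[] {
      SQLIntegrityConstraintViolationException.class
  };

  static boolean isRecoverable(Throwable t) {
    if (!(t instanceof JDOException || t instanceof NucleusException)) {
      return false;  // non-JDO/Nucleus failures are never retried
    }
    // recoverable iff indexOfType finds no unrecoverable cause in the chain
    return Stream.of(UNRECOVERABLE)
        .allMatch(ex -> ExceptionUtils.indexOfType(t, ex) < 0);
  }
}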
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java
index 771af9dfd6d..b970c54006f 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hive.metastore;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.sql.BatchUpdateException;
-import java.sql.SQLException;
import java.sql.SQLIntegrityConstraintViolationException;
import java.util.concurrent.TimeUnit;
@@ -141,4 +140,59 @@ public class TestRetriesInRetryingHMSHandler {
Assert.assertTrue(e.getMessage().contains("java.sql.SQLIntegrityConstraintViolationException"));
}
}
+
+ @Test
+ public void testUnrecoverableException() throws MetaException {
+ IHMSHandler mockBaseHandler = Mockito.mock(IHMSHandler.class);
+ Mockito.when(mockBaseHandler.getConf()).thenReturn(conf);
+ SQLIntegrityConstraintViolationException sqlException =
+ new SQLIntegrityConstraintViolationException("Cannot delete or
update a parent row");
+ BatchUpdateException updateException = new BatchUpdateException(sqlException);
+ NucleusDataStoreException nucleusException = new NucleusDataStoreException(
+ "Clear request failed: DELETE FROM `PARTITION_PARAMS` WHERE
`PART_ID`=?", updateException);
+ JDOUserException jdoException = new JDOUserException(
+ "One or more instances could not be deleted", nucleusException);
+ // SQLIntegrityConstraintViolationException wrapped in BatchUpdateException wrapped in
+ // NucleusDataStoreException wrapped in JDOUserException wrapped in MetaException wrapped in InvocationException
+ MetaException me = new MetaException("Dummy exception");
+ me.initCause(jdoException);
+ InvocationTargetException ex = new InvocationTargetException(me);
+ Mockito.doThrow(me).when(mockBaseHandler).getMS();
+
+ IHMSHandler retryingHandler = RetryingHMSHandler.getProxy(conf, mockBaseHandler, false);
+ try {
+ retryingHandler.getMS();
+ Assert.fail("should throw the mocked MetaException");
+ } catch (MetaException e) {
+ // expected
+ }
+ Mockito.verify(mockBaseHandler, Mockito.times(1)).getMS();
+ }
+
+ @Test
+ public void testRecoverableException() throws MetaException {
+ IHMSHandler mockBaseHandler = Mockito.mock(IHMSHandler.class);
+ Mockito.when(mockBaseHandler.getConf()).thenReturn(conf);
+ BatchUpdateException updateException = new BatchUpdateException();
+ NucleusDataStoreException nucleusException = new NucleusDataStoreException(
+ "Clear request failed: DELETE FROM `PARTITION_PARAMS` WHERE
`PART_ID`=?", updateException);
+ JDOUserException jdoException = new JDOUserException(
+ "One or more instances could not be deleted", nucleusException);
+ // BatchUpdateException wrapped in NucleusDataStoreException wrapped in
+ // JDOUserException wrapped in MetaException wrapped in InvocationException
+ MetaException me = new MetaException("Dummy exception");
+ me.initCause(jdoException);
+ InvocationTargetException ex = new InvocationTargetException(me);
+ Mockito.doThrow(me).when(mockBaseHandler).getMS();
+
+ IHMSHandler retryingHandler = RetryingHMSHandler.getProxy(conf, mockBaseHandler, false);
+ try {
+ retryingHandler.getMS();
+ Assert.fail("should throw the mocked MetaException");
+ } catch (MetaException e) {
+ // expected
+ }
+ int retryTimes = MetastoreConf.getIntVar(conf, ConfVars.HMS_HANDLER_ATTEMPTS);
+ Mockito.verify(mockBaseHandler, Mockito.times(retryTimes + 1)).getMS();
+ }
}