pkumarsinha commented on a change in pull request #1358:
URL: https://github.com/apache/hive/pull/1358#discussion_r466420110



##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java
##########
@@ -132,31 +130,40 @@ private AtlasReplInfo createAtlasReplInfo() throws SemanticException, MalformedU
 
   private long lastStoredTimeStamp() throws SemanticException {
    Path prevMetadataPath = new Path(work.getPrevAtlasDumpDir(), EximUtil.METADATA_NAME);
-    BufferedReader br = null;
+    Retryable retryable = Retryable.builder()
+      .withHiveConf(conf)
+      .withRetryOnException(IOException.class)
+      .withFailOnException(FileNotFoundException.class).build();
     try {
-      FileSystem fs = prevMetadataPath.getFileSystem(conf);
-      br = new BufferedReader(new InputStreamReader(fs.open(prevMetadataPath), Charset.defaultCharset()));
-      String line = br.readLine();
-      if (line == null) {
-        throw new SemanticException("Could not read lastStoredTimeStamp from atlas metadata file");
-      }
-      String[] lineContents = line.split("\t", 5);
-      return Long.parseLong(lineContents[1]);
-    } catch (Exception ex) {
-      throw new SemanticException(ex);
-    } finally {
-      if (br != null) {
+      return retryable.executeCallable(() -> {
+        BufferedReader br = null;
         try {
-          br.close();
-        } catch (IOException e) {
-          throw new SemanticException(e);
+          FileSystem fs = prevMetadataPath.getFileSystem(conf);
+          br = new BufferedReader(new InputStreamReader(fs.open(prevMetadataPath), Charset.defaultCharset()));
+          String line = br.readLine();
+          if (line == null) {
+            throw new SemanticException(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE

Review comment:
      lastStoredTimeStamp is maintained by Hive itself. Should we have a better error message category for this?
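      For instance, a minimal sketch (the name and error code are hypothetical, not part of this PR):

  // In ErrorMsg.java: a category for Hive-managed replication metadata that is
  // corrupt or unreadable, as opposed to a user-supplied configuration problem.
  REPL_INVALID_INTERNAL_FILE(20018, "Invalid or unreadable internal file {0} for {1} service.", true),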

##########
File path: common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
##########
@@ -505,18 +505,9 @@
           " queue: {1}. Please fix and try again.", true),
   SPARK_RUNTIME_OOM(20015, "Spark job failed because of out of memory."),
 
-  //if the error message is changed for REPL_EVENTS_MISSING_IN_METASTORE, then need modification in getNextNotification
-  //method in HiveMetaStoreClient
-  REPL_EVENTS_MISSING_IN_METASTORE(20016, "Notification events are missing in the meta store."),
-  REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID(20017, "Load path {0} not valid as target database is bootstrapped " +
-          "from some other path : {1}."),
-  REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH(20018, "File is missing from both source and cm path."),
-  REPL_LOAD_PATH_NOT_FOUND(20019, "Load path does not exist."),
-  REPL_DATABASE_IS_NOT_SOURCE_OF_REPLICATION(20020,
-          "Source of replication (repl.source.for) is not set in the database properties."),
-  REPL_INVALID_DB_OR_TABLE_PATTERN(20021,
-          "Invalid pattern for the DB or table name in the replication policy. "
-                  + "It should be a valid regex enclosed within single or double quotes."),
+  REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH(20016, "File is missing from both source and cm path."),
+  REPL_EXTERNAL_SERVICE_CONNECTION_ERROR(20017, "Failed to connect to {0} service. Error code {1}.",
+    true),

Review comment:
      nit: This can be accommodated on the same line.
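      i.e., something like:

  REPL_EXTERNAL_SERVICE_CONNECTION_ERROR(20017, "Failed to connect to {0} service. Error code {1}.", true),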

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java
##########
@@ -42,11 +43,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.Serializable;
+import java.io.*;

Review comment:
       Should we revert this?
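      i.e., restore the explicit imports; the list would now presumably also need FileNotFoundException:

  import java.io.BufferedReader;
  import java.io.FileNotFoundException;
  import java.io.IOException;
  import java.io.InputStream;
  import java.io.InputStreamReader;
  import java.io.Serializable;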

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/atlas/AtlasRestClientImpl.java
##########
@@ -125,17 +127,15 @@ private AtlasImportResult getDefaultAtlasImportResult(AtlasImportRequest request
     return new AtlasImportResult(request, "", "", "", 0L);
   }
 
-  public AtlasServer getServer(String endpoint) throws SemanticException {
+  public AtlasServer getServer(String endpoint, HiveConf conf) throws SemanticException {
+    Retryable retryable = Retryable.builder()
+      .withHiveConf(conf)
+      .withRetryOnException(Exception.class).build();

Review comment:
      Should we retry on just AtlasServiceException, and in the end catch only that exception, since that's what the getServer call is expected to throw?
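      A minimal sketch, assuming AtlasServiceException is what the underlying Atlas client throws:

  Retryable retryable = Retryable.builder()
    .withHiveConf(conf)
    .withRetryOnException(AtlasServiceException.class).build();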

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyTask.java
##########
@@ -43,15 +44,15 @@
  */
 public class DirCopyTask extends Task<DirCopyWork> implements Serializable {
   private static final Logger LOG = LoggerFactory.getLogger(DirCopyTask.class);
-  private static final int MAX_COPY_RETRY = 5;
 
  private boolean createAndSetPathOwner(Path destPath, Path sourcePath) throws IOException {
     FileSystem targetFs = destPath.getFileSystem(conf);
     boolean createdDir = false;
     if (!targetFs.exists(destPath)) {
      // target path is created even if the source path is missing, so that ddl task does not try to create it.
       if (!targetFs.mkdirs(destPath)) {
-        throw new IOException(destPath + " is not a directory or unable to create one");
+        throw new IOException(ErrorMsg.REPL_FILE_SYSTEM_OPERATION_RETRY.format(

Review comment:
      This should be retryable for communication failures and non-retryable for permission-related issues.
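      One possible shape, a sketch (assuming AccessControlException is how the permission failure surfaces here):

  Retryable retryable = Retryable.builder()
    .withHiveConf(conf)
    .withRetryOnException(IOException.class)
    .withFailOnException(org.apache.hadoop.security.AccessControlException.class).build();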

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
##########
@@ -668,22 +666,25 @@ private void cleanFailedEventDirIfExists(Path dumpDir, long resumeFrom) throws S
   }
 
   private long getResumeFrom(Path ackFile) throws SemanticException {
-    BufferedReader br = null;
+    Retryable retryable = Retryable.builder()
+      .withHiveConf(conf)
+      .withRetryOnException(Exception.class).build();
     try {
-      FileSystem fs = ackFile.getFileSystem(conf);
-      br = new BufferedReader(new InputStreamReader(fs.open(ackFile), Charset.defaultCharset()));
-      long lastEventID = Long.parseLong(br.readLine());
-      return lastEventID;
-    } catch (Exception ex) {
-      throw new SemanticException(ex);
-    } finally {
-      if (br != null) {
+      return retryable.executeCallable(() -> {
+        BufferedReader br = null;
         try {
-          br.close();
-        } catch (IOException e) {
-          throw new SemanticException(e);
+          FileSystem fs = ackFile.getFileSystem(conf);
+          br = new BufferedReader(new InputStreamReader(fs.open(ackFile), Charset.defaultCharset()));
+          long lastEventID = Long.parseLong(br.readLine());
+          return lastEventID;
+        } finally {
+          if (br != null) {
+            br.close();

Review comment:
       Should we ignore exception during close here as well?
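      i.e., mirror the finally block from lastStoredTimeStamp(), a sketch:

  } finally {
    if (br != null) {
      try {
        br.close();
      } catch (IOException e) {
        // Do nothing: a close failure should not mask the read result
      }
    }
  }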

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java
##########
@@ -132,31 +130,40 @@ private AtlasReplInfo createAtlasReplInfo() throws SemanticException, MalformedU
 
   private long lastStoredTimeStamp() throws SemanticException {
    Path prevMetadataPath = new Path(work.getPrevAtlasDumpDir(), EximUtil.METADATA_NAME);
-    BufferedReader br = null;
+    Retryable retryable = Retryable.builder()
+      .withHiveConf(conf)
+      .withRetryOnException(IOException.class)
+      .withFailOnException(FileNotFoundException.class).build();
     try {
-      FileSystem fs = prevMetadataPath.getFileSystem(conf);
-      br = new BufferedReader(new InputStreamReader(fs.open(prevMetadataPath), Charset.defaultCharset()));
-      String line = br.readLine();
-      if (line == null) {
-        throw new SemanticException("Could not read lastStoredTimeStamp from atlas metadata file");
-      }
-      String[] lineContents = line.split("\t", 5);
-      return Long.parseLong(lineContents[1]);
-    } catch (Exception ex) {
-      throw new SemanticException(ex);
-    } finally {
-      if (br != null) {
+      return retryable.executeCallable(() -> {
+        BufferedReader br = null;
         try {
-          br.close();
-        } catch (IOException e) {
-          throw new SemanticException(e);
+          FileSystem fs = prevMetadataPath.getFileSystem(conf);
+          br = new BufferedReader(new InputStreamReader(fs.open(prevMetadataPath), Charset.defaultCharset()));
+          String line = br.readLine();
+          if (line == null) {
+            throw new SemanticException(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE
+              .format("Could not read lastStoredTimeStamp from atlas metadata file", "atlas"));
+          }
+          String[] lineContents = line.split("\t", 5);
+          return Long.parseLong(lineContents[1]);
+        } finally {
+          if (br != null) {
+            try {
+              br.close();
+            } catch (IOException e) {
+              //Do nothing
+            }
+          }
         }
-      }
+      });
+    } catch (Exception e) {
+      throw new SemanticException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e);

Review comment:
      When the exception 'e' is of type SemanticException, we can skip creating another SemanticException.
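      e.g., a sketch:

  } catch (SemanticException e) {
    throw e; // already a SemanticException, no need to wrap it again
  } catch (Exception e) {
    throw new SemanticException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e);
  }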

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java
##########
@@ -196,12 +203,14 @@ private String checkHiveEntityGuid(AtlasRequestBuilder atlasRequestBuilder, Stri
    AtlasObjectId objectId = atlasRequestBuilder.getItemToExport(clusterName, srcDb);
    Set<Map.Entry<String, Object>> entries = objectId.getUniqueAttributes().entrySet();
     if (entries == null || entries.isEmpty()) {
-      throw new SemanticException("Could find entries in objectId for:" + clusterName);
+      throw new SemanticException(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE.format("Could find " +
+        "entries in objectId for:" + clusterName, "atlas"));

Review comment:
      Can we have one constant defined per service and use it in REPL_INVALID_CONFIG_FOR_SERVICE.format() everywhere?
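      e.g., a sketch with hypothetical constants (the names and location are illustrative, not in this PR):

  // In ReplUtils, or another shared location:
  public static final String ATLAS_SERVICE = "atlas";
  public static final String RANGER_SERVICE = "ranger";

  // At the call sites:
  throw new SemanticException(
    ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE.format(message, ReplUtils.ATLAS_SERVICE));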

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java
##########
@@ -113,26 +115,30 @@ AtlasReplInfo createAtlasReplInfo() throws SemanticException, MalformedURLExcept
 
   private String getStoredFsUri(Path atlasDumpDir) throws SemanticException {
     Path metadataPath = new Path(atlasDumpDir, EximUtil.METADATA_NAME);
-    BufferedReader br = null;
+    Retryable retryable = Retryable.builder()
+      .withHiveConf(conf)
+      .withRetryOnException(IOException.class).build();
     try {
-      FileSystem fs = metadataPath.getFileSystem(conf);
-      br = new BufferedReader(new InputStreamReader(fs.open(metadataPath), Charset.defaultCharset()));
-      String line = br.readLine();
-      if (line == null) {
-        throw new SemanticException("Could not read stored src FS Uri from atlas metadata file");
-      }
-      String[] lineContents = line.split("\t", 5);
-      return lineContents[0];
-    } catch (Exception ex) {
-      throw new SemanticException(ex);
-    } finally {
-      if (br != null) {
+      return retryable.executeCallable(() -> {
+        BufferedReader br = null;
         try {
-          br.close();
-        } catch (IOException e) {
-          throw new SemanticException(e);
+          FileSystem fs = metadataPath.getFileSystem(conf);
+          br = new BufferedReader(new InputStreamReader(fs.open(metadataPath), Charset.defaultCharset()));
+          String line = br.readLine();
+          if (line == null) {
+            throw new SemanticException(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE.format("Could not read stored " +

Review comment:
      Can we have a separate error message type for such runtime error cases, and use REPL_INVALID_CONFIG_FOR_SERVICE only for conf-related errors?
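      e.g., with the hypothetical runtime-error constant sketched earlier, this call site would become:

  // Runtime/data error: the dump metadata file is unreadable, not a misconfiguration.
  throw new SemanticException(ErrorMsg.REPL_INVALID_INTERNAL_FILE.format(
    "Could not read stored src FS Uri from atlas metadata file", "atlas"));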

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java
##########
@@ -132,31 +130,40 @@ private AtlasReplInfo createAtlasReplInfo() throws SemanticException, MalformedU
 
   private long lastStoredTimeStamp() throws SemanticException {
    Path prevMetadataPath = new Path(work.getPrevAtlasDumpDir(), EximUtil.METADATA_NAME);
-    BufferedReader br = null;
+    Retryable retryable = Retryable.builder()

Review comment:
      In createAtlasReplInfo(), we have some cases where a wrong or missing config is passed. We should at least classify them; they aren't retryable. e.g.:

  String tgtDB = ReplUtils.getNonEmpty(HiveConf.ConfVars.REPL_ATLAS_REPLICATED_TO_DB.varname, conf, errorFormat);
  String srcCluster = ReplUtils.getNonEmpty(HiveConf.ConfVars.REPL_SOURCE_CLUSTER_NAME.varname, conf, errorFormat);
  String tgtCluster = ReplUtils.getNonEmpty(HiveConf.ConfVars.REPL_TARGET_CLUSTER_NAME.varname, conf, errorFormat);
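      One way to classify them, a sketch (assuming getNonEmpty throws SemanticException for a missing value):

  try {
    String tgtDB = ReplUtils.getNonEmpty(HiveConf.ConfVars.REPL_ATLAS_REPLICATED_TO_DB.varname, conf, errorFormat);
  } catch (SemanticException e) {
    // Non-retryable: surface it as a config error rather than entering the retry loop.
    throw new SemanticException(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE.format(e.getMessage(), "atlas"), e);
  }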

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyTask.java
##########
@@ -86,107 +87,59 @@ private boolean checkIfPathExist(Path sourcePath, UserGroupInformation proxyUser
    return proxyUser.doAs((PrivilegedExceptionAction<Boolean>) () -> sourcePath.getFileSystem(conf).exists(sourcePath));
   }
 
-  private int handleException(Exception e, Path sourcePath, Path targetPath,
-                              int currentRetry, UserGroupInformation proxyUser) {
-    try {
-      LOG.info("Checking if source path " + sourcePath + " is missing for exception ", e);
-      if (!checkIfPathExist(sourcePath, proxyUser)) {
-        LOG.info("Source path is missing. Ignoring exception.");
-        return 0;
-      }
-    } catch (Exception ex) {
-      LOG.warn("Source path missing check failed. ", ex);
-    }
-    // retry logic only for i/o exception
-    if (!(e instanceof IOException)) {
-      LOG.error("Unable to copy {} to {}", sourcePath, targetPath, e);
-      setException(e);
-      return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
-    }
-
-    if (currentRetry <= MAX_COPY_RETRY) {
-      LOG.warn("Unable to copy {} to {}", sourcePath, targetPath, e);
-    } else {
-      LOG.error("Unable to copy {} to {} even after retrying for {} time", sourcePath, targetPath, currentRetry, e);
-      setException(e);
-      return ErrorMsg.REPL_FILE_SYSTEM_OPERATION_RETRY.getErrorCode();
-    }
-    int sleepTime = FileUtils.getSleepTime(currentRetry);
-    LOG.info("Sleep for " + sleepTime + " milliseconds before retry no " + (currentRetry));
-    try {
-      Thread.sleep(sleepTime);
-    } catch (InterruptedException timerEx) {
-      LOG.info("Sleep interrupted", timerEx.getMessage());
-    }
-    try {
-      if (proxyUser == null) {
-        proxyUser = Utils.getUGI();
-      }
-      FileSystem.closeAllForUGI(proxyUser);
-    } catch (Exception ex) {
-      LOG.warn("Unable to closeAllForUGI for user " + proxyUser, ex);
-    }
-    return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
-  }
-
   @Override
   public int execute() {
    String distCpDoAsUser = conf.getVar(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER);
+    Retryable retryable = Retryable.builder()
+      .withHiveConf(conf)
+      .withRetryOnException(IOException.class).build();
+    try {
+      return retryable.executeCallable(() -> {
+        UserGroupInformation proxyUser = null;
+        Path sourcePath = work.getFullyQualifiedSourcePath();
+        Path targetPath = work.getFullyQualifiedTargetPath();
+        try {
+          if (conf.getBoolVar(HiveConf.ConfVars.REPL_ADD_RAW_RESERVED_NAMESPACE)) {
+            sourcePath = reservedRawPath(work.getFullyQualifiedSourcePath().toUri());
+            targetPath = reservedRawPath(work.getFullyQualifiedTargetPath().toUri());
+          }
+          UserGroupInformation ugi = Utils.getUGI();
+          String currentUser = ugi.getShortUserName();
+          if (distCpDoAsUser != null && !currentUser.equals(distCpDoAsUser)) {
+            proxyUser = UserGroupInformation.createProxyUser(
+              distCpDoAsUser, UserGroupInformation.getLoginUser());
+          }
 
-    Path sourcePath = work.getFullyQualifiedSourcePath();
-    Path targetPath = work.getFullyQualifiedTargetPath();
-    if (conf.getBoolVar(HiveConf.ConfVars.REPL_ADD_RAW_RESERVED_NAMESPACE)) {
-      sourcePath = reservedRawPath(work.getFullyQualifiedSourcePath().toUri());
-      targetPath = reservedRawPath(work.getFullyQualifiedTargetPath().toUri());
-    }
-    int currentRetry = 0;
-    int error = 0;
-    UserGroupInformation proxyUser = null;
-    while (currentRetry <= MAX_COPY_RETRY) {
-      try {
-        UserGroupInformation ugi = Utils.getUGI();
-        String currentUser = ugi.getShortUserName();
-        if (distCpDoAsUser != null && !currentUser.equals(distCpDoAsUser)) {
-          proxyUser = UserGroupInformation.createProxyUser(
-                  distCpDoAsUser, UserGroupInformation.getLoginUser());
-        }
-
-        setTargetPathOwner(targetPath, sourcePath, proxyUser);
-
-        // do we create a new conf and only here provide this additional option so that we get away from
-        // differences of data in two location for the same directories ?
-        // basically add distcp.options.delete to hiveconf new object ?
-        FileUtils.distCp(
-                sourcePath.getFileSystem(conf), // source file system
-                Collections.singletonList(sourcePath),  // list of source paths
-                targetPath,
-                false,
-                proxyUser,
-                conf,
-                ShimLoader.getHadoopShims());
-        return 0;
-      } catch (Exception e) {
-        currentRetry++;
-        error = handleException(e, sourcePath, targetPath, currentRetry, proxyUser);
-        if (error == 0) {
-          return 0;
-        }
-      } finally {
-        if (proxyUser != null) {
+          setTargetPathOwner(targetPath, sourcePath, proxyUser);
           try {
-            FileSystem.closeAllForUGI(proxyUser);
-          } catch (IOException e) {
-            LOG.error("Unable to closeAllForUGI for user " + proxyUser, e);
-            if (error == 0) {
-              setException(e);
-              error = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
+            if (!checkIfPathExist(sourcePath, proxyUser)) {
+              LOG.info("Source path is missing. Ignoring exception.");
+              return 0;
             }
-            break;
+          } catch (Exception ex) {
+            LOG.warn("Source path missing check failed. ", ex);

Review comment:
      There is a change in behavior here. When the "Source path missing check failed." case occurs, we still proceed to distCp; earlier, distCp wasn't attempted in this case, only a retry was.
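      One way to preserve the old behavior, a sketch:

  } catch (Exception ex) {
    LOG.warn("Source path missing check failed. ", ex);
    // Rethrow so Retryable counts this as a failed attempt and retries,
    // instead of falling through to distCp.
    throw ex;
  }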

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java
##########
@@ -177,7 +184,7 @@ long dumpAtlasMetaData(AtlasRequestBuilder atlasRequestBuilder, AtlasReplInfo at
     } catch (SemanticException ex) {
       throw ex;
     } catch (Exception ex) {
-      throw new SemanticException(ex);
+      throw new SemanticException(ex.getMessage(), ex);
     } finally {
       if (inputStream != null) {

Review comment:
      At this point, we have something like this:

  if (inputStream != null) {
    try {
      inputStream.close();
    } catch (IOException e) {
      throw new SemanticException(e);
    }
  }

      Since we are ignoring the exception during close in lastStoredTimeStamp(), let's align this as well.
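      e.g., a sketch using Hadoop's helper, assuming a close failure can be ignored here too:

  } finally {
    // IOUtils.closeStream swallows any IOException raised by close()
    org.apache.hadoop.io.IOUtils.closeStream(inputStream);
  }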

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java
##########
@@ -92,14 +92,21 @@ public int execute() {
       }
       URL url = work.getRangerConfigResource();
       if (url == null) {
-        throw new SemanticException("Ranger configuration is not valid "
-          + ReplUtils.RANGER_CONFIGURATION_RESOURCE_NAME);
+        throw new SemanticException(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE
+          .format("Ranger configuration is not valid "
+          + ReplUtils.RANGER_CONFIGURATION_RESOURCE_NAME, "ranger"));
       }
       conf.addResource(url);
      String rangerHiveServiceName = conf.get(ReplUtils.RANGER_HIVE_SERVICE_NAME);
       String rangerEndpoint = conf.get(ReplUtils.RANGER_REST_URL);
-      if (StringUtils.isEmpty(rangerEndpoint) || !rangerRestClient.checkConnection(rangerEndpoint, conf)) {
-        throw new SemanticException("Ranger endpoint is not valid " + rangerEndpoint);
+      if (StringUtils.isEmpty(rangerEndpoint)) {
+        throw new SemanticException(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE
+          .format("Ranger endpoint is not valid "
+            + rangerEndpoint, "ranger"));
+      }
+      if (!rangerRestClient.checkConnection(rangerEndpoint, conf)) {
+        throw new SemanticException(ErrorMsg.REPL_EXTERNAL_SERVICE_CONNECTION_ERROR.format("ranger",
+          "Ranger endpoint is not valid " + rangerEndpoint));

Review comment:
      The error message could be a bit different, like "Ranger endpoint is either invalid or not reachable".
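      i.e.:

  throw new SemanticException(ErrorMsg.REPL_EXTERNAL_SERVICE_CONNECTION_ERROR.format("ranger",
    "Ranger endpoint " + rangerEndpoint + " is either invalid or not reachable"));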

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/atlas/AtlasRestClientBuilder.java
##########
@@ -92,7 +94,7 @@ private void initializeAtlasApplicationProperties() throws SemanticException {
      props.setProperty(ATLAS_PROPERTY_AUTH_KERBEROS, "true");
      ApplicationProperties.set(ConfigurationConverter.getConfiguration(props));
     } catch (AtlasException e) {
-      throw new SemanticException(e);
+      throw new SemanticException(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE.format(), e);

Review comment:
      Isn't the service name needed here?
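      e.g., assuming the same (reason, service) arguments used at the other call sites in this PR:

  throw new SemanticException(
    ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE.format(e.getMessage(), "atlas"), e);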



