HorizonNet commented on a change in pull request #1034: HBASE-23622 Reduced the number of Checkstyle violations in hbase-common
URL: https://github.com/apache/hbase/pull/1034#discussion_r366483222
 
 

 ##########
 File path: hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
 ##########
 @@ -557,71 +508,45 @@ static void setStoragePolicy(final FileSystem fs, final Path path, final String
    */
   private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
       final String storagePolicy) throws IOException {
-    Method m = null;
     Exception toThrow = null;
+
     try {
-      m = fs.getClass().getDeclaredMethod("setStoragePolicy",
-        new Class<?>[] { Path.class, String.class });
-      m.setAccessible(true);
-    } catch (NoSuchMethodException e) {
-      toThrow = e;
-      final String msg = "FileSystem doesn't support setStoragePolicy; HDFS-6584 not available";
-      if (!warningMap.containsKey(fs)) {
-        warningMap.put(fs, true);
-        LOG.warn(msg, e);
-      } else if (LOG.isDebugEnabled()) {
-        LOG.debug(msg, e);
+      fs.setStoragePolicy(path, storagePolicy);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);
       }
-      m = null;
-    } catch (SecurityException e) {
+    } catch (Exception e) {
       toThrow = e;
-      final String msg = "No access to setStoragePolicy on FileSystem; HDFS-6584 not available";
+      // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
+      // misuse than a runtime problem with HDFS.
       if (!warningMap.containsKey(fs)) {
         warningMap.put(fs, true);
-        LOG.warn(msg, e);
+        LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
       } else if (LOG.isDebugEnabled()) {
-        LOG.debug(msg, e);
+        LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
       }
-      m = null; // could happen on setAccessible()
-    }
-    if (m != null) {
-      try {
-        m.invoke(fs, path, storagePolicy);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);
-        }
-      } catch (Exception e) {
-        toThrow = e;
-        // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
-        // misuse than a runtime problem with HDFS.
-        if (!warningMap.containsKey(fs)) {
-          warningMap.put(fs, true);
-          LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
-        } else if (LOG.isDebugEnabled()) {
-          LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
-        }
-        // check for lack of HDFS-7228
-        if (e instanceof InvocationTargetException) {
-          final Throwable exception = e.getCause();
-          if (exception instanceof RemoteException &&
-              HadoopIllegalArgumentException.class.getName().equals(
-                ((RemoteException)exception).getClassName())) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected and probably " +
-                "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
-                "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
-                "more information see the 'ArchivalStorage' docs for your Hadoop release.");
-            }
+      // check for lack of HDFS-7228
+      if (e instanceof InvocationTargetException) {
 
 Review comment:
   Updated the changes to unwrap the `InvocationTargetException` check.
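As a minimal sketch (not the committed patch), the unwrapped HDFS-7228 probe could look like the helper below: with a direct `fs.setStoragePolicy(path, storagePolicy)` call there is no reflective `InvocationTargetException` wrapper, so the caught exception (or its immediate cause) can be inspected for the `RemoteException`/`HadoopIllegalArgumentException` signature directly. The helper name `isPolicyRejectedByOldHadoop` and its standalone class are hypothetical; only the exception types and the check itself come from the diff above.

```java
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.ipc.RemoteException;

final class StoragePolicySketch {
  private StoragePolicySketch() {
  }

  /**
   * Hypothetical helper, not the committed code: true when the NameNode rejected the
   * given storage policy, which usually means the cluster is missing HDFS-7228.
   */
  static boolean isPolicyRejectedByOldHadoop(final Exception e) {
    // No reflection means no InvocationTargetException to unwrap; look at the caught
    // exception first, then fall back to its immediate cause.
    final Throwable candidate = (e instanceof RemoteException) ? e : e.getCause();
    return candidate instanceof RemoteException
        && HadoopIllegalArgumentException.class.getName().equals(
            ((RemoteException) candidate).getClassName());
  }
}
```

In the patch itself this logic presumably stays inline in the `catch (Exception e)` block shown above, guarded by the same `LOG.isDebugEnabled()` check before emitting the advisory message.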

