fanhualta commented on a change in pull request #32: fix sonar issues
URL: https://github.com/apache/incubator-iotdb/pull/32#discussion_r251861526
 
 

 ##########
 File path: 
iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeManager.java
 ##########
 @@ -961,99 +950,80 @@ private void close(String processorName) throws 
FileNodeManagerException {
    */
   public synchronized boolean deleteAll() throws FileNodeManagerException {
     LOGGER.info("Start deleting all filenode");
-    if (fileNodeManagerStatus == FileNodeManagerStatus.NONE) {
-      fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
-      try {
-        Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator = 
processorMap.entrySet()
-            .iterator();
-        while (processorIterator.hasNext()) {
-          Map.Entry<String, FileNodeProcessor> processorEntry = 
processorIterator.next();
-          try {
-            delete(processorEntry.getKey(), processorIterator);
-          } catch (FileNodeManagerException e) {
-            throw e;
-          }
-        }
-        return processorMap.isEmpty();
-      } catch (FileNodeManagerException e) {
-        throw new FileNodeManagerException(e);
-      } finally {
-        LOGGER.info("Delete all filenode processor successfully");
-        fileNodeManagerStatus = FileNodeManagerStatus.NONE;
-      }
-    } else {
+    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
       LOGGER.info("Failed to delete all filenode processor because of merge 
operation");
       return false;
     }
+
+    fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
+    try {
+      Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator = 
processorMap.entrySet()
+              .iterator();
+      while (processorIterator.hasNext()) {
+        Map.Entry<String, FileNodeProcessor> processorEntry = 
processorIterator.next();
+        delete(processorEntry.getKey(), processorIterator);
+      }
+      return processorMap.isEmpty();
+    } finally {
+      LOGGER.info("Deleting all FileNodeProcessors ends");
+      fileNodeManagerStatus = FileNodeManagerStatus.NONE;
+    }
   }
 
   /**
    * Try to close All.
    */
   public void closeAll() throws FileNodeManagerException {
     LOGGER.info("Start closing all filenode processor");
-    if (fileNodeManagerStatus == FileNodeManagerStatus.NONE) {
-      fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
-      try {
-        Iterator<Map.Entry<String, FileNodeProcessor>> processorIterator = 
processorMap.entrySet()
-            .iterator();
-        while (processorIterator.hasNext()) {
-          Map.Entry<String, FileNodeProcessor> processorEntry = 
processorIterator.next();
-          try {
-            close(processorEntry.getKey());
-          } catch (FileNodeManagerException e) {
-            throw e;
-          }
-        }
-      } catch (FileNodeManagerException e) {
-        throw new FileNodeManagerException(e);
-      } finally {
-        LOGGER.info("Close all filenode processor successfully");
-        fileNodeManagerStatus = FileNodeManagerStatus.NONE;
-      }
-    } else {
+    if (fileNodeManagerStatus != FileNodeManagerStatus.NONE) {
       LOGGER.info("Failed to close all filenode processor because of merge 
operation");
+      return;
+    }
+    fileNodeManagerStatus = FileNodeManagerStatus.CLOSE;
+    try {
+      for (Map.Entry<String, FileNodeProcessor> processorEntry : 
processorMap.entrySet()) {
+        close(processorEntry.getKey());
+      }
+    } finally {
+      LOGGER.info("Close all FileNodeProcessors ends");
+      fileNodeManagerStatus = FileNodeManagerStatus.NONE;
     }
   }
 
   /**
    * force flush to control memory usage.
    */
   public void forceFlush(BasicMemController.UsageLevel level) {
-    // TODO : for each FileNodeProcessor, call its forceFlush()
     // you may add some delicate process like below
     // or you could provide multiple methods for different urgency
     switch (level) {
+      // only select the most urgent (most active or biggest in size)
+      // processors to flush
+      // only select top 10% active memory user to flush
       case WARNING:
-        // only select the most urgent (most active or biggest in size)
-        // processors to flush
-        // only select top 10% active memory user to flush
         try {
           flushTop(0.1f);
         } catch (IOException e) {
-          LOGGER.error("force flush memory data error: {}", e.getMessage());
-          e.printStackTrace();
+          LOGGER.error("force flush memory data error: {}", e);
         }
         break;
+      // force all processors to flush
       case DANGEROUS:
-        // force all processors to flush
         try {
           flushAll();
         } catch (IOException e) {
-          LOGGER.error("force flush memory data error: {}", e.getMessage());
-          e.printStackTrace();
+          LOGGER.error("force flush memory data error: {}", e);
         }
         break;
+      // if the flush thread pool is not full ( or half full), start a new
+      // flush task
       case SAFE:
-        // if the flush thread pool is not full ( or half full), start a new
-        // flush task
         if (FlushManager.getInstance().getActiveCnt() < 0.5 * 
FlushManager.getInstance()
-            .getThreadCnt()) {
+                .getThreadCnt()) {
           try {
             flushTop(0.01f);
           } catch (IOException e) {
-            LOGGER.error("force flush memory data error:{}", e.getMessage());
-            e.printStackTrace();
+            LOGGER.error("force flush memory data error:{}", e);
 
 Review comment:
   The `{}` placeholder is unnecessary here: when a Throwable is passed as the last argument with no matching placeholder, SLF4J treats it as the exception and logs its full stack trace. With the `{}` present, only `e.toString()` is formatted into the message. There are many similar situations below.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to