MyXOF commented on a change in pull request #32: fix sonar issues
URL: https://github.com/apache/incubator-iotdb/pull/32#discussion_r251707896
 
 

 ##########
 File path: 
iotdb/src/main/java/org/apache/iotdb/db/engine/filenode/FileNodeProcessor.java
 ##########
 @@ -830,142 +838,112 @@ public void appendFile(IntervalFileNode appendFile, 
String appendFilePath)
    * @param appendFile the appended tsfile information
    */
   public List<String> getOverlapFiles(IntervalFileNode appendFile, String uuid)
-      throws FileNodeProcessorException {
+          throws FileNodeProcessorException {
     List<String> overlapFiles = new ArrayList<>();
     try {
       for (IntervalFileNode intervalFileNode : newFileNodes) {
-        for (Entry<String, Long> entry : 
appendFile.getStartTimeMap().entrySet()) {
-          if (!intervalFileNode.getStartTimeMap().containsKey(entry.getKey())) 
{
-            continue;
-          }
-          if (intervalFileNode.getEndTime(entry.getKey()) >= entry.getValue()
+        getOverlapFiles(appendFile, intervalFileNode, uuid, overlapFiles);
+      }
+    } catch (IOException e) {
+      LOGGER.error("Failed to get overlap tsfiles which conflict with the 
appendFile.");
+      throw new FileNodeProcessorException(e);
+    }
+    return overlapFiles;
+  }
+
+  private void getOverlapFiles(IntervalFileNode appendFile, IntervalFileNode 
intervalFileNode,
+                               String uuid, List<String> overlapFiles) throws 
IOException {
+    for (Entry<String, Long> entry : appendFile.getStartTimeMap().entrySet()) {
+      if (intervalFileNode.getStartTimeMap().containsKey(entry.getKey()) &&
+              intervalFileNode.getEndTime(entry.getKey()) >= entry.getValue()
               && intervalFileNode.getStartTime(entry.getKey()) <= appendFile
               .getEndTime(entry.getKey())) {
-            String relativeFilePath = "postback" + File.separator + uuid + 
File.separator + "backup"
+        String relativeFilePath = "postback" + File.separator + uuid + 
File.separator + "backup"
                 + File.separator + intervalFileNode.getRelativePath();
-            File newFile = new File(
+        File newFile = new File(
                 
Directories.getInstance().getTsFileFolder(intervalFileNode.getBaseDirIndex()),
                 relativeFilePath);
-            if (!newFile.getParentFile().exists()) {
-              newFile.getParentFile().mkdirs();
-            }
-            java.nio.file.Path link = 
FileSystems.getDefault().getPath(newFile.getPath());
-            java.nio.file.Path target = FileSystems.getDefault()
-                .getPath(intervalFileNode.getFilePath());
-            Files.createLink(link, target);
-            overlapFiles.add(newFile.getPath());
-            break;
-          }
+        if (!newFile.getParentFile().exists()) {
+          newFile.getParentFile().mkdirs();
         }
+        java.nio.file.Path link = 
FileSystems.getDefault().getPath(newFile.getPath());
+        java.nio.file.Path target = FileSystems.getDefault()
+                .getPath(intervalFileNode.getFilePath());
+        Files.createLink(link, target);
+        overlapFiles.add(newFile.getPath());
+        break;
       }
-    } catch (IOException e) {
-      LOGGER.error("Failed to get overlap tsfiles which conflict with the 
appendFile.");
-      throw new FileNodeProcessorException(e);
     }
-    return overlapFiles;
   }
 
   /**
    * add time series.
    */
   public void addTimeSeries(String measurementToString, String dataType, 
String encoding) {
     ColumnSchema col = new ColumnSchema(measurementToString, 
TSDataType.valueOf(dataType),
-        TSEncoding.valueOf(encoding));
-    JSONObject measurement = constrcutMeasurement(col);
+            TSEncoding.valueOf(encoding));
+    JSONObject measurement = FileSchemaUtils.constructJsonColumnSchema(col);
     
fileSchema.registerMeasurement(JsonConverter.convertJsonToMeasurementSchema(measurement));
   }
 
-  private JSONObject constrcutMeasurement(ColumnSchema columnSchema) {
-    JSONObject measurement = new JSONObject();
-    measurement.put(JsonFormatConstant.MEASUREMENT_UID, 
columnSchema.getName());
-    measurement.put(JsonFormatConstant.DATA_TYPE, 
columnSchema.dataType.toString());
-    measurement.put(JsonFormatConstant.MEASUREMENT_ENCODING, 
columnSchema.encoding.toString());
-    for (Entry<String, String> entry : columnSchema.getArgsMap().entrySet()) {
-      if (JsonFormatConstant.ENUM_VALUES.equals(entry.getKey())) {
-        String[] valueArray = entry.getValue().split(",");
-        measurement.put(JsonFormatConstant.ENUM_VALUES, new 
JSONArray(valueArray));
-      } else {
-        measurement.put(entry.getKey(), entry.getValue().toString());
-      }
-    }
-    return measurement;
-  }
-
   /**
    * submit the merge task to the <code>MergePool</code>.
    *
    * @return null -can't submit the merge task, because this filenode is not 
overflowed or it is
-   * merging now. Future<?> - submit the merge task successfully.
+   * merging now. Future - submit the merge task successfully.
    */
-  public Future<?> submitToMerge() {
+  public Future submitToMerge() {
+    ZoneId zoneId = IoTDBDescriptor.getInstance().getConfig().getZoneID();
     if (lastMergeTime > 0) {
       long thisMergeTime = System.currentTimeMillis();
       long mergeTimeInterval = thisMergeTime - lastMergeTime;
-      ZonedDateTime lastDateTime = 
ZonedDateTime.ofInstant(Instant.ofEpochMilli(lastMergeTime),
-          IoTDBDescriptor.getInstance().getConfig().getZoneID());
-      ZonedDateTime thisDateTime = 
ZonedDateTime.ofInstant(Instant.ofEpochMilli(thisMergeTime),
-          IoTDBDescriptor.getInstance().getConfig().getZoneID());
+      ZonedDateTime lastDateTime = 
ofInstant(Instant.ofEpochMilli(lastMergeTime),
 
 Review comment:
   应该写 (i.e., it should be written as): ZonedDateTime lastDateTime = 
 ZonedDateTime.ofInstant(Instant.ofEpochMilli(lastMergeTime), zoneId)

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to