samperson1997 commented on a change in pull request #1721:
URL: https://github.com/apache/incubator-iotdb/pull/1721#discussion_r487388298



##########
File path: server/src/main/java/org/apache/iotdb/db/metadata/logfile/MLogWriter.java
##########
@@ -0,0 +1,340 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.metadata.logfile;
+
+import java.io.*;
+import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.util.*;
+
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
+import org.apache.iotdb.db.exception.metadata.MetadataException;
+import org.apache.iotdb.db.metadata.MetadataConstant;
+import org.apache.iotdb.db.metadata.MetadataOperationType;
+import org.apache.iotdb.db.metadata.PartialPath;
+import org.apache.iotdb.db.metadata.mnode.MNode;
+import org.apache.iotdb.db.metadata.mnode.MeasurementMNode;
+import org.apache.iotdb.db.metadata.mnode.StorageGroupMNode;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.sys.*;
+import org.apache.iotdb.db.writelog.io.LogWriter;
+import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MLogWriter implements AutoCloseable {
+
+  private static final Logger logger = LoggerFactory.getLogger(MLogWriter.class);
+  private File logFile;
+  private LogWriter logWriter;
+  private int logNum;
+  private ByteBuffer mlogBuffer = ByteBuffer.allocate(
+    IoTDBDescriptor.getInstance().getConfig().getMlogBufferSize());
+
+  public MLogWriter(String schemaDir, String logFileName) throws IOException {
+    File metadataDir = SystemFileFactory.INSTANCE.getFile(schemaDir);
+    if (!metadataDir.exists()) {
+      if (metadataDir.mkdirs()) {
+        logger.info("create schema folder {}.", metadataDir);
+      } else {
+        logger.warn("create schema folder {} failed.", metadataDir);
+      }
+    }
+
+    logFile = SystemFileFactory.INSTANCE.getFile(schemaDir + File.separator + logFileName);
+    // always flush
+    logWriter = new LogWriter(logFile, 0L);
+  }
+
+  public MLogWriter(String logFilePath) throws IOException {
+    logFile = SystemFileFactory.INSTANCE.getFile(logFilePath);
+    // always flush
+    logWriter = new LogWriter(logFile, 0L);
+  }
+
+  public void close() throws IOException {
+    sync();
+    logWriter.close();
+  }
+
+  private void sync() {
+    try {
+      logWriter.write(mlogBuffer);
+    } catch (IOException e) {
+      logger.error("MLog {} sync failed, change system mode to read-only", 
logFile.getAbsoluteFile(), e);
+      IoTDBDescriptor.getInstance().getConfig().setReadOnly(true);
+    }
+    mlogBuffer.clear();
+  }
+
+  private void putLog(PhysicalPlan plan) {
+    mlogBuffer.mark();
+    try {
+      plan.serialize(mlogBuffer);
+    } catch (BufferOverflowException e) {
+      logger.error("MLog {} BufferOverflow !", plan.getOperatorType(), e);
+      mlogBuffer.reset();
+      sync();
+      plan.serialize(mlogBuffer);
+    }
+    logNum ++;
+  }
+
+  public void createTimeseries(CreateTimeSeriesPlan plan, long offset) throws IOException {
+    try {
+      putLog(plan);
+      ChangeTagOffsetPlan changeTagOffsetPlan = new ChangeTagOffsetPlan(plan.getPath(), offset);
+      putLog(changeTagOffsetPlan);
+    } catch (BufferOverflowException e) {
+      throw new IOException(
+        "Log cannot fit into buffer, please increase mlog_buffer_size", e);
+    }
+  }
+
+  public void deleteTimeseries(DeleteTimeSeriesPlan deleteTimeSeriesPlan) throws IOException {
+    try {
+      putLog(deleteTimeSeriesPlan);
+    } catch (BufferOverflowException e) {
+      throw new IOException(
+        "Log cannot fit into buffer, please increase mlog_buffer_size", e);
+    }
+  }
+
+  public void setStorageGroup(PartialPath storageGroup) throws IOException {
+    try {
+      SetStorageGroupPlan plan = new SetStorageGroupPlan(storageGroup);
+      putLog(plan);
+    } catch (BufferOverflowException e) {
+      throw new IOException(
+        "Log cannot fit into buffer, please increase mlog_buffer_size", e);
+    }
+  }
+
+  public void deleteStorageGroup(PartialPath storageGroup) throws IOException {
+    try {
+      DeleteStorageGroupPlan plan = new DeleteStorageGroupPlan(Collections.singletonList(storageGroup));
+      putLog(plan);
+    } catch (BufferOverflowException e) {
+      throw new IOException(
+        "Log cannot fit into buffer, please increase mlog_buffer_size", e);
+    }
+  }
+
+  public void setTTL(PartialPath storageGroup, long ttl) throws IOException {
+    try {
+      SetTTLPlan plan = new SetTTLPlan(storageGroup, ttl);
+      putLog(plan);
+    } catch (BufferOverflowException e) {
+      throw new IOException(
+        "Log cannot fit into buffer, please increase mlog_buffer_size", e);
+    }
+  }
+
+  public void changeOffset(PartialPath path, long offset) throws IOException {
+    try {
+      ChangeTagOffsetPlan plan = new ChangeTagOffsetPlan(path, offset);
+      putLog(plan);
+    } catch (BufferOverflowException e) {
+      throw new IOException(
+        "Log cannot fit into buffer, please increase mlog_buffer_size", e);
+    }
+  }
+
+  public void changeAlias(PartialPath path, String alias) throws IOException {
+    try {
+      ChangeAliasPlan plan = new ChangeAliasPlan(path, alias);
+      putLog(plan);
+    } catch (BufferOverflowException e) {
+      throw new IOException(
+        "Log cannot fit into buffer, please increase mlog_buffer_size", e);
+    }
+  }
+
+  public void serializeMNode(MNode node) throws IOException {
+    try {
+      int childSize = 0;
+      if (node.getChildren() != null) {
+        childSize = node.getChildren().size();
+      }
+      MNodePlan plan = new MNodePlan(node.getName(), childSize);
+      putLog(plan);

Review comment:
       Ah... I understand, it used to be the snapshot file that serializes the MTree. @qiaojialin Do you think it is acceptable?
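
       For context on the snapshot discussion, here is a minimal, hypothetical sketch (not code from this PR) of how an MTree snapshot could be written through the serializeMNode API shown in the diff. The helper class, its name, and the traversal order are assumptions for illustration only; getChildren() is assumed to return a (possibly null) map of child nodes, mirroring the null check in the diff.

       // Hypothetical sketch only (not taken from the PR): dump an MTree subtree into
       // the mlog by writing one plan per node via MLogWriter#serializeMNode.
       import java.io.IOException;

       import org.apache.iotdb.db.metadata.logfile.MLogWriter;
       import org.apache.iotdb.db.metadata.mnode.MNode;

       public class MTreeSnapshotSketch {

         // Post-order traversal: children are written before their parent, so a reader
         // replaying the log could rebuild the tree from each MNodePlan's child count.
         static void serializeSubtree(MNode node, MLogWriter writer) throws IOException {
           if (node.getChildren() != null) {
             for (MNode child : node.getChildren().values()) {
               serializeSubtree(child, writer);
             }
           }
           writer.serializeMNode(node);
         }
       }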




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

