qiaojialin commented on a change in pull request #1721:
URL: https://github.com/apache/iotdb/pull/1721#discussion_r542125127
##########
File path: server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
##########
@@ -204,12 +204,18 @@
/**
* Size of log buffer in each log node(in byte). If WAL is enabled and the
size of a insert plan
- * is smaller than this parameter, then the insert plan will be rejected by
WAL.
+ * is larger than this parameter, then the insert plan will be rejected by
WAL.
*/
private int walBufferSize = 16 * 1024 * 1024;
private int estimatedSeriesSize = 300;
+ /**
+ * Size of log buffer for every MetaData operation. If the size of a
MetaData operation plan
+ * is larger than this parameter, then the MetaData operation plan will be
rejected by MManager.
+ */
+ private int mlogBufferSize = 1024 * 1024;
Review comment:
make this consistent with iotdb-engine.properties?
##########
File path: server/src/main/java/org/apache/iotdb/db/writelog/io/LogWriter.java
##########
@@ -18,36 +18,59 @@
*/
package org.apache.iotdb.db.writelog.io;
+import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
+import org.apache.iotdb.db.utils.TestOnly;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.File;
+import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.util.zip.CRC32;
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
/**
- * LogWriter writes the binarized logs into a file using FileChannel together
with check sums of
+ * LogWriter writes the binary logs into a file using FileChannel together
with check sums of
* each log calculated using CRC32.
*/
public class LogWriter implements ILogWriter {
+ private static final Logger logger =
LoggerFactory.getLogger(LogWriter.class);
private File logFile;
private FileOutputStream fileOutputStream;
private FileChannel channel;
- private CRC32 checkSummer = new CRC32();
- private IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
- private ByteBuffer lengthBuffer = ByteBuffer.allocate(4);
- private ByteBuffer checkSumBuffer = ByteBuffer.allocate(8);
+ private final CRC32 checkSummer = new CRC32();
+ private final ByteBuffer lengthBuffer = ByteBuffer.allocate(4);
+ private final ByteBuffer checkSumBuffer = ByteBuffer.allocate(8);
+ private final long forcePeriodInMs;
- public LogWriter(String logFilePath) {
+ /**
+ * @param logFilePath
+ * @param forcePeriodInMs
+ * @throws FileNotFoundException
+ */
+ @TestOnly
+ public LogWriter(String logFilePath, long forcePeriodInMs) throws
FileNotFoundException {
logFile = SystemFileFactory.INSTANCE.getFile(logFilePath);
+ this.forcePeriodInMs = forcePeriodInMs;
+
+ if (channel == null) {
Review comment:
This condition is always true, according to IDEA.
##########
File path: server/src/main/java/org/apache/iotdb/db/qp/logical/Operator.java
##########
@@ -77,7 +77,8 @@ public String toString() {
TTL, DELETE_STORAGE_GROUP, LOAD_CONFIGURATION, SHOW, LOAD_FILES,
REMOVE_FILE, MOVE_FILE, LAST, GROUP_BY_FILL,
ALTER_TIMESERIES, FLUSH, MERGE, FULL_MERGE, CLEAR_CACHE,
SHOW_MERGE_STATUS, CREATE_SCHEMA_SNAPSHOT, TRACING, DELETE_PARTITION,
- CREATE_MULTI_TIMESERIES
- , CREATE_INDEX, DROP_INDEX, QUERY_INDEX,
+ CREATE_MULTI_TIMESERIES, CREATE_INDEX, DROP_INDEX, QUERY_INDEX,
+ CHANGE_TAG_OFFSET, CHANGE_ALIAS, MNODE,
+ MEASUREMENT_MNODE, STORAGE_GROUP_MNODE;
Review comment:
These could be removed; the operators are only generated in the SQL parser.
##########
File path: server/src/main/java/org/apache/iotdb/db/writelog/io/LogWriter.java
##########
@@ -18,36 +18,59 @@
*/
package org.apache.iotdb.db.writelog.io;
+import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
+import org.apache.iotdb.db.utils.TestOnly;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.File;
+import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.util.zip.CRC32;
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
/**
- * LogWriter writes the binarized logs into a file using FileChannel together
with check sums of
+ * LogWriter writes the binary logs into a file using FileChannel together
with check sums of
* each log calculated using CRC32.
*/
public class LogWriter implements ILogWriter {
+ private static final Logger logger =
LoggerFactory.getLogger(LogWriter.class);
private File logFile;
private FileOutputStream fileOutputStream;
private FileChannel channel;
- private CRC32 checkSummer = new CRC32();
- private IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
- private ByteBuffer lengthBuffer = ByteBuffer.allocate(4);
- private ByteBuffer checkSumBuffer = ByteBuffer.allocate(8);
+ private final CRC32 checkSummer = new CRC32();
+ private final ByteBuffer lengthBuffer = ByteBuffer.allocate(4);
+ private final ByteBuffer checkSumBuffer = ByteBuffer.allocate(8);
+ private final long forcePeriodInMs;
- public LogWriter(String logFilePath) {
+ /**
+ * @param logFilePath
+ * @param forcePeriodInMs
+ * @throws FileNotFoundException
+ */
+ @TestOnly
+ public LogWriter(String logFilePath, long forcePeriodInMs) throws
FileNotFoundException {
logFile = SystemFileFactory.INSTANCE.getFile(logFilePath);
+ this.forcePeriodInMs = forcePeriodInMs;
+
+ if (channel == null) {
+ fileOutputStream = new FileOutputStream(logFile, true);
+ channel = fileOutputStream.getChannel();
+ }
}
- public LogWriter(File logFile) {
+ public LogWriter(File logFile, long forcePeriodInMs) throws
FileNotFoundException {
this.logFile = logFile;
+ this.forcePeriodInMs = forcePeriodInMs;
+
+ if (channel == null) {
Review comment:
This condition is always true, according to IDEA.
##########
File path:
server/src/main/java/org/apache/iotdb/db/metadata/logfile/MLogWriter.java
##########
@@ -0,0 +1,452 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.metadata.logfile;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
+import org.apache.iotdb.db.exception.metadata.MetadataException;
+import org.apache.iotdb.db.metadata.MetadataConstant;
+import org.apache.iotdb.db.metadata.MetadataOperationType;
+import org.apache.iotdb.db.metadata.PartialPath;
+import org.apache.iotdb.db.metadata.mnode.MNode;
+import org.apache.iotdb.db.metadata.mnode.MeasurementMNode;
+import org.apache.iotdb.db.metadata.mnode.StorageGroupMNode;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.sys.ChangeAliasPlan;
+import org.apache.iotdb.db.qp.physical.sys.ChangeTagOffsetPlan;
+import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan;
+import org.apache.iotdb.db.qp.physical.sys.DeleteStorageGroupPlan;
+import org.apache.iotdb.db.qp.physical.sys.DeleteTimeSeriesPlan;
+import org.apache.iotdb.db.qp.physical.sys.MNodePlan;
+import org.apache.iotdb.db.qp.physical.sys.MeasurementMNodePlan;
+import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan;
+import org.apache.iotdb.db.qp.physical.sys.SetTTLPlan;
+import org.apache.iotdb.db.qp.physical.sys.StorageGroupMNodePlan;
+import org.apache.iotdb.db.writelog.io.LogWriter;
+import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
+import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+public class MLogWriter implements AutoCloseable {
+
+ private static final Logger logger =
LoggerFactory.getLogger(MLogWriter.class);
+ private File logFile;
+ private LogWriter logWriter;
+ private int logNum;
+ private static final String DELETE_FAILED_FORMAT = "Deleting %s failed with
exception %s";
+ private ByteBuffer mlogBuffer = ByteBuffer.allocate(
+ IoTDBDescriptor.getInstance().getConfig().getMlogBufferSize());
+
+ // we write log to channel every time, so we need not to call channel.force
every time
+ private static final long DUMMY_FLUSH_TIME = 100;
+ private static final String LOG_TOO_LARGE_INFO = "Log cannot fit into
buffer, please increase mlog_buffer_size";
+
+ public MLogWriter(String schemaDir, String logFileName) throws IOException {
+ File metadataDir = SystemFileFactory.INSTANCE.getFile(schemaDir);
+ if (!metadataDir.exists()) {
+ if (metadataDir.mkdirs()) {
+ logger.info("create schema folder {}.", metadataDir);
+ } else {
+ logger.warn("create schema folder {} failed.", metadataDir);
+ }
+ }
+
+ logFile = SystemFileFactory.INSTANCE.getFile(schemaDir + File.separator +
logFileName);
+ logWriter = new LogWriter(logFile, DUMMY_FLUSH_TIME);
+ }
+
+ public MLogWriter(String logFilePath) throws IOException {
+ logFile = SystemFileFactory.INSTANCE.getFile(logFilePath);
+ logWriter = new LogWriter(logFile, DUMMY_FLUSH_TIME);
+ }
+
+ @Override
+ public void close() throws IOException {
+ logWriter.close();
+ }
+
+ private void sync() {
+ try {
+ logWriter.write(mlogBuffer);
+ } catch (IOException e) {
+ logger.error("MLog {} sync failed, change system mode to read-only",
logFile.getAbsoluteFile(), e);
+ IoTDBDescriptor.getInstance().getConfig().setReadOnly(true);
+ }
+ mlogBuffer.clear();
+ }
+
+ private void putLog(PhysicalPlan plan) {
+ try {
+ plan.serialize(mlogBuffer);
+ sync();
+ logNum ++;
+ } catch (BufferOverflowException e) {
+ logger.warn("MLog {} BufferOverflow !", plan.getOperatorType(), e);
+ }
+ }
+
+ public void createTimeseries(CreateTimeSeriesPlan createTimeSeriesPlan)
throws IOException {
+ try {
+ putLog(createTimeSeriesPlan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void deleteTimeseries(DeleteTimeSeriesPlan deleteTimeSeriesPlan)
throws IOException {
+ try {
+ putLog(deleteTimeSeriesPlan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void setStorageGroup(PartialPath storageGroup) throws IOException {
+ try {
+ SetStorageGroupPlan plan = new SetStorageGroupPlan(storageGroup);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void deleteStorageGroup(PartialPath storageGroup) throws IOException {
+ try {
+ DeleteStorageGroupPlan plan = new
DeleteStorageGroupPlan(Collections.singletonList(storageGroup));
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void setTTL(PartialPath storageGroup, long ttl) throws IOException {
+ try {
+ SetTTLPlan plan = new SetTTLPlan(storageGroup, ttl);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void changeOffset(PartialPath path, long offset) throws IOException {
+ try {
+ ChangeTagOffsetPlan plan = new ChangeTagOffsetPlan(path, offset);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void changeAlias(PartialPath path, String alias) throws IOException {
+ try {
+ ChangeAliasPlan plan = new ChangeAliasPlan(path, alias);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void serializeMNode(MNode node) throws IOException {
+ try {
+ int childSize = 0;
+ if (node.getChildren() != null) {
+ childSize = node.getChildren().size();
+ }
+ MNodePlan plan = new MNodePlan(node.getName(), childSize);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void serializeMeasurementMNode(MeasurementMNode node) throws
IOException {
+ try {
+ int childSize = 0;
+ if (node.getChildren() != null) {
+ childSize = node.getChildren().size();
+ }
+ MeasurementMNodePlan plan = new MeasurementMNodePlan(node.getName(),
node.getAlias(),
+ node.getOffset(), childSize, node.getSchema());
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void serializeStorageGroupMNode(StorageGroupMNode node) throws
IOException {
+ try {
+ int childSize = 0;
+ if (node.getChildren() != null) {
+ childSize = node.getChildren().size();
+ }
+ StorageGroupMNodePlan plan = new StorageGroupMNodePlan(node.getName(),
node.getDataTTL(), childSize);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity
warning
+ public static void upgradeTxtToBin(String schemaDir, String oldFileName,
+ String newFileName, boolean isSnapshot)
throws IOException {
+ File logFile = SystemFileFactory.INSTANCE.getFile(schemaDir +
File.separator + newFileName);
+ File tmpLogFile =
SystemFileFactory.INSTANCE.getFile(logFile.getAbsolutePath() + ".tmp");
+ File oldLogFile = SystemFileFactory.INSTANCE.getFile(
+ schemaDir + File.separator + oldFileName);
+ File tmpOldLogFile =
SystemFileFactory.INSTANCE.getFile(oldLogFile.getAbsolutePath()
Review comment:
What is this file for? I do not see where this file is generated.
##########
File path:
server/src/main/java/org/apache/iotdb/db/metadata/logfile/MLogWriter.java
##########
@@ -0,0 +1,452 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.metadata.logfile;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
+import org.apache.iotdb.db.exception.metadata.MetadataException;
+import org.apache.iotdb.db.metadata.MetadataConstant;
+import org.apache.iotdb.db.metadata.MetadataOperationType;
+import org.apache.iotdb.db.metadata.PartialPath;
+import org.apache.iotdb.db.metadata.mnode.MNode;
+import org.apache.iotdb.db.metadata.mnode.MeasurementMNode;
+import org.apache.iotdb.db.metadata.mnode.StorageGroupMNode;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.sys.ChangeAliasPlan;
+import org.apache.iotdb.db.qp.physical.sys.ChangeTagOffsetPlan;
+import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan;
+import org.apache.iotdb.db.qp.physical.sys.DeleteStorageGroupPlan;
+import org.apache.iotdb.db.qp.physical.sys.DeleteTimeSeriesPlan;
+import org.apache.iotdb.db.qp.physical.sys.MNodePlan;
+import org.apache.iotdb.db.qp.physical.sys.MeasurementMNodePlan;
+import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan;
+import org.apache.iotdb.db.qp.physical.sys.SetTTLPlan;
+import org.apache.iotdb.db.qp.physical.sys.StorageGroupMNodePlan;
+import org.apache.iotdb.db.writelog.io.LogWriter;
+import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
+import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+public class MLogWriter implements AutoCloseable {
+
+ private static final Logger logger =
LoggerFactory.getLogger(MLogWriter.class);
+ private File logFile;
+ private LogWriter logWriter;
+ private int logNum;
+ private static final String DELETE_FAILED_FORMAT = "Deleting %s failed with
exception %s";
+ private ByteBuffer mlogBuffer = ByteBuffer.allocate(
+ IoTDBDescriptor.getInstance().getConfig().getMlogBufferSize());
+
+ // we write log to channel every time, so we need not to call channel.force
every time
+ private static final long DUMMY_FLUSH_TIME = 100;
+ private static final String LOG_TOO_LARGE_INFO = "Log cannot fit into
buffer, please increase mlog_buffer_size";
+
+ public MLogWriter(String schemaDir, String logFileName) throws IOException {
+ File metadataDir = SystemFileFactory.INSTANCE.getFile(schemaDir);
+ if (!metadataDir.exists()) {
+ if (metadataDir.mkdirs()) {
+ logger.info("create schema folder {}.", metadataDir);
+ } else {
+ logger.warn("create schema folder {} failed.", metadataDir);
+ }
+ }
+
+ logFile = SystemFileFactory.INSTANCE.getFile(schemaDir + File.separator +
logFileName);
+ logWriter = new LogWriter(logFile, DUMMY_FLUSH_TIME);
+ }
+
+ public MLogWriter(String logFilePath) throws IOException {
+ logFile = SystemFileFactory.INSTANCE.getFile(logFilePath);
+ logWriter = new LogWriter(logFile, DUMMY_FLUSH_TIME);
+ }
+
+ @Override
+ public void close() throws IOException {
+ logWriter.close();
+ }
+
+ private void sync() {
+ try {
+ logWriter.write(mlogBuffer);
+ } catch (IOException e) {
+ logger.error("MLog {} sync failed, change system mode to read-only",
logFile.getAbsoluteFile(), e);
+ IoTDBDescriptor.getInstance().getConfig().setReadOnly(true);
+ }
+ mlogBuffer.clear();
+ }
+
+ private void putLog(PhysicalPlan plan) {
+ try {
+ plan.serialize(mlogBuffer);
+ sync();
+ logNum ++;
+ } catch (BufferOverflowException e) {
+ logger.warn("MLog {} BufferOverflow !", plan.getOperatorType(), e);
+ }
+ }
+
+ public void createTimeseries(CreateTimeSeriesPlan createTimeSeriesPlan)
throws IOException {
+ try {
+ putLog(createTimeSeriesPlan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void deleteTimeseries(DeleteTimeSeriesPlan deleteTimeSeriesPlan)
throws IOException {
+ try {
+ putLog(deleteTimeSeriesPlan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void setStorageGroup(PartialPath storageGroup) throws IOException {
+ try {
+ SetStorageGroupPlan plan = new SetStorageGroupPlan(storageGroup);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void deleteStorageGroup(PartialPath storageGroup) throws IOException {
+ try {
+ DeleteStorageGroupPlan plan = new
DeleteStorageGroupPlan(Collections.singletonList(storageGroup));
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void setTTL(PartialPath storageGroup, long ttl) throws IOException {
+ try {
+ SetTTLPlan plan = new SetTTLPlan(storageGroup, ttl);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void changeOffset(PartialPath path, long offset) throws IOException {
+ try {
+ ChangeTagOffsetPlan plan = new ChangeTagOffsetPlan(path, offset);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void changeAlias(PartialPath path, String alias) throws IOException {
+ try {
+ ChangeAliasPlan plan = new ChangeAliasPlan(path, alias);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void serializeMNode(MNode node) throws IOException {
+ try {
+ int childSize = 0;
+ if (node.getChildren() != null) {
+ childSize = node.getChildren().size();
+ }
+ MNodePlan plan = new MNodePlan(node.getName(), childSize);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void serializeMeasurementMNode(MeasurementMNode node) throws
IOException {
+ try {
+ int childSize = 0;
+ if (node.getChildren() != null) {
+ childSize = node.getChildren().size();
+ }
+ MeasurementMNodePlan plan = new MeasurementMNodePlan(node.getName(),
node.getAlias(),
+ node.getOffset(), childSize, node.getSchema());
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ public void serializeStorageGroupMNode(StorageGroupMNode node) throws
IOException {
+ try {
+ int childSize = 0;
+ if (node.getChildren() != null) {
+ childSize = node.getChildren().size();
+ }
+ StorageGroupMNodePlan plan = new StorageGroupMNodePlan(node.getName(),
node.getDataTTL(), childSize);
+ putLog(plan);
+ } catch (BufferOverflowException e) {
+ throw new IOException(
+ LOG_TOO_LARGE_INFO, e);
+ }
+ }
+
+ @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity
warning
+ public static void upgradeTxtToBin(String schemaDir, String oldFileName,
+ String newFileName, boolean isSnapshot)
throws IOException {
+ File logFile = SystemFileFactory.INSTANCE.getFile(schemaDir +
File.separator + newFileName);
+ File tmpLogFile =
SystemFileFactory.INSTANCE.getFile(logFile.getAbsolutePath() + ".tmp");
+ File oldLogFile = SystemFileFactory.INSTANCE.getFile(
+ schemaDir + File.separator + oldFileName);
+ File tmpOldLogFile =
SystemFileFactory.INSTANCE.getFile(oldLogFile.getAbsolutePath()
+ + ".tmp");
+
+ if (oldLogFile.exists() || tmpOldLogFile.exists()) {
+
+ if (tmpOldLogFile.exists() && !oldLogFile.exists()) {
+ FileUtils.moveFile(tmpOldLogFile, oldLogFile);
+ }
+
+ try (MLogWriter mLogWriter = new MLogWriter(schemaDir, newFileName +
".tmp");
+ MLogTxtReader mLogTxtReader = new MLogTxtReader(schemaDir,
oldFileName)) {
+ // upgrade from old character log file to new binary mlog
+ while (mLogTxtReader.hasNext()) {
+ String cmd = mLogTxtReader.next();
+ try {
+ mLogWriter.operation(cmd, isSnapshot);
+ } catch (MetadataException e) {
+ logger.error("failed to upgrade cmd {}.", cmd, e);
+ }
+ }
+ }
+ } else if (!logFile.exists() && !tmpLogFile.exists()) {
+ // if both .bin and .bin.tmp do not exist, nothing to do
+ } else if (!logFile.exists() && tmpLogFile.exists()) {
+ // if old .bin doesn't exist but .bin.tmp exists, rename tmp file to .bin
+ FSFactoryProducer.getFSFactory().moveFile(tmpLogFile, logFile);
+ } else if (tmpLogFile.exists()) {
+ // if both .bin and .bin.tmp exist, delete .bin.tmp
+ try {
+ Files.delete(Paths.get(tmpLogFile.toURI()));
+ } catch (IOException e) {
+ throw new IOException(String.format(DELETE_FAILED_FORMAT, tmpLogFile,
e.getMessage()));
+ }
+ }
+
+ // do some clean job
+ // remove old .txt and .txt.tmp
+ if (oldLogFile.exists()) {
+ try {
+ Files.delete(Paths.get(oldLogFile.toURI()));
+ } catch (IOException e) {
+ throw new IOException(String.format(DELETE_FAILED_FORMAT, oldLogFile,
e.getMessage()));
+ }
+ }
+
+ if (tmpOldLogFile.exists()) {
+ try {
+ Files.delete(Paths.get(tmpOldLogFile.toURI()));
+ } catch (IOException e) {
+ throw new IOException(String.format(DELETE_FAILED_FORMAT,
tmpOldLogFile, e.getMessage()));
+ }
+ }
+
+ // rename .bin.tmp to .bin
+ FSFactoryProducer.getFSFactory().moveFile(tmpLogFile, logFile);
+ }
+
+ public static void upgradeMLog() throws IOException {
+ String schemaDir =
IoTDBDescriptor.getInstance().getConfig().getSchemaDir();
+ upgradeTxtToBin(schemaDir, MetadataConstant.METADATA_TXT_LOG,
MetadataConstant.METADATA_LOG, false);
+ upgradeTxtToBin(schemaDir, MetadataConstant.MTREE_TXT_SNAPSHOT,
MetadataConstant.MTREE_SNAPSHOT, true);
+ }
+
+ public void clear() throws IOException {
+ sync();
+ logWriter.close();
+ mlogBuffer.clear();
+ if (logFile != null && logFile.exists()) {
+ Files.delete(logFile.toPath());
+ }
+ logNum = 0;
+ logWriter = new LogWriter(logFile, 0L);
Review comment:
The forcePeriodInMs value is different between clear() and the constructor.
##########
File path: server/src/main/java/org/apache/iotdb/db/writelog/io/LogWriter.java
##########
@@ -18,36 +18,59 @@
*/
package org.apache.iotdb.db.writelog.io;
+import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
+import org.apache.iotdb.db.utils.TestOnly;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.File;
+import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.util.zip.CRC32;
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
/**
- * LogWriter writes the binarized logs into a file using FileChannel together
with check sums of
+ * LogWriter writes the binary logs into a file using FileChannel together
with check sums of
* each log calculated using CRC32.
*/
public class LogWriter implements ILogWriter {
+ private static final Logger logger =
LoggerFactory.getLogger(LogWriter.class);
private File logFile;
private FileOutputStream fileOutputStream;
private FileChannel channel;
- private CRC32 checkSummer = new CRC32();
- private IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
- private ByteBuffer lengthBuffer = ByteBuffer.allocate(4);
- private ByteBuffer checkSumBuffer = ByteBuffer.allocate(8);
+ private final CRC32 checkSummer = new CRC32();
+ private final ByteBuffer lengthBuffer = ByteBuffer.allocate(4);
+ private final ByteBuffer checkSumBuffer = ByteBuffer.allocate(8);
+ private final long forcePeriodInMs;
- public LogWriter(String logFilePath) {
+ /**
+ * @param logFilePath
+ * @param forcePeriodInMs
+ * @throws FileNotFoundException
+ */
+ @TestOnly
+ public LogWriter(String logFilePath, long forcePeriodInMs) throws
FileNotFoundException {
logFile = SystemFileFactory.INSTANCE.getFile(logFilePath);
+ this.forcePeriodInMs = forcePeriodInMs;
+
+ if (channel == null) {
+ fileOutputStream = new FileOutputStream(logFile, true);
+ channel = fileOutputStream.getChannel();
+ }
}
- public LogWriter(File logFile) {
+ public LogWriter(File logFile, long forcePeriodInMs) throws
FileNotFoundException {
Review comment:
The 'forcePeriodInMs' parameter is actually used as a boolean
'forceAtEachWrite'.
##########
File path:
server/src/main/java/org/apache/iotdb/db/metadata/logfile/MLogWriter.java
##########
@@ -0,0 +1,452 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.db.metadata.logfile;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
+import org.apache.iotdb.db.exception.metadata.MetadataException;
+import org.apache.iotdb.db.metadata.MetadataConstant;
+import org.apache.iotdb.db.metadata.MetadataOperationType;
+import org.apache.iotdb.db.metadata.PartialPath;
+import org.apache.iotdb.db.metadata.mnode.MNode;
+import org.apache.iotdb.db.metadata.mnode.MeasurementMNode;
+import org.apache.iotdb.db.metadata.mnode.StorageGroupMNode;
+import org.apache.iotdb.db.qp.physical.PhysicalPlan;
+import org.apache.iotdb.db.qp.physical.sys.ChangeAliasPlan;
+import org.apache.iotdb.db.qp.physical.sys.ChangeTagOffsetPlan;
+import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan;
+import org.apache.iotdb.db.qp.physical.sys.DeleteStorageGroupPlan;
+import org.apache.iotdb.db.qp.physical.sys.DeleteTimeSeriesPlan;
+import org.apache.iotdb.db.qp.physical.sys.MNodePlan;
+import org.apache.iotdb.db.qp.physical.sys.MeasurementMNodePlan;
+import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan;
+import org.apache.iotdb.db.qp.physical.sys.SetTTLPlan;
+import org.apache.iotdb.db.qp.physical.sys.StorageGroupMNodePlan;
+import org.apache.iotdb.db.writelog.io.LogWriter;
+import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
+import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+public class MLogWriter implements AutoCloseable {
+
+ private static final Logger logger =
LoggerFactory.getLogger(MLogWriter.class);
+ private File logFile;
+ private LogWriter logWriter;
+ private int logNum;
+ private static final String DELETE_FAILED_FORMAT = "Deleting %s failed with
exception %s";
+ private ByteBuffer mlogBuffer = ByteBuffer.allocate(
+ IoTDBDescriptor.getInstance().getConfig().getMlogBufferSize());
+
+ // we write log to channel every time, so we need not to call channel.force
every time
+ private static final long DUMMY_FLUSH_TIME = 100;
Review comment:
The function of this field is the same as a boolean's.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]