This is an automated email from the ASF dual-hosted git repository.
caogaofei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/master by this push:
new cdbac42b8b6 Fixed the bug that table compatibility check may take too
many locks in tree dialect
cdbac42b8b6 is described below
commit cdbac42b8b6376662fc3bc121f379562cd16d738
Author: Caideyipi <[email protected]>
AuthorDate: Wed Aug 14 07:24:18 2024 +0800
Fixed the bug that table compatibility check may take too many locks in
tree dialect
---
.../iotdb/db/queryengine/plan/Coordinator.java | 11 +-
.../queryengine/plan/analyze/AnalyzeVisitor.java | 143 +++++++++++----------
.../analyze/lock/DataNodeSchemaLockManager.java | 23 +++-
.../plan/analyze/schema/ClusterSchemaFetcher.java | 64 ++++-----
.../fetcher/TableHeaderSchemaValidator.java | 6 +-
5 files changed, 132 insertions(+), 115 deletions(-)
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java
index 3aed822ec05..e8481dbd962 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/Coordinator.java
@@ -37,7 +37,6 @@ import org.apache.iotdb.db.queryengine.common.SessionInfo;
import org.apache.iotdb.db.queryengine.execution.QueryIdGenerator;
import org.apache.iotdb.db.queryengine.plan.analyze.IPartitionFetcher;
import
org.apache.iotdb.db.queryengine.plan.analyze.lock.DataNodeSchemaLockManager;
-import org.apache.iotdb.db.queryengine.plan.analyze.lock.SchemaLockType;
import org.apache.iotdb.db.queryengine.plan.analyze.schema.ISchemaFetcher;
import org.apache.iotdb.db.queryengine.plan.execution.ExecutionResult;
import org.apache.iotdb.db.queryengine.plan.execution.IQueryExecution;
@@ -73,7 +72,6 @@ import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
-import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledExecutorService;
@@ -164,14 +162,7 @@ public class Coordinator {
if (queryContext != null) {
queryContext.releaseAllMemoryReservedForFrontEnd();
}
- if (queryContext != null &&
!queryContext.getAcquiredLockNumMap().isEmpty()) {
- Map<SchemaLockType, Integer> lockMap =
queryContext.getAcquiredLockNumMap();
- for (Map.Entry<SchemaLockType, Integer> entry : lockMap.entrySet()) {
- for (int i = 0; i < entry.getValue(); i++) {
-
DataNodeSchemaLockManager.getInstance().releaseReadLock(entry.getKey());
- }
- }
- }
+ DataNodeSchemaLockManager.getInstance().releaseReadLock(queryContext);
}
}
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java
index 2925c1992bb..535261849d4 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java
@@ -2399,7 +2399,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
Analysis analysis = new Analysis();
analysis.setRealStatement(createTimeSeriesStatement);
- checkIsTableCompatible(createTimeSeriesStatement.getPath(), context);
+ checkIsTableCompatible(createTimeSeriesStatement.getPath(), context, true);
checkIsTemplateCompatible(
createTimeSeriesStatement.getPath(),
createTimeSeriesStatement.getAlias(), context, true);
@@ -2413,32 +2413,35 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
}
private void checkIsTemplateCompatible(
- PartialPath timeseriesPath, String alias, MPPQueryContext context,
boolean takeLock) {
+ final PartialPath timeSeriesPath,
+ final String alias,
+ final MPPQueryContext context,
+ final boolean takeLock) {
if (takeLock) {
-
DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.TIMESERIES_VS_TEMPLATE);
- context.addAcquiredLockNum(SchemaLockType.TIMESERIES_VS_TEMPLATE);
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TEMPLATE);
}
- Pair<Template, PartialPath> templateInfo =
- schemaFetcher.checkTemplateSetAndPreSetInfo(timeseriesPath, alias);
+ final Pair<Template, PartialPath> templateInfo =
+ schemaFetcher.checkTemplateSetAndPreSetInfo(timeSeriesPath, alias);
if (templateInfo != null) {
throw new SemanticException(
new TemplateIncompatibleException(
- timeseriesPath.getFullPath(), templateInfo.left.getName(),
templateInfo.right));
+ timeSeriesPath.getFullPath(), templateInfo.left.getName(),
templateInfo.right));
}
}
private void checkIsTemplateCompatible(
- PartialPath devicePath,
- List<String> measurements,
- List<String> aliasList,
- MPPQueryContext context,
- boolean takeLock) {
+ final PartialPath devicePath,
+ final List<String> measurements,
+ final List<String> aliasList,
+ final MPPQueryContext context,
+ final boolean takeLock) {
if (takeLock) {
-
DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.TIMESERIES_VS_TEMPLATE);
- context.addAcquiredLockNum(SchemaLockType.TIMESERIES_VS_TEMPLATE);
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TEMPLATE);
}
for (int i = 0; i < measurements.size(); i++) {
- Pair<Template, PartialPath> templateInfo =
+ final Pair<Template, PartialPath> templateInfo =
schemaFetcher.checkTemplateSetAndPreSetInfo(
devicePath.concatAsMeasurementPath(measurements.get(i)),
aliasList == null ? null : aliasList.get(i));
@@ -2452,26 +2455,29 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
}
}
- private void checkIsTableCompatible(PartialPath timeseriesPath,
MPPQueryContext context) {
-
DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.TIMESERIES_VS_TABLE);
- context.addAcquiredLockNum(SchemaLockType.TIMESERIES_VS_TABLE);
- Pair<String, String> tableInfo =
-
DataNodeTableCache.getInstance().checkTableCreateAndPreCreateOnGivenPath(timeseriesPath);
+ private void checkIsTableCompatible(
+ final PartialPath timeSeriesPath, final MPPQueryContext context, final
boolean takeLock) {
+ if (takeLock) {
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TABLE);
+ }
+ final Pair<String, String> tableInfo =
+
DataNodeTableCache.getInstance().checkTableCreateAndPreCreateOnGivenPath(timeSeriesPath);
if (tableInfo != null) {
throw new SemanticException(new
TableAlreadyExistsException(tableInfo.left, tableInfo.right));
}
}
- private void analyzeSchemaProps(Map<String, String> props) {
+ private void analyzeSchemaProps(final Map<String, String> props) {
if (props == null || props.isEmpty()) {
return;
}
- Map<String, String> caseChangeMap = new HashMap<>();
- for (String key : props.keySet()) {
+ final Map<String, String> caseChangeMap = new HashMap<>();
+ for (final String key : props.keySet()) {
caseChangeMap.put(key.toLowerCase(Locale.ROOT), key);
}
- for (Map.Entry<String, String> caseChangeEntry : caseChangeMap.entrySet())
{
- String lowerCaseKey = caseChangeEntry.getKey();
+ for (final Map.Entry<String, String> caseChangeEntry :
caseChangeMap.entrySet()) {
+ final String lowerCaseKey = caseChangeEntry.getKey();
if (!ALLOWED_SCHEMA_PROPS.contains(lowerCaseKey)) {
throw new SemanticException(
new MetadataException(
@@ -2484,11 +2490,11 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
}
}
- private void analyzeSchemaProps(List<Map<String, String>> propsList) {
+ private void analyzeSchemaProps(final List<Map<String, String>> propsList) {
if (propsList == null) {
return;
}
- for (Map<String, String> props : propsList) {
+ for (final Map<String, String> props : propsList) {
analyzeSchemaProps(props);
}
}
@@ -2511,7 +2517,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
Analysis analysis = new Analysis();
analysis.setRealStatement(createAlignedTimeSeriesStatement);
- checkIsTableCompatible(createAlignedTimeSeriesStatement.getDevicePath(),
context);
+ checkIsTableCompatible(createAlignedTimeSeriesStatement.getDevicePath(),
context, true);
checkIsTemplateCompatible(
createAlignedTimeSeriesStatement.getDevicePath(),
createAlignedTimeSeriesStatement.getMeasurements(),
@@ -2540,7 +2546,7 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
Analysis analysis = new Analysis();
analysis.setRealStatement(internalCreateTimeSeriesStatement);
- checkIsTableCompatible(internalCreateTimeSeriesStatement.getDevicePath(),
context);
+ checkIsTableCompatible(internalCreateTimeSeriesStatement.getDevicePath(),
context, true);
checkIsTemplateCompatible(
internalCreateTimeSeriesStatement.getDevicePath(),
internalCreateTimeSeriesStatement.getMeasurements(),
@@ -2564,25 +2570,27 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
@Override
public Analysis visitInternalCreateMultiTimeSeries(
- InternalCreateMultiTimeSeriesStatement
internalCreateMultiTimeSeriesStatement,
- MPPQueryContext context) {
+ final InternalCreateMultiTimeSeriesStatement
internalCreateMultiTimeSeriesStatement,
+ final MPPQueryContext context) {
context.setQueryType(QueryType.WRITE);
- Analysis analysis = new Analysis();
+ final Analysis analysis = new Analysis();
analysis.setRealStatement(internalCreateMultiTimeSeriesStatement);
- PathPatternTree pathPatternTree = new PathPatternTree();
-
DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.TIMESERIES_VS_TEMPLATE);
- context.addAcquiredLockNum(SchemaLockType.TIMESERIES_VS_TEMPLATE);
- for (Map.Entry<PartialPath, Pair<Boolean, MeasurementGroup>> entry :
+ final PathPatternTree pathPatternTree = new PathPatternTree();
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TABLE);
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TEMPLATE);
+ for (final Map.Entry<PartialPath, Pair<Boolean, MeasurementGroup>> entry :
internalCreateMultiTimeSeriesStatement.getDeviceMap().entrySet()) {
- checkIsTableCompatible(entry.getKey(), context);
+ checkIsTableCompatible(entry.getKey(), context, false);
checkIsTemplateCompatible(
entry.getKey(), entry.getValue().right.getMeasurements(), null,
context, false);
pathPatternTree.appendFullPath(entry.getKey().concatNode(ONE_LEVEL_PATH_WILDCARD));
}
- SchemaPartition schemaPartitionInfo;
+ final SchemaPartition schemaPartitionInfo;
schemaPartitionInfo =
partitionFetcher.getOrCreateSchemaPartition(
pathPatternTree, context.getSession().getUserName());
@@ -2592,29 +2600,32 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
@Override
public Analysis visitCreateMultiTimeseries(
- CreateMultiTimeSeriesStatement createMultiTimeSeriesStatement,
MPPQueryContext context) {
+ final CreateMultiTimeSeriesStatement createMultiTimeSeriesStatement,
+ final MPPQueryContext context) {
context.setQueryType(QueryType.WRITE);
- Analysis analysis = new Analysis();
+ final Analysis analysis = new Analysis();
analysis.setRealStatement(createMultiTimeSeriesStatement);
analyzeSchemaProps(createMultiTimeSeriesStatement.getPropsList());
- List<MeasurementPath> timeseriesPathList =
createMultiTimeSeriesStatement.getPaths();
- List<String> aliasList = createMultiTimeSeriesStatement.getAliasList();
+ final List<MeasurementPath> timeseriesPathList =
createMultiTimeSeriesStatement.getPaths();
+ final List<String> aliasList =
createMultiTimeSeriesStatement.getAliasList();
-
DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.TIMESERIES_VS_TEMPLATE);
- context.addAcquiredLockNum(SchemaLockType.TIMESERIES_VS_TEMPLATE);
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TABLE);
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TEMPLATE);
for (int i = 0; i < timeseriesPathList.size(); i++) {
- checkIsTableCompatible(timeseriesPathList.get(i), context);
+ checkIsTableCompatible(timeseriesPathList.get(i), context, false);
checkIsTemplateCompatible(
timeseriesPathList.get(i), aliasList == null ? null :
aliasList.get(i), context, false);
}
- PathPatternTree patternTree = new PathPatternTree();
- for (PartialPath path : createMultiTimeSeriesStatement.getPaths()) {
+ final PathPatternTree patternTree = new PathPatternTree();
+ for (final PartialPath path : createMultiTimeSeriesStatement.getPaths()) {
patternTree.appendFullPath(path);
}
- SchemaPartition schemaPartitionInfo =
+ final SchemaPartition schemaPartitionInfo =
partitionFetcher.getOrCreateSchemaPartition(
patternTree, context.getSession().getUserName());
analysis.setSchemaPartitionInfo(schemaPartitionInfo);
@@ -3822,10 +3833,10 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
}
private void checkTargetPathsInCreateLogicalView(
- Analysis analysis,
- CreateLogicalViewStatement createLogicalViewStatement,
- MPPQueryContext context) {
- Pair<Boolean, String> checkResult =
createLogicalViewStatement.checkTargetPaths();
+ final Analysis analysis,
+ final CreateLogicalViewStatement createLogicalViewStatement,
+ final MPPQueryContext context) {
+ final Pair<Boolean, String> checkResult =
createLogicalViewStatement.checkTargetPaths();
if (Boolean.FALSE.equals(checkResult.left)) {
analysis.setFinishQueryAfterAnalyze(true);
analysis.setFailStatus(
@@ -3836,10 +3847,10 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
}
// Make sure there are no redundant paths in targets. Note that redundant
paths in source
// are legal.
- List<PartialPath> targetPathList =
createLogicalViewStatement.getTargetPathList();
- Set<String> targetStringSet = new HashSet<>();
- for (PartialPath path : targetPathList) {
- boolean repeatPathNotExist = targetStringSet.add(path.toString());
+ final List<PartialPath> targetPathList =
createLogicalViewStatement.getTargetPathList();
+ final Set<String> targetStringSet = new HashSet<>();
+ for (final PartialPath path : targetPathList) {
+ final boolean repeatPathNotExist = targetStringSet.add(path.toString());
if (!repeatPathNotExist) {
analysis.setFinishQueryAfterAnalyze(true);
analysis.setFailStatus(
@@ -3851,13 +3862,15 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
}
// Make sure all paths are not under any templates
try {
-
DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.TIMESERIES_VS_TEMPLATE);
- context.addAcquiredLockNum(SchemaLockType.TIMESERIES_VS_TEMPLATE);
- for (PartialPath path : createLogicalViewStatement.getTargetPathList()) {
- checkIsTableCompatible(path, context);
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TABLE);
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TEMPLATE);
+ for (final PartialPath path :
createLogicalViewStatement.getTargetPathList()) {
+ checkIsTableCompatible(path, context, false);
checkIsTemplateCompatible(path, null, context, false);
}
- } catch (Exception e) {
+ } catch (final Exception e) {
analysis.setFinishQueryAfterAnalyze(true);
analysis.setFailStatus(
RpcUtils.getStatus(
@@ -3868,14 +3881,14 @@ public class AnalyzeVisitor extends
StatementVisitor<Analysis, MPPQueryContext>
@Override
public Analysis visitShowLogicalView(
- ShowLogicalViewStatement showLogicalViewStatement, MPPQueryContext
context) {
+ final ShowLogicalViewStatement showLogicalViewStatement, final
MPPQueryContext context) {
context.setQueryType(QueryType.READ);
- Analysis analysis = new Analysis();
+ final Analysis analysis = new Analysis();
analysis.setRealStatement(showLogicalViewStatement);
- PathPatternTree patternTree = new PathPatternTree();
+ final PathPatternTree patternTree = new PathPatternTree();
patternTree.appendPathPattern(showLogicalViewStatement.getPathPattern());
- SchemaPartition schemaPartitionInfo =
partitionFetcher.getSchemaPartition(patternTree);
+ final SchemaPartition schemaPartitionInfo =
partitionFetcher.getSchemaPartition(patternTree);
analysis.setSchemaPartitionInfo(schemaPartitionInfo);
analysis.setRespDatasetHeader(DatasetHeaderFactory.getShowLogicalViewHeader());
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java
index b57986ccdec..598124ce0de 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java
@@ -19,6 +19,9 @@
package org.apache.iotdb.db.queryengine.plan.analyze.lock;
+import org.apache.iotdb.db.queryengine.common.MPPQueryContext;
+
+import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;
public class DataNodeSchemaLockManager {
@@ -34,26 +37,34 @@ public class DataNodeSchemaLockManager {
}
private DataNodeSchemaLockManager() {
- int lockNum = SchemaLockType.values().length;
+ final int lockNum = SchemaLockType.values().length;
this.locks = new ReentrantReadWriteLock[lockNum];
for (int i = 0; i < lockNum; i++) {
locks[i] = new ReentrantReadWriteLock(false);
}
}
- public void takeReadLock(SchemaLockType lockType) {
+ public void takeReadLock(final MPPQueryContext context, final SchemaLockType
lockType) {
locks[lockType.ordinal()].readLock().lock();
+ context.addAcquiredLockNum(lockType);
}
- public void releaseReadLock(SchemaLockType lockType) {
- locks[lockType.ordinal()].readLock().unlock();
+ public void releaseReadLock(final MPPQueryContext queryContext) {
+ if (queryContext != null &&
!queryContext.getAcquiredLockNumMap().isEmpty()) {
+ final Map<SchemaLockType, Integer> lockMap =
queryContext.getAcquiredLockNumMap();
+ for (final Map.Entry<SchemaLockType, Integer> entry :
lockMap.entrySet()) {
+ for (int i = 0; i < entry.getValue(); i++) {
+ locks[entry.getKey().ordinal()].readLock().unlock();
+ }
+ }
+ }
}
- public void takeWriteLock(SchemaLockType lockType) {
+ public void takeWriteLock(final SchemaLockType lockType) {
locks[lockType.ordinal()].writeLock().lock();
}
- public void releaseWriteLock(SchemaLockType lockType) {
+ public void releaseWriteLock(final SchemaLockType lockType) {
locks[lockType.ordinal()].writeLock().unlock();
}
}
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java
index eb5fcd1a9be..a985ead3896 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/schema/ClusterSchemaFetcher.java
@@ -187,17 +187,17 @@ public class ClusterSchemaFetcher implements
ISchemaFetcher {
@Override
public void fetchAndComputeSchemaWithAutoCreate(
- ISchemaComputationWithAutoCreation schemaComputationWithAutoCreation,
- MPPQueryContext context) {
+ final ISchemaComputationWithAutoCreation
schemaComputationWithAutoCreation,
+ final MPPQueryContext context) {
// The schema cache R/W and fetch operation must be locked together thus
the cache clean
// operation executed by delete timeseries will be effective.
-
DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.VALIDATE_VS_DELETION);
- context.addAcquiredLockNum(SchemaLockType.VALIDATE_VS_DELETION);
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.VALIDATE_VS_DELETION);
schemaCache.takeReadLock();
try {
- Pair<Template, PartialPath> templateSetInfo =
+ final Pair<Template, PartialPath> templateSetInfo =
templateManager.checkTemplateSetInfo(schemaComputationWithAutoCreation.getDevicePath());
- List<Integer> indexOfMissingMeasurements;
+ final List<Integer> indexOfMissingMeasurements;
if (templateSetInfo == null) {
// normal timeseries
indexOfMissingMeasurements =
@@ -215,7 +215,7 @@ public class ClusterSchemaFetcher implements ISchemaFetcher
{
}
// offer null for the rest missing schema processing
- for (int index : indexOfMissingMeasurements) {
+ for (final int index : indexOfMissingMeasurements) {
schemaComputationWithAutoCreation.computeMeasurement(index, null);
}
} finally {
@@ -225,12 +225,13 @@ public class ClusterSchemaFetcher implements
ISchemaFetcher {
@Override
public void fetchAndComputeSchemaWithAutoCreate(
- List<? extends ISchemaComputationWithAutoCreation>
schemaComputationWithAutoCreationList,
- MPPQueryContext context) {
+ final List<? extends ISchemaComputationWithAutoCreation>
+ schemaComputationWithAutoCreationList,
+ final MPPQueryContext context) {
// The schema cache R/W and fetch operation must be locked together thus
the cache clean
- // operation executed by delete timeseries will be effective.
-
DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.VALIDATE_VS_DELETION);
- context.addAcquiredLockNum(SchemaLockType.VALIDATE_VS_DELETION);
+ // operation executed by delete timeSeries will be effective.
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.VALIDATE_VS_DELETION);
schemaCache.takeReadLock();
try {
@@ -264,25 +265,26 @@ public class ClusterSchemaFetcher implements
ISchemaFetcher {
@Override
public ISchemaTree fetchSchemaListWithAutoCreate(
- List<PartialPath> devicePathList,
- List<String[]> measurementsList,
- List<TSDataType[]> tsDataTypesList,
- List<TSEncoding[]> encodingsList,
- List<CompressionType[]> compressionTypesList,
- List<Boolean> isAlignedList,
- MPPQueryContext context) {
+ final List<PartialPath> devicePathList,
+ final List<String[]> measurementsList,
+ final List<TSDataType[]> tsDataTypesList,
+ final List<TSEncoding[]> encodingsList,
+ final List<CompressionType[]> compressionTypesList,
+ final List<Boolean> isAlignedList,
+ final MPPQueryContext context) {
// The schema cache R/W and fetch operation must be locked together thus
the cache clean
- // operation executed by delete timeseries will be effective.
-
DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.VALIDATE_VS_DELETION);
- context.addAcquiredLockNum(SchemaLockType.VALIDATE_VS_DELETION);
+ // operation executed by delete timeSeries will be effective.
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.VALIDATE_VS_DELETION);
schemaCache.takeReadLock();
try {
- ClusterSchemaTree schemaTree = new ClusterSchemaTree();
- List<List<Integer>> indexOfMissingMeasurementsList = new
ArrayList<>(devicePathList.size());
- List<Integer> indexOfDevicesWithMissingMeasurements = new ArrayList<>();
+ final ClusterSchemaTree schemaTree = new ClusterSchemaTree();
+ final List<List<Integer>> indexOfMissingMeasurementsList =
+ new ArrayList<>(devicePathList.size());
+ final List<Integer> indexOfDevicesWithMissingMeasurements = new
ArrayList<>();
for (int i = 0; i < devicePathList.size(); i++) {
schemaTree.mergeSchemaTree(schemaCache.get(devicePathList.get(i),
measurementsList.get(i)));
- List<Integer> indexOfMissingMeasurements =
+ final List<Integer> indexOfMissingMeasurements =
checkMissingMeasurements(schemaTree, devicePathList.get(i),
measurementsList.get(i));
if (!indexOfMissingMeasurements.isEmpty()) {
indexOfDevicesWithMissingMeasurements.add(i);
@@ -295,8 +297,8 @@ public class ClusterSchemaFetcher implements ISchemaFetcher
{
return schemaTree;
}
- // try fetch the missing schema from remote and cache fetched schema
- ClusterSchemaTree remoteSchemaTree =
+ // Try fetch the missing schema from remote and cache fetched schema
+ final ClusterSchemaTree remoteSchemaTree =
clusterSchemaFetchExecutor.fetchSchemaOfMultiDevices(
devicePathList,
measurementsList,
@@ -311,9 +313,9 @@ public class ClusterSchemaFetcher implements ISchemaFetcher
{
return schemaTree;
}
- // auto create the still missing schema and merge them into schemaTree
- List<Integer> indexOfDevicesNeedAutoCreateSchema = new ArrayList<>();
- List<List<Integer>> indexOfMeasurementsNeedAutoCreate = new
ArrayList<>();
+ // Auto create the still missing schema and merge them into schemaTree
+ final List<Integer> indexOfDevicesNeedAutoCreateSchema = new
ArrayList<>();
+ final List<List<Integer>> indexOfMeasurementsNeedAutoCreate = new
ArrayList<>();
List<Integer> indexOfMissingMeasurements;
int deviceIndex;
for (int i = 0, size = indexOfDevicesWithMissingMeasurements.size(); i <
size; i++) {
diff --git
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/fetcher/TableHeaderSchemaValidator.java
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/fetcher/TableHeaderSchemaValidator.java
index e56ed88ebec..c52d870e126 100644
---
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/fetcher/TableHeaderSchemaValidator.java
+++
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/fetcher/TableHeaderSchemaValidator.java
@@ -79,9 +79,9 @@ public class TableHeaderSchemaValidator {
public Optional<TableSchema> validateTableHeaderSchema(
String database, TableSchema tableSchema, MPPQueryContext context,
boolean allowCreateTable) {
// The schema cache R/W and fetch operation must be locked together thus
the cache clean
- // operation executed by delete timeseries will be effective.
-
DataNodeSchemaLockManager.getInstance().takeReadLock(SchemaLockType.VALIDATE_VS_DELETION);
- context.addAcquiredLockNum(SchemaLockType.VALIDATE_VS_DELETION);
+ // operation executed by delete timeSeries will be effective.
+ DataNodeSchemaLockManager.getInstance()
+ .takeReadLock(context, SchemaLockType.VALIDATE_VS_DELETION);
List<ColumnSchema> inputColumnList = tableSchema.getColumns();
if (inputColumnList == null || inputColumnList.isEmpty()) {