This is an automated email from the ASF dual-hosted git repository.
alsuliman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/asterixdb.git
The following commit(s) were added to refs/heads/master by this push:
new 2ac3da96df [ASTERIXDB-3144][COMP] Remove partitions map null check
2ac3da96df is described below
commit 2ac3da96df2c311268204683ea8591aad3826ecd
Author: Ali Alsuliman <[email protected]>
AuthorDate: Tue May 16 13:44:19 2023 -0700
[ASTERIXDB-3144][COMP] Remove partitions map null check
- user model changes: no
- storage format changes: no
- interface changes: no
Details:
Remove the partitions map null check in the index modification runtime.
- Always contribute constraints for the data source scan operator.
Change-Id: I871eeced711057d46cd682d51daf94b6b8f979d3
Reviewed-on: https://asterix-gerrit.ics.uci.edu/c/asterixdb/+/17538
Integration-Tests: Jenkins <[email protected]>
Tested-by: Jenkins <[email protected]>
Reviewed-by: Ali Alsuliman <[email protected]>
Reviewed-by: Murtadha Hubail <[email protected]>
---
.../operators/physical/InvertedIndexPOperator.java | 2 +-
.../asterix/app/function/DumpIndexRewriter.java | 2 +-
.../org/apache/asterix/utils/DataverseUtil.java | 2 +-
.../org/apache/asterix/utils/FeedOperations.java | 2 +-
.../org/apache/asterix/utils/FlushDatasetUtil.java | 1 +
.../common/cluster/PartitioningProperties.java | 2 +-
.../asterix/metadata/declared/MetadataProvider.java | 21 ++++++++++++---------
.../DatasetTupleTranslator.java | 5 ++---
.../apache/asterix/metadata/utils/DatasetUtil.java | 12 ++++++------
.../metadata/utils/SampleOperationsHelper.java | 4 ++--
.../utils/SecondaryIndexOperationsHelper.java | 6 +++---
.../utils/SecondaryTreeIndexOperationsHelper.java | 4 ++--
.../operators/physical/DataSourceScanPOperator.java | 4 +---
...IndexInsertUpdateDeleteOperatorNodePushable.java | 2 +-
14 files changed, 35 insertions(+), 34 deletions(-)
diff --git
a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/algebra/operators/physical/InvertedIndexPOperator.java
b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/algebra/operators/physical/InvertedIndexPOperator.java
index 20334bf277..5e66f6892e 100644
---
a/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/algebra/operators/physical/InvertedIndexPOperator.java
+++
b/asterixdb/asterix-algebra/src/main/java/org/apache/asterix/algebra/operators/physical/InvertedIndexPOperator.java
@@ -184,7 +184,7 @@ public class InvertedIndexPOperator extends
IndexSearchPOperator {
((Index.TextIndexDetails)
secondaryIndex.getIndexDetails()).getFullTextConfigName());
IIndexDataflowHelperFactory dataflowHelperFactory =
new
IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
LSMInvertedIndexSearchOperatorDescriptor invIndexSearchOp =
new LSMInvertedIndexSearchOperatorDescriptor(jobSpec,
outputRecDesc, queryField, dataflowHelperFactory,
queryTokenizerFactory, fullTextConfigEvaluatorFactory,
searchModifierFactory, retainInput,
diff --git
a/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/function/DumpIndexRewriter.java
b/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/function/DumpIndexRewriter.java
index dac1ac7224..9fb638581c 100644
---
a/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/function/DumpIndexRewriter.java
+++
b/asterixdb/asterix-app/src/main/java/org/apache/asterix/app/function/DumpIndexRewriter.java
@@ -71,7 +71,7 @@ public class DumpIndexRewriter extends FunctionRewriter {
metadataProvider.getPartitioningProperties(dataset,
index.getIndexName());
IndexDataflowHelperFactory indexDataflowHelperFactory =
new
IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
AlgebricksAbsolutePartitionConstraint secondaryPartitionConstraint =
(AlgebricksAbsolutePartitionConstraint)
partitioningProperties.getConstraints();
return new DumpIndexDatasource(context.getComputationNodeDomain(),
indexDataflowHelperFactory,
diff --git
a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/DataverseUtil.java
b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/DataverseUtil.java
index bfba414669..61b526c887 100644
---
a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/DataverseUtil.java
+++
b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/DataverseUtil.java
@@ -35,7 +35,7 @@ public class DataverseUtil {
JobSpecification jobSpec =
RuntimeUtils.createJobSpecification(metadata.getApplicationContext());
PartitioningProperties partitioningProperties =
metadata.splitAndConstraints(dataverse.getDataverseName());
FileRemoveOperatorDescriptor frod = new
FileRemoveOperatorDescriptor(jobSpec,
- partitioningProperties.getSpiltsProvider(), false,
partitioningProperties.getComputeStorageMap());
+ partitioningProperties.getSplitsProvider(), false,
partitioningProperties.getComputeStorageMap());
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(jobSpec,
frod,
partitioningProperties.getConstraints());
jobSpec.addRoot(frod);
diff --git
a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FeedOperations.java
b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FeedOperations.java
index 72961b8648..44f8a802ef 100644
---
a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FeedOperations.java
+++
b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FeedOperations.java
@@ -153,7 +153,7 @@ public class FeedOperations {
JobSpecification spec = RuntimeUtils.createJobSpecification(appCtx);
PartitioningProperties partitioningProperties =
metadataProvider.getPartitioningProperties(feed);
FileRemoveOperatorDescriptor frod = new
FileRemoveOperatorDescriptor(spec,
- partitioningProperties.getSpiltsProvider(), true,
partitioningProperties.getComputeStorageMap());
+ partitioningProperties.getSplitsProvider(), true,
partitioningProperties.getComputeStorageMap());
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec, frod,
partitioningProperties.getConstraints());
spec.addRoot(frod);
diff --git
a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FlushDatasetUtil.java
b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FlushDatasetUtil.java
index f012a4e56a..e46738db86 100644
---
a/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FlushDatasetUtil.java
+++
b/asterixdb/asterix-app/src/main/java/org/apache/asterix/utils/FlushDatasetUtil.java
@@ -69,6 +69,7 @@ public class FlushDatasetUtil {
metadataProvider.getPartitioningProperties(dataset,
dataset.getDatasetName());
AlgebricksPartitionConstraint primaryPartitionConstraint =
partitioningProperties.getConstraints();
+ //TODO(partitioning) can make it run only at NC level since any flush
call will flush all partitions in the NC
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec,
emptySource,
primaryPartitionConstraint);
diff --git
a/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/PartitioningProperties.java
b/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/PartitioningProperties.java
index 1580ca4ef8..3443b84c61 100644
---
a/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/PartitioningProperties.java
+++
b/asterixdb/asterix-common/src/main/java/org/apache/asterix/common/cluster/PartitioningProperties.java
@@ -38,7 +38,7 @@ public class PartitioningProperties {
return new PartitioningProperties(splitsProvider, constraints,
computeStorageMap);
}
- public IFileSplitProvider getSpiltsProvider() {
+ public IFileSplitProvider getSplitsProvider() {
return splitsProvider;
}
diff --git
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/MetadataProvider.java
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/MetadataProvider.java
index 3404ace0a5..b07a03eddc 100644
---
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/MetadataProvider.java
+++
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/declared/MetadataProvider.java
@@ -600,7 +600,7 @@ public class MetadataProvider implements
IMetadataProvider<DataSourceId, String>
primaryKeyFields, primaryKeyFieldsInSecondaryIndex,
proceedIndexOnlyPlan);
IStorageManager storageManager =
getStorageComponentProvider().getStorageManager();
IIndexDataflowHelperFactory indexHelperFactory =
- new IndexDataflowHelperFactory(storageManager,
datasetPartitioningProp.getSpiltsProvider());
+ new IndexDataflowHelperFactory(storageManager,
datasetPartitioningProp.getSplitsProvider());
BTreeSearchOperatorDescriptor btreeSearchOp;
int[][] partitionsMap = datasetPartitioningProp.getComputeStorageMap();
@@ -678,7 +678,7 @@ public class MetadataProvider implements
IMetadataProvider<DataSourceId, String>
primaryKeyFields, primaryKeyFieldsInSecondaryIndex,
isIndexOnlyPlan);
RTreeSearchOperatorDescriptor rtreeSearchOp;
IIndexDataflowHelperFactory indexDataflowHelperFactory = new
IndexDataflowHelperFactory(
- storageComponentProvider.getStorageManager(),
partitioningProperties.getSpiltsProvider());
+ storageComponentProvider.getStorageManager(),
partitioningProperties.getSplitsProvider());
if (dataset.getDatasetType() == DatasetType.INTERNAL) {
int[][] partitionsMap =
partitioningProperties.getComputeStorageMap();
rtreeSearchOp = new RTreeSearchOperatorDescriptor(jobSpec,
outputRecDesc, keyFields, true, true,
@@ -930,7 +930,7 @@ public class MetadataProvider implements
IMetadataProvider<DataSourceId, String>
public FileSplit[] splitsForIndex(MetadataTransactionContext mdTxnCtx,
Dataset dataset, String indexName)
throws AlgebricksException {
- return dataPartitioningProvider.getPartitioningProperties(mdTxnCtx,
dataset, indexName).getSpiltsProvider()
+ return dataPartitioningProvider.getPartitioningProperties(mdTxnCtx,
dataset, indexName).getSplitsProvider()
.getFileSplits();
}
@@ -980,6 +980,7 @@ public class MetadataProvider implements
IMetadataProvider<DataSourceId, String>
ExternalScanOperatorDescriptor dataScanner = new
ExternalScanOperatorDescriptor(jobSpec, scannerDesc,
adapterFactory, tupleFilterFactory, outputLimit);
+ //TODO(partitioning) check
AlgebricksPartitionConstraint constraint;
try {
constraint = adapterFactory.getPartitionConstraint();
@@ -1042,8 +1043,9 @@ public class MetadataProvider implements
IMetadataProvider<DataSourceId, String>
IModificationOperationCallbackFactory modificationCallbackFactory =
dataset
.getModificationCallbackFactory(storageComponentProvider,
primaryIndex, indexOp, primaryKeyFields);
IIndexDataflowHelperFactory idfh = new
IndexDataflowHelperFactory(storageComponentProvider.getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
IBinaryHashFunctionFactory[] pkHashFunFactories =
dataset.getPrimaryHashFunctionFactories(this);
+ //TODO(partitioning) rename to static
ITuplePartitionerFactory partitionerFactory = new
FieldHashPartitionerFactory(pkFields, pkHashFunFactories,
partitioningProperties.getNumberOfPartitions());
@@ -1067,7 +1069,7 @@ public class MetadataProvider implements
IMetadataProvider<DataSourceId, String>
PartitioningProperties idxPartitioningProperties =
getPartitioningProperties(dataset,
primaryKeyIndex.get().getIndexName());
pkidfh = new
IndexDataflowHelperFactory(storageComponentProvider.getStorageManager(),
- idxPartitioningProperties.getSpiltsProvider());
+ idxPartitioningProperties.getSplitsProvider());
}
op = createLSMPrimaryInsertOperatorDescriptor(spec,
inputRecordDesc, fieldPermutation, idfh, pkidfh,
modificationCallbackFactory, searchCallbackFactory,
numKeys, filterFields, partitionerFactory,
@@ -1243,7 +1245,7 @@ public class MetadataProvider implements
IMetadataProvider<DataSourceId, String>
IModificationOperationCallbackFactory modificationCallbackFactory
= dataset.getModificationCallbackFactory(
storageComponentProvider, secondaryIndex, indexOp,
modificationCallbackPrimaryKeyFields);
IIndexDataflowHelperFactory idfh = new IndexDataflowHelperFactory(
- storageComponentProvider.getStorageManager(),
partitioningProperties.getSpiltsProvider());
+ storageComponentProvider.getStorageManager(),
partitioningProperties.getSplitsProvider());
IBinaryHashFunctionFactory[] pkHashFunFactories =
dataset.getPrimaryHashFunctionFactories(this);
ITuplePartitionerFactory partitionerFactory = new
FieldHashPartitionerFactory(pkFields, pkHashFunFactories,
partitioningProperties.getNumberOfPartitions());
@@ -1313,7 +1315,7 @@ public class MetadataProvider implements
IMetadataProvider<DataSourceId, String>
IModificationOperationCallbackFactory modificationCallbackFactory
= dataset.getModificationCallbackFactory(
storageComponentProvider, secondaryIndex, indexOp,
modificationCallbackPrimaryKeyFields);
IIndexDataflowHelperFactory idfh = new IndexDataflowHelperFactory(
- storageComponentProvider.getStorageManager(),
partitioningProperties.getSpiltsProvider());
+ storageComponentProvider.getStorageManager(),
partitioningProperties.getSplitsProvider());
IBinaryHashFunctionFactory[] pkHashFunFactories =
dataset.getPrimaryHashFunctionFactories(this);
ITuplePartitionerFactory tuplePartitionerFactory = new
FieldHashPartitionerFactory(pkFields,
pkHashFunFactories,
partitioningProperties.getNumberOfPartitions());
@@ -1419,7 +1421,7 @@ public class MetadataProvider implements
IMetadataProvider<DataSourceId, String>
IModificationOperationCallbackFactory modificationCallbackFactory =
dataset.getModificationCallbackFactory(
storageComponentProvider, secondaryIndex, indexOp,
modificationCallbackPrimaryKeyFields);
IIndexDataflowHelperFactory indexDataflowHelperFactory = new
IndexDataflowHelperFactory(
- storageComponentProvider.getStorageManager(),
partitioningProperties.getSpiltsProvider());
+ storageComponentProvider.getStorageManager(),
partitioningProperties.getSplitsProvider());
IBinaryHashFunctionFactory[] pkHashFunFactories =
dataset.getPrimaryHashFunctionFactories(this);
ITuplePartitionerFactory partitionerFactory = new
FieldHashPartitionerFactory(pkFields, pkHashFunFactories,
partitioningProperties.getNumberOfPartitions());
@@ -1538,7 +1540,7 @@ public class MetadataProvider implements
IMetadataProvider<DataSourceId, String>
IModificationOperationCallbackFactory modificationCallbackFactory
= dataset.getModificationCallbackFactory(
storageComponentProvider, secondaryIndex, indexOp,
modificationCallbackPrimaryKeyFields);
IIndexDataflowHelperFactory indexDataFlowFactory = new
IndexDataflowHelperFactory(
- storageComponentProvider.getStorageManager(),
partitioningProperties.getSpiltsProvider());
+ storageComponentProvider.getStorageManager(),
partitioningProperties.getSplitsProvider());
IBinaryHashFunctionFactory[] pkHashFunFactories =
dataset.getPrimaryHashFunctionFactories(this);
ITuplePartitionerFactory partitionerFactory = new
FieldHashPartitionerFactory(pkFields, pkHashFunFactories,
partitioningProperties.getNumberOfPartitions());
@@ -1743,6 +1745,7 @@ public class MetadataProvider implements
IMetadataProvider<DataSourceId, String>
keyFields[k] = k;
}
+ //TODO(partitioning) check
tokenizerOp = new BinaryTokenizerOperatorDescriptor(spec,
tokenKeyPairRecDesc, tokenizerFactory,
fullTextConfigEvaluatorFactory, docField, keyFields,
isPartitioned, true, false,
MissingWriterFactory.INSTANCE);
diff --git
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/entitytupletranslators/DatasetTupleTranslator.java
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/entitytupletranslators/DatasetTupleTranslator.java
index 70dfde1acc..af31c0d999 100644
---
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/entitytupletranslators/DatasetTupleTranslator.java
+++
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/entitytupletranslators/DatasetTupleTranslator.java
@@ -73,7 +73,6 @@ import org.apache.asterix.om.types.AOrderedListType;
import org.apache.asterix.om.types.ARecordType;
import org.apache.asterix.om.types.ATypeTag;
import org.apache.asterix.om.types.AUnorderedListType;
-import org.apache.asterix.om.types.BuiltinType;
import org.apache.asterix.om.types.IAType;
import org.apache.asterix.runtime.compression.CompressionManager;
import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
@@ -221,8 +220,8 @@ public class DatasetTupleTranslator extends
AbstractTupleTranslator<Dataset> {
}
datasetDetails = new InternalDatasetDetails(fileStructure,
partitioningStrategy, partitioningKey,
- partitioningKey, keyFieldSourceIndicator,
primaryKeyTypes, autogenerated,
- filterSourceIndicator, filterField);
+ partitioningKey, keyFieldSourceIndicator,
primaryKeyTypes, autogenerated, filterSourceIndicator,
+ filterField);
break;
}
diff --git
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/DatasetUtil.java
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/DatasetUtil.java
index 2ce94355bc..277602d0af 100644
---
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/DatasetUtil.java
+++
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/DatasetUtil.java
@@ -313,7 +313,7 @@ public class DatasetUtil {
PartitioningProperties partitioningProperties =
metadataProvider.getPartitioningProperties(dataset);
IIndexDataflowHelperFactory indexHelperFactory =
new
IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
IndexDropOperatorDescriptor primaryBtreeDrop = new
IndexDropOperatorDescriptor(specPrimary, indexHelperFactory,
options, partitioningProperties.getComputeStorageMap());
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(specPrimary,
primaryBtreeDrop,
@@ -335,7 +335,7 @@ public class DatasetUtil {
JobSpecification spec =
RuntimeUtils.createJobSpecification(metadataProvider.getApplicationContext());
PartitioningProperties partitioningProperties =
metadataProvider.getPartitioningProperties(dataset);
- FileSplit[] fs =
partitioningProperties.getSpiltsProvider().getFileSplits();
+ FileSplit[] fs =
partitioningProperties.getSplitsProvider().getFileSplits();
StringBuilder sb = new StringBuilder();
for (FileSplit f : fs) {
sb.append(f).append(" ");
@@ -349,7 +349,7 @@ public class DatasetUtil {
compactionInfo.first, compactionInfo.second);
IndexBuilderFactory indexBuilderFactory =
new
IndexBuilderFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider(),
resourceFactory, true);
+ partitioningProperties.getSplitsProvider(),
resourceFactory, true);
IndexCreateOperatorDescriptor indexCreateOp = new
IndexCreateOperatorDescriptor(spec, indexBuilderFactory,
partitioningProperties.getComputeStorageMap());
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec,
indexCreateOp,
@@ -369,7 +369,7 @@ public class DatasetUtil {
PartitioningProperties partitioningProperties =
metadataProvider.getPartitioningProperties(dataset);
IIndexDataflowHelperFactory indexHelperFactory =
new
IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
LSMTreeIndexCompactOperatorDescriptor compactOp = new
LSMTreeIndexCompactOperatorDescriptor(spec,
indexHelperFactory,
partitioningProperties.getComputeStorageMap());
AlgebricksPartitionConstraintHelper.setPartitionConstraintInJobSpec(spec,
compactOp,
@@ -395,7 +395,7 @@ public class DatasetUtil {
public static IOperatorDescriptor
createPrimaryIndexScanOp(JobSpecification spec, MetadataProvider
metadataProvider,
Dataset dataset, ITupleProjectorFactory projectorFactory) throws
AlgebricksException {
PartitioningProperties partitioningProperties =
metadataProvider.getPartitioningProperties(dataset);
- IFileSplitProvider primaryFileSplitProvider =
partitioningProperties.getSpiltsProvider();
+ IFileSplitProvider primaryFileSplitProvider =
partitioningProperties.getSplitsProvider();
AlgebricksPartitionConstraint primaryPartitionConstraint =
partitioningProperties.getConstraints();
// -Infinity
int[] lowKeyFields = null;
@@ -456,7 +456,7 @@ public class DatasetUtil {
ISearchOperationCallbackFactory searchCallbackFactory =
dataset.getSearchCallbackFactory(
storageComponentProvider, primaryIndex, IndexOperation.UPSERT,
primaryKeyFields);
IIndexDataflowHelperFactory idfh = new
IndexDataflowHelperFactory(storageComponentProvider.getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
LSMPrimaryUpsertOperatorDescriptor op;
ITypeTraits[] outputTypeTraits = new
ITypeTraits[inputRecordDesc.getFieldCount() + 1
+ (dataset.hasMetaPart() ? 2 : 1) + numFilterFields];
diff --git
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SampleOperationsHelper.java
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SampleOperationsHelper.java
index 6b88960a25..7587769b3b 100644
---
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SampleOperationsHelper.java
+++
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SampleOperationsHelper.java
@@ -144,7 +144,7 @@ public class SampleOperationsHelper implements
ISecondaryIndexOperationsHelper {
// the index object information is fetched from the old source dataset
PartitioningProperties samplePartitioningProperties =
metadataProvider.getPartitioningProperties(dataset,
sampleIdx.getIndexName());
- fileSplitProvider = samplePartitioningProperties.getSpiltsProvider();
+ fileSplitProvider = samplePartitioningProperties.getSplitsProvider();
partitionConstraint = samplePartitioningProperties.getConstraints();
computeStorageMap =
samplePartitioningProperties.getComputeStorageMap();
numPartitions = samplePartitioningProperties.getNumberOfPartitions();
@@ -210,7 +210,7 @@ public class SampleOperationsHelper implements
ISecondaryIndexOperationsHelper {
Index idx = dsIndexes.get(i);
PartitioningProperties idxPartitioningProps =
metadataProvider.getPartitioningProperties(dataset,
idx.getIndexName());
- indexes[i] = new IndexDataflowHelperFactory(storageMgr,
idxPartitioningProps.getSpiltsProvider());
+ indexes[i] = new IndexDataflowHelperFactory(storageMgr,
idxPartitioningProps.getSplitsProvider());
names[i] = idx.getIndexName();
}
targetOp = new DatasetStreamStatsOperatorDescriptor(spec, recordDesc,
DATASET_STATS_OPERATOR_NAME, indexes,
diff --git
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryIndexOperationsHelper.java
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryIndexOperationsHelper.java
index 1a58423246..b824512332 100644
---
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryIndexOperationsHelper.java
+++
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryIndexOperationsHelper.java
@@ -216,7 +216,7 @@ public abstract class SecondaryIndexOperationsHelper
implements ISecondaryIndexO
PartitioningProperties partitioningProperties;
partitioningProperties =
getSecondaryIndexBulkloadPartitioningProperties(metadataProvider, dataset,
index.getIndexName());
- secondaryFileSplitProvider =
partitioningProperties.getSpiltsProvider();
+ secondaryFileSplitProvider =
partitioningProperties.getSplitsProvider();
secondaryPartitionConstraint = partitioningProperties.getConstraints();
numPrimaryKeys = dataset.getPrimaryKeys().size();
if (dataset.getDatasetType() == DatasetType.INTERNAL) {
@@ -228,7 +228,7 @@ public abstract class SecondaryIndexOperationsHelper
implements ISecondaryIndexO
}
PartitioningProperties datasetPartitioningProperties =
getSecondaryIndexBulkloadPartitioningProperties(
metadataProvider, dataset, dataset.getDatasetName());
- primaryFileSplitProvider =
datasetPartitioningProperties.getSpiltsProvider();
+ primaryFileSplitProvider =
datasetPartitioningProperties.getSplitsProvider();
primaryPartitionConstraint =
datasetPartitioningProperties.getConstraints();
setPrimaryRecDescAndComparators();
}
@@ -538,7 +538,7 @@ public abstract class SecondaryIndexOperationsHelper
implements ISecondaryIndexO
// to ensure correctness, we will run in as many locations as storage
partitions
// this will not be needed once ASTERIXDB-3176 is implemented
if (this instanceof SecondaryCorrelatedTreeIndexOperationsHelper) {
- FileSplit[] fileSplits =
partitioningProperties.getSpiltsProvider().getFileSplits();
+ FileSplit[] fileSplits =
partitioningProperties.getSplitsProvider().getFileSplits();
Pair<IFileSplitProvider, AlgebricksPartitionConstraint> sp =
StoragePathUtil.splitProviderAndPartitionConstraints(fileSplits);
return PartitioningProperties.of(sp.getFirst(), sp.getSecond(),
diff --git
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryTreeIndexOperationsHelper.java
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryTreeIndexOperationsHelper.java
index 8dc0d966c8..0cda6259e0 100644
---
a/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryTreeIndexOperationsHelper.java
+++
b/asterixdb/asterix-metadata/src/main/java/org/apache/asterix/metadata/utils/SecondaryTreeIndexOperationsHelper.java
@@ -81,7 +81,7 @@ public abstract class SecondaryTreeIndexOperationsHelper
extends SecondaryIndexO
metadataProvider.getPartitioningProperties(dataset,
index.getIndexName());
IIndexDataflowHelperFactory dataflowHelperFactory =
new
IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
// The index drop operation should be persistent regardless of temp
datasets or permanent dataset.
IndexDropOperatorDescriptor btreeDrop = new
IndexDropOperatorDescriptor(spec, dataflowHelperFactory,
dropOptions, partitioningProperties.getComputeStorageMap());
@@ -99,7 +99,7 @@ public abstract class SecondaryTreeIndexOperationsHelper
extends SecondaryIndexO
metadataProvider.getPartitioningProperties(dataset,
index.getIndexName());
IIndexDataflowHelperFactory dataflowHelperFactory =
new
IndexDataflowHelperFactory(metadataProvider.getStorageComponentProvider().getStorageManager(),
- partitioningProperties.getSpiltsProvider());
+ partitioningProperties.getSplitsProvider());
LSMTreeIndexCompactOperatorDescriptor compactOp = new
LSMTreeIndexCompactOperatorDescriptor(spec,
dataflowHelperFactory,
partitioningProperties.getComputeStorageMap());
compactOp.setSourceLocation(sourceLoc);
diff --git
a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java
b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java
index ea19a783f2..8a4b3f00bd 100644
---
a/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java
+++
b/hyracks-fullstack/algebricks/algebricks-core/src/main/java/org/apache/hyracks/algebricks/core/algebra/operators/physical/DataSourceScanPOperator.java
@@ -124,9 +124,7 @@ public class DataSourceScanPOperator extends
AbstractScanPOperator {
IOperatorDescriptor opDesc = p.first;
opDesc.setSourceLocation(scan.getSourceLocation());
builder.contributeHyracksOperator(scan, opDesc);
- if (p.second != null) {
- builder.contributeAlgebricksPartitionConstraint(opDesc, p.second);
- }
+ builder.contributeAlgebricksPartitionConstraint(opDesc, p.second);
ILogicalOperator srcExchange = scan.getInputs().get(0).getValue();
builder.contributeGraphEdge(srcExchange, 0, scan, 0);
diff --git
a/hyracks-fullstack/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/IndexInsertUpdateDeleteOperatorNodePushable.java
b/hyracks-fullstack/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/IndexInsertUpdateDeleteOperatorNodePushable.java
index d3def460aa..422aef36fc 100644
---
a/hyracks-fullstack/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/IndexInsertUpdateDeleteOperatorNodePushable.java
+++
b/hyracks-fullstack/hyracks/hyracks-storage-am-common/src/main/java/org/apache/hyracks/storage/am/common/dataflow/IndexInsertUpdateDeleteOperatorNodePushable.java
@@ -76,7 +76,7 @@ public class IndexInsertUpdateDeleteOperatorNodePushable
extends AbstractUnaryIn
ITupleFilterFactory tupleFilterFactory, ITuplePartitionerFactory
tuplePartitionerFactory,
int[][] partitionsMap) throws HyracksDataException {
this.ctx = ctx;
- this.partitions = partitionsMap != null ? partitionsMap[partition] :
new int[] { partition };
+ this.partitions = partitionsMap[partition];
this.indexes = new IIndex[partitions.length];
this.indexAccessors = new IIndexAccessor[partitions.length];
this.modCallbacks = new
IModificationOperationCallback[partitions.length];