This is an automated email from the ASF dual-hosted git repository.
yihua pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/master by this push:
new 57a08466432 [MINOR] Fix usages of orElse (#10435)
57a08466432 is described below
commit 57a084664326bb700d7c835a9342c812c57ee851
Author: Tim Brown <[email protected]>
AuthorDate: Wed Jan 10 10:20:17 2024 -0800
[MINOR] Fix usages of orElse (#10435)
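
    Background, for readers skimming the diff below: orElse(other) always
    evaluates its argument, even when a value is already present, while
    orElseGet(supplier) invokes the supplier only when the container is
    empty. Hudi's own Option type follows the same eager/lazy contract as
    java.util.Optional, which the minimal sketch below uses for
    illustration (OrElseDemo and expensiveDefault are hypothetical names,
    not part of this change):

        import java.util.Optional;

        public class OrElseDemo {
          // Stands in for a costly default, e.g. building a partitioner
          // or listing file slices from the file system view.
          static String expensiveDefault() {
            System.out.println("computing default...");
            return "default";
          }

          public static void main(String[] args) {
            Optional<String> present = Optional.of("value");

            // orElse: the default is computed and then thrown away.
            present.orElse(expensiveDefault());              // prints "computing default..."

            // orElseGet: the supplier is skipped when a value is present.
            present.orElseGet(OrElseDemo::expensiveDefault); // prints nothing

            // On an empty Optional both variants yield "default".
            Optional.<String>empty().orElseGet(OrElseDemo::expensiveDefault);
          }
        }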
---
.../hudi/client/BaseHoodieTableServiceClient.java | 4 ++--
.../apache/hudi/client/BaseHoodieWriteClient.java | 2 +-
.../apache/hudi/client/utils/TransactionUtils.java | 2 +-
.../java/org/apache/hudi/table/HoodieTable.java | 4 ++--
.../BaseHoodieFunctionalIndexClient.java | 2 +-
.../action/savepoint/SavepointActionExecutor.java | 2 +-
.../hudi/client/HoodieFlinkTableServiceClient.java | 2 +-
.../table/action/commit/JavaBulkInsertHelper.java | 2 +-
.../MultipleSparkJobExecutionStrategy.java | 2 +-
.../table/action/commit/SparkBulkInsertHelper.java | 2 +-
.../SparkInsertOverwriteCommitActionExecutor.java | 2 +-
.../org/apache/hudi/AvroConversionUtils.scala | 22 +++++++++-------------
.../org/apache/hudi/BaseHoodieTableFileIndex.java | 4 ++--
.../apache/hudi/common/config/HoodieConfig.java | 2 +-
.../table/log/AbstractHoodieLogRecordReader.java | 2 +-
.../util/queue/BaseHoodieQueueBasedExecutor.java | 2 +-
.../apache/hudi/expression/PartialBindVisitor.java | 4 ++--
.../index/secondary/SecondaryIndexManager.java | 2 +-
.../apache/hudi/metadata/BaseTableMetadata.java | 2 +-
.../hudi/metadata/HoodieBackedTableMetadata.java | 2 +-
.../hudi/metadata/HoodieTableMetadataUtil.java | 4 ++--
.../org/apache/hudi/table/HoodieTableSource.java | 6 +++---
.../hadoop/HoodieCopyOnWriteTableInputFormat.java | 7 ++++---
.../realtime/TestHoodieRealtimeRecordReader.java | 2 +-
.../hudi/connect/utils/KafkaConnectUtils.java | 2 +-
.../BaseDatasetBulkInsertCommitActionExecutor.java | 2 +-
.../apache/hudi/cli/HDFSParquetImporterUtils.java | 2 +-
.../service/handlers/FileSliceHandler.java | 4 ++--
.../timeline/service/handlers/TimelineHandler.java | 4 ++--
.../converter/JsonToAvroSchemaConverter.java | 8 ++++----
.../hudi/utilities/sources/JsonDFSSource.java | 2 +-
.../apache/hudi/utilities/streamer/StreamSync.java | 6 +++---
.../utilities/transform/ChainedTransformer.java | 8 ++------
33 files changed, 59 insertions(+), 66 deletions(-)
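Note that not every orElse call in the diff is converted: where the default is a constant that costs nothing to evaluate, orElse stays. For example, HoodieTableSource keeps orElse(NO_LIMIT_CONSTANT), AbstractHoodieLogRecordReader keeps orElse(Function.identity()), and the timeline-service handlers keep orElse while swapping the allocating new ArrayList<>() default for the shared Collections.emptyList() constant.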
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/BaseHoodieTableServiceClient.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/BaseHoodieTableServiceClient.java
index 2ba0d553f58..5b54151dc4c 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/BaseHoodieTableServiceClient.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/BaseHoodieTableServiceClient.java
@@ -495,7 +495,7 @@ public abstract class BaseHoodieTableServiceClient<I, T, O> extends BaseHoodieCl
         preCommit(metadata);
       }
       // Update table's metadata (table)
-      writeTableMetadata(table, clusteringInstant.getTimestamp(), metadata, writeStatuses.orElse(context.emptyHoodieData()));
+      writeTableMetadata(table, clusteringInstant.getTimestamp(), metadata, writeStatuses.orElseGet(context::emptyHoodieData));
       LOG.info("Committing Clustering " + clusteringCommitTime + ". Finished with result " + metadata);
@@ -1016,7 +1016,7 @@ public abstract class BaseHoodieTableServiceClient<I, T, O> extends BaseHoodieCl
   @Deprecated
   public boolean rollback(final String commitInstantTime, Option<HoodiePendingRollbackInfo> pendingRollbackInfo, boolean skipLocking) throws HoodieRollbackException {
     final String rollbackInstantTime = pendingRollbackInfo.map(entry -> entry.getRollbackInstant().getTimestamp())
-        .orElse(createNewInstantTime(!skipLocking));
+        .orElseGet(() -> createNewInstantTime(!skipLocking));
     return rollback(commitInstantTime, pendingRollbackInfo, rollbackInstantTime, skipLocking);
   }
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/BaseHoodieWriteClient.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/BaseHoodieWriteClient.java
index a3aa6699027..a510dd0bf4a 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/BaseHoodieWriteClient.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/BaseHoodieWriteClient.java
@@ -297,7 +297,7 @@ public abstract class BaseHoodieWriteClient<T, I, K, O> extends BaseHoodieClient
     InternalSchema internalSchema;
     Schema avroSchema = HoodieAvroUtils.createHoodieWriteSchema(config.getSchema(), config.allowOperationMetadataField());
     if (historySchemaStr.isEmpty()) {
-      internalSchema = SerDeHelper.fromJson(config.getInternalSchema()).orElse(AvroInternalSchemaConverter.convert(avroSchema));
+      internalSchema = SerDeHelper.fromJson(config.getInternalSchema()).orElseGet(() -> AvroInternalSchemaConverter.convert(avroSchema));
       internalSchema.setSchemaId(Long.parseLong(instantTime));
     } else {
       internalSchema = InternalSchemaUtils.searchSchema(Long.parseLong(instantTime),
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/utils/TransactionUtils.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/utils/TransactionUtils.java
index 1bea51721c8..0277492ca09 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/utils/TransactionUtils.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/utils/TransactionUtils.java
@@ -81,7 +81,7 @@ public class TransactionUtils {
             table.getMetaClient(), currentTxnOwnerInstant.get(), lastCompletedTxnOwnerInstant),
         completedInstantsDuringCurrentWriteOperation);
-    final ConcurrentOperation thisOperation = new ConcurrentOperation(currentTxnOwnerInstant.get(), thisCommitMetadata.orElse(new HoodieCommitMetadata()));
+    final ConcurrentOperation thisOperation = new ConcurrentOperation(currentTxnOwnerInstant.get(), thisCommitMetadata.orElseGet(HoodieCommitMetadata::new));
     instantStream.forEach(instant -> {
       try {
         ConcurrentOperation otherOperation = new ConcurrentOperation(instant, table.getMetaClient());
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/HoodieTable.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/HoodieTable.java
index e4afb885ec4..ee0006b0b05 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/HoodieTable.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/HoodieTable.java
@@ -641,7 +641,7 @@ public abstract class HoodieTable<T, I, K, O> implements Serializable {
       Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) {
     final String commitTime = getPendingRollbackInstantFunc.apply(inflightInstant.getTimestamp()).map(entry -> entry.getRollbackInstant().getTimestamp())
-        .orElse(getMetaClient().createNewInstantTime());
+        .orElseGet(() -> getMetaClient().createNewInstantTime());
     scheduleRollback(context, commitTime, inflightInstant, false, config.shouldRollbackUsingMarkers(),
         false);
     rollback(context, commitTime, inflightInstant, false, false);
@@ -657,7 +657,7 @@ public abstract class HoodieTable<T, I, K, O> implements Serializable {
   public void rollbackInflightLogCompaction(HoodieInstant inflightInstant, Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) {
     final String commitTime = getPendingRollbackInstantFunc.apply(inflightInstant.getTimestamp()).map(entry -> entry.getRollbackInstant().getTimestamp())
-        .orElse(getMetaClient().createNewInstantTime());
+        .orElseGet(() -> getMetaClient().createNewInstantTime());
     scheduleRollback(context, commitTime, inflightInstant, false, config.shouldRollbackUsingMarkers(),
         false);
     rollback(context, commitTime, inflightInstant, true, false);
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/functional/BaseHoodieFunctionalIndexClient.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/functional/BaseHoodieFunctionalIndexClient.java
index 0cddcbd116c..2adb7dddeb4 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/functional/BaseHoodieFunctionalIndexClient.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/functional/BaseHoodieFunctionalIndexClient.java
@@ -45,7 +45,7 @@ public abstract class BaseHoodieFunctionalIndexClient {
   public void register(HoodieTableMetaClient metaClient, String indexName, String indexType, Map<String, Map<String, String>> columns, Map<String, String> options) {
     LOG.info("Registering index {} of using {}", indexName, indexType);
     String indexMetaPath = metaClient.getTableConfig().getIndexDefinitionPath()
-        .orElse(metaClient.getMetaPath() + Path.SEPARATOR + HoodieTableMetaClient.INDEX_DEFINITION_FOLDER_NAME + Path.SEPARATOR + HoodieTableMetaClient.INDEX_DEFINITION_FILE_NAME);
+        .orElseGet(() -> metaClient.getMetaPath() + Path.SEPARATOR + HoodieTableMetaClient.INDEX_DEFINITION_FOLDER_NAME + Path.SEPARATOR + HoodieTableMetaClient.INDEX_DEFINITION_FILE_NAME);
     // build HoodieFunctionalIndexMetadata and then add to index definition file
     metaClient.buildFunctionalIndexDefinition(indexMetaPath, indexName, indexType, columns, options);
     // update table config if necessary
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/savepoint/SavepointActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/savepoint/SavepointActionExecutor.java
index 29da31b478c..1e0330a4def 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/savepoint/SavepointActionExecutor.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/savepoint/SavepointActionExecutor.java
@@ -90,7 +90,7 @@ public class SavepointActionExecutor<T, I, K, O> extends BaseActionExecutor<T, I
         } catch (IOException e) {
           throw new HoodieSavepointException("Failed to savepoint " + instantTime, e);
         }
-      }).orElse(table.getCompletedCommitsTimeline().firstInstant().get().getTimestamp());
+      }).orElseGet(() -> table.getCompletedCommitsTimeline().firstInstant().get().getTimestamp());
       // Cannot allow savepoint time on a commit that could have been cleaned
       ValidationUtils.checkArgument(HoodieTimeline.compareTimestamps(instantTime, HoodieTimeline.GREATER_THAN_OR_EQUALS, lastCommitRetained),
diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/HoodieFlinkTableServiceClient.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/HoodieFlinkTableServiceClient.java
index 03a207c4feb..7d4b0b29794 100644
--- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/HoodieFlinkTableServiceClient.java
+++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/HoodieFlinkTableServiceClient.java
@@ -133,7 +133,7 @@ public class HoodieFlinkTableServiceClient<T> extends BaseHoodieTableServiceClie
       // commit to data table after committing to metadata table.
       // We take the lock here to ensure all writes to metadata table happens within a single lock (single writer).
       // Because more than one write to metadata table will result in conflicts since all of them updates the same partition.
-      writeTableMetadata(table, clusteringCommitTime, metadata, writeStatuses.orElse(context.emptyHoodieData()));
+      writeTableMetadata(table, clusteringCommitTime, metadata, writeStatuses.orElseGet(context::emptyHoodieData));
       LOG.info("Committing Clustering {} finished with result {}.", clusteringCommitTime, metadata);
       table.getActiveTimeline().transitionReplaceInflightToComplete(
diff --git a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaBulkInsertHelper.java b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaBulkInsertHelper.java
index 45010bdf230..5503573656c 100644
--- a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaBulkInsertHelper.java
+++ b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaBulkInsertHelper.java
@@ -78,7 +78,7 @@ public class JavaBulkInsertHelper<T, R> extends BaseBulkInsertHelper<T, List<Hoo
           config.shouldAllowMultiWriteOnSameInstant());
     }
-    BulkInsertPartitioner partitioner = userDefinedBulkInsertPartitioner.orElse(JavaBulkInsertInternalPartitionerFactory.get(config.getBulkInsertSortMode()));
+    BulkInsertPartitioner partitioner = userDefinedBulkInsertPartitioner.orElseGet(() -> JavaBulkInsertInternalPartitionerFactory.get(config.getBulkInsertSortMode()));
     // write new files
     List<WriteStatus> writeStatuses = bulkInsert(inputRecords, instantTime, table, config, performDedupe, partitioner, false,
diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java
index 4ae8552f6c0..20fc7e9f479 100644
--- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java
+++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java
@@ -220,7 +220,7 @@ public abstract class MultipleSparkJobExecutionStrategy<T>
         default:
          throw new UnsupportedOperationException(String.format("Layout optimization strategy '%s' is not supported", layoutOptStrategy));
       }
-    }).orElse(isRowPartitioner
+    }).orElseGet(() -> isRowPartitioner
         ? BulkInsertInternalPartitionerWithRowsFactory.get(getWriteConfig(), getHoodieTable().isPartitioned(), true)
         : BulkInsertInternalPartitionerFactory.get(getHoodieTable(), getWriteConfig(), true));
   }
diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkBulkInsertHelper.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkBulkInsertHelper.java
index fc4b8bf1006..2f57f6bb18b 100644
--- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkBulkInsertHelper.java
+++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkBulkInsertHelper.java
@@ -74,7 +74,7 @@ public class SparkBulkInsertHelper<T, R> extends BaseBulkInsertHelper<T, HoodieD
         executor.getCommitActionType(), instantTime), Option.empty(), config.shouldAllowMultiWriteOnSameInstant());
-    BulkInsertPartitioner partitioner = userDefinedBulkInsertPartitioner.orElse(BulkInsertInternalPartitionerFactory.get(table, config));
+    BulkInsertPartitioner partitioner = userDefinedBulkInsertPartitioner.orElseGet(() -> BulkInsertInternalPartitionerFactory.get(table, config));
     // Write new files
     HoodieData<WriteStatus> writeStatuses =
diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkInsertOverwriteCommitActionExecutor.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkInsertOverwriteCommitActionExecutor.java
index 788e1040783..ac84475bfa4 100644
--- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkInsertOverwriteCommitActionExecutor.java
+++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkInsertOverwriteCommitActionExecutor.java
@@ -71,7 +71,7 @@ public class SparkInsertOverwriteCommitActionExecutor<T>
   protected Partitioner getPartitioner(WorkloadProfile profile) {
     return table.getStorageLayout().layoutPartitionerClass()
         .map(c -> getLayoutPartitioner(profile, c))
-        .orElse(new SparkInsertOverwritePartitioner(profile, context, table, config));
+        .orElseGet(() -> new SparkInsertOverwritePartitioner(profile, context, table, config));
   }
   @Override
diff --git a/hudi-client/hudi-spark-client/src/main/scala/org/apache/hudi/AvroConversionUtils.scala b/hudi-client/hudi-spark-client/src/main/scala/org/apache/hudi/AvroConversionUtils.scala
index d84679eaf92..55877938f8c 100644
--- a/hudi-client/hudi-spark-client/src/main/scala/org/apache/hudi/AvroConversionUtils.scala
+++ b/hudi-client/hudi-spark-client/src/main/scala/org/apache/hudi/AvroConversionUtils.scala
@@ -97,19 +97,15 @@ object AvroConversionUtils {
    * TODO convert directly from GenericRecord into InternalRow instead
    */
  def createDataFrame(rdd: RDD[GenericRecord], schemaStr: String, ss: SparkSession): Dataset[Row] = {
-    if (rdd.isEmpty()) {
-      ss.emptyDataFrame
-    } else {
-      ss.createDataFrame(rdd.mapPartitions { records =>
-        if (records.isEmpty) Iterator.empty
-        else {
-          val schema = new Schema.Parser().parse(schemaStr)
-          val dataType = convertAvroSchemaToStructType(schema)
-          val converter = createConverterToRow(schema, dataType)
-          records.map { r => converter(r) }
-        }
-      }, convertAvroSchemaToStructType(new Schema.Parser().parse(schemaStr)))
-    }
+    ss.createDataFrame(rdd.mapPartitions { records =>
+      if (records.isEmpty) Iterator.empty
+      else {
+        val schema = new Schema.Parser().parse(schemaStr)
+        val dataType = convertAvroSchemaToStructType(schema)
+        val converter = createConverterToRow(schema, dataType)
+        records.map { r => converter(r) }
+      }
+    }, convertAvroSchemaToStructType(new Schema.Parser().parse(schemaStr)))
  }
 /**
diff --git a/hudi-common/src/main/java/org/apache/hudi/BaseHoodieTableFileIndex.java b/hudi-common/src/main/java/org/apache/hudi/BaseHoodieTableFileIndex.java
index 824a94abab4..bf7e25393c8 100644
--- a/hudi-common/src/main/java/org/apache/hudi/BaseHoodieTableFileIndex.java
+++ b/hudi-common/src/main/java/org/apache/hudi/BaseHoodieTableFileIndex.java
@@ -144,7 +144,7 @@ public abstract class BaseHoodieTableFileIndex implements AutoCloseable {
                                  Option<String> beginInstantTime,
                                  Option<String> endInstantTime) {
    this.partitionColumns = metaClient.getTableConfig().getPartitionFields()
-        .orElse(new String[0]);
+        .orElseGet(() -> new String[0]);
    this.metadataConfig = HoodieMetadataConfig.newBuilder()
        .fromProperties(configProperties)
@@ -284,7 +284,7 @@ public abstract class BaseHoodieTableFileIndex implements AutoCloseable {
            queryInstant.map(instant ->
                fileSystemView.getLatestMergedFileSlicesBeforeOrOn(partitionPath.path, queryInstant.get())
            )
-                .orElse(fileSystemView.getLatestFileSlices(partitionPath.path))
+                .orElseGet(() -> fileSystemView.getLatestFileSlices(partitionPath.path))
                .collect(Collectors.toList())
        ));
  }
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieConfig.java b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieConfig.java
index cd438aa965c..85d00ecb18d 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieConfig.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieConfig.java
@@ -164,7 +164,7 @@ public class HoodieConfig implements Serializable {
  public <T> Integer getIntOrDefault(ConfigProperty<T> configProperty) {
    Option<Object> rawValue = getRawValue(configProperty);
    return rawValue.map(v -> Integer.parseInt(v.toString()))
-        .orElse(Integer.parseInt(configProperty.defaultValue().toString()));
+        .orElseGet(() -> Integer.parseInt(configProperty.defaultValue().toString()));
  }
  public <T> Boolean getBoolean(ConfigProperty<T> configProperty) {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/log/AbstractHoodieLogRecordReader.java b/hudi-common/src/main/java/org/apache/hudi/common/table/log/AbstractHoodieLogRecordReader.java
index 17c77d807c0..4fc7996c873 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/log/AbstractHoodieLogRecordReader.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/log/AbstractHoodieLogRecordReader.java
@@ -956,7 +956,7 @@ public abstract class AbstractHoodieLogRecordReader {
        .orElse(Function.identity());
    Schema schema = schemaEvolutionTransformerOpt.map(Pair::getRight)
-        .orElse(dataBlock.getSchema());
+        .orElseGet(dataBlock::getSchema);
    return Pair.of(new CloseableMappingIterator<>(blockRecordsIterator, transformer), schema);
  }
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BaseHoodieQueueBasedExecutor.java b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BaseHoodieQueueBasedExecutor.java
index 86011e865dc..20b9c802f60 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BaseHoodieQueueBasedExecutor.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BaseHoodieQueueBasedExecutor.java
@@ -131,7 +131,7 @@ public abstract class BaseHoodieQueueBasedExecutor<I, O, E> implements HoodieExe
              return (Void) null;
            }, consumerExecutorService)
        )
-        .orElse(CompletableFuture.completedFuture(null));
+        .orElseGet(() -> CompletableFuture.completedFuture(null));
  }
  @Override
diff --git a/hudi-common/src/main/java/org/apache/hudi/expression/PartialBindVisitor.java b/hudi-common/src/main/java/org/apache/hudi/expression/PartialBindVisitor.java
index cece36291df..5e86570d291 100644
--- a/hudi-common/src/main/java/org/apache/hudi/expression/PartialBindVisitor.java
+++ b/hudi-common/src/main/java/org/apache/hudi/expression/PartialBindVisitor.java
@@ -108,14 +108,14 @@ public class PartialBindVisitor extends BindVisitor {
Predicates.IsNull isNull = (Predicates.IsNull) predicate;
return Option.ofNullable(isNull.child.accept(this))
.map(expr -> (Expression)Predicates.isNull(expr))
- .orElse(alwaysTrue());
+ .orElseGet(this::alwaysTrue);
}
if (predicate instanceof Predicates.IsNotNull) {
Predicates.IsNotNull isNotNull = (Predicates.IsNotNull) predicate;
return Option.ofNullable(isNotNull.child.accept(this))
.map(expr -> (Expression)Predicates.isNotNull(expr))
- .orElse(alwaysTrue());
+ .orElseGet(this::alwaysTrue);
}
if (predicate instanceof Predicates.StringStartsWith) {
diff --git a/hudi-common/src/main/java/org/apache/hudi/index/secondary/SecondaryIndexManager.java b/hudi-common/src/main/java/org/apache/hudi/index/secondary/SecondaryIndexManager.java
index 9c6d92821dd..6b508da77f8 100644
--- a/hudi-common/src/main/java/org/apache/hudi/index/secondary/SecondaryIndexManager.java
+++ b/hudi-common/src/main/java/org/apache/hudi/index/secondary/SecondaryIndexManager.java
@@ -116,7 +116,7 @@ public class SecondaryIndexManager {
    List<HoodieSecondaryIndex> newSecondaryIndexes = secondaryIndexes.map(h -> {
h.add(secondaryIndexToAdd);
return h;
- }).orElse(Collections.singletonList(secondaryIndexToAdd));
+ }).orElseGet(() -> Collections.singletonList(secondaryIndexToAdd));
newSecondaryIndexes.sort(new HoodieSecondaryIndex.HoodieIndexCompactor());
// Persistence secondary indexes' metadata to hoodie.properties file
diff --git a/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java b/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java
index 1b7c2db2daa..ccb0968b169 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java
@@ -358,7 +358,7 @@ public abstract class BaseTableMetadata extends AbstractHoodieTableMetadata {
            throw new HoodieIOException("Failed to extract file-statuses from the payload", e);
          }
        })
-        .orElse(new FileStatus[0]);
+        .orElseGet(() -> new FileStatus[0]);
    LOG.info("Listed file in partition from metadata: partition=" + relativePartitionPath + ", #files=" + statuses.length);
return statuses;
diff --git a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java
index 2bb8e0a59ec..57cc08ab59f 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java
@@ -577,7 +577,7 @@ public class HoodieBackedTableMetadata extends BaseTableMetadata {
  public Map<String, String> stats() {
    Set<String> allMetadataPartitionPaths = Arrays.stream(MetadataPartitionType.values()).map(MetadataPartitionType::getPartitionPath).collect(Collectors.toSet());
-    return metrics.map(m -> m.getStats(true, metadataMetaClient, this, allMetadataPartitionPaths)).orElse(new HashMap<>());
+    return metrics.map(m -> m.getStats(true, metadataMetaClient, this, allMetadataPartitionPaths)).orElseGet(HashMap::new);
  }
  @Override
diff --git a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java
index 839a7ed41a3..076aac0454f 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java
@@ -1029,7 +1029,7 @@ public class HoodieTableMetadataUtil {
                                                           Option<HoodieTableFileSystemView> fileSystemView,
                                                           String partition,
                                                           boolean mergeFileSlices) {
-    HoodieTableFileSystemView fsView = fileSystemView.orElse(getFileSystemView(metaClient));
+    HoodieTableFileSystemView fsView = fileSystemView.orElseGet(() -> getFileSystemView(metaClient));
    Stream<FileSlice> fileSliceStream;
    if (mergeFileSlices) {
      if (metaClient.getActiveTimeline().filterCompletedInstants().lastInstant().isPresent()) {
@@ -1057,7 +1057,7 @@ public class HoodieTableMetadataUtil {
  public static List<FileSlice> getPartitionLatestFileSlicesIncludingInflight(HoodieTableMetaClient metaClient,
                                                                              Option<HoodieTableFileSystemView> fileSystemView,
                                                                              String partition) {
-    HoodieTableFileSystemView fsView = fileSystemView.orElse(getFileSystemView(metaClient));
+    HoodieTableFileSystemView fsView = fileSystemView.orElseGet(() -> getFileSystemView(metaClient));
    Stream<FileSlice> fileSliceStream = fsView.fetchLatestFileSlicesIncludingInflight(partition);
    return fileSliceStream
        .sorted(Comparator.comparing(FileSlice::getFileId))
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java
index b4ef68a3939..5d1b01981dd 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java
@@ -176,12 +176,12 @@ public class HoodieTableSource implements
    this.dataPruner = dataPruner;
    this.partitionPruner = partitionPruner;
    this.dataBucket = dataBucket;
-    this.requiredPos = Optional.ofNullable(requiredPos).orElse(IntStream.range(0, this.tableRowType.getFieldCount()).toArray());
+    this.requiredPos = Optional.ofNullable(requiredPos).orElseGet(() -> IntStream.range(0, this.tableRowType.getFieldCount()).toArray());
    this.limit = Optional.ofNullable(limit).orElse(NO_LIMIT_CONSTANT);
    this.hadoopConf = HadoopConfigurations.getHadoopConf(conf);
-    this.metaClient = Optional.ofNullable(metaClient).orElse(StreamerUtil.metaClientForReader(conf, hadoopConf));
+    this.metaClient = Optional.ofNullable(metaClient).orElseGet(() -> StreamerUtil.metaClientForReader(conf, hadoopConf));
    this.maxCompactionMemoryInBytes = StreamerUtil.getMaxCompactionMemoryInBytes(conf);
-    this.internalSchemaManager = Optional.ofNullable(internalSchemaManager).orElse(InternalSchemaManager.get(this.conf, this.metaClient));
+    this.internalSchemaManager = Optional.ofNullable(internalSchemaManager).orElseGet(() -> InternalSchemaManager.get(this.conf, this.metaClient));
  }
@Override
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieCopyOnWriteTableInputFormat.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieCopyOnWriteTableInputFormat.java
index 04b65d8878a..88302ec74ce 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieCopyOnWriteTableInputFormat.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieCopyOnWriteTableInputFormat.java
@@ -48,8 +48,11 @@ import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Job;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
+
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
@@ -60,8 +63,6 @@ import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import static org.apache.hudi.common.config.HoodieMetadataConfig.ENABLE;
@@ -290,7 +291,7 @@ public class HoodieCopyOnWriteTableInputFormat extends HoodieTableInputFormat {
        List<FileSlice> fileSlices = queryInstant.map(
                instant -> fsView.getLatestMergedFileSlicesBeforeOrOn(relativePartitionPath, instant))
-            .orElse(fsView.getLatestFileSlices(relativePartitionPath))
+            .orElseGet(() -> fsView.getLatestFileSlices(relativePartitionPath))
.collect(Collectors.toList());
filteredFileSlices.addAll(fileSlices);
diff --git a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieRealtimeRecordReader.java b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieRealtimeRecordReader.java
index 68f425fd998..15d983ee48b 100644
--- a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieRealtimeRecordReader.java
+++ b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieRealtimeRecordReader.java
@@ -289,7 +289,7 @@ public class TestHoodieRealtimeRecordReader {
    return Arrays.stream(new File("/tmp").listFiles())
        .filter(f -> f.isDirectory() && f.getName().startsWith("hudi-" + diskType) && f.lastModified() > startTime && f.lastModified() < endTime)
.findFirst()
- .orElse(new File(""));
+ .orElseGet(() -> new File(""));
}
@Test
diff --git a/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java b/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java
index 1e27b29ae2d..cce507b9fca 100644
--- a/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java
+++ b/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java
@@ -189,7 +189,7 @@ public class KafkaConnectUtils {
    if (keyGenerator instanceof CustomAvroKeyGenerator) {
      return ((BaseKeyGenerator) keyGenerator).getPartitionPathFields().stream().map(
          pathField -> Arrays.stream(pathField.split(CustomAvroKeyGenerator.SPLIT_REGEX))
-              .findFirst().orElse("Illegal partition path field format: '$pathField' for ${c.getClass.getSimpleName}"))
+              .findFirst().orElseGet(() -> "Illegal partition path field format: '$pathField' for ${c.getClass.getSimpleName}"))
.collect(Collectors.joining(","));
}
diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/commit/BaseDatasetBulkInsertCommitActionExecutor.java b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/commit/BaseDatasetBulkInsertCommitActionExecutor.java
index 1e20e4ab663..6719b7356e1 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/commit/BaseDatasetBulkInsertCommitActionExecutor.java
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/commit/BaseDatasetBulkInsertCommitActionExecutor.java
@@ -82,7 +82,7 @@ public abstract class BaseDatasetBulkInsertCommitActionExecutor implements Seria
      hoodieWriteMetadata.setWriteStatuses(HoodieJavaRDD.getJavaRDD(statuses));
      hoodieWriteMetadata.setPartitionToReplaceFileIds(getPartitionToReplacedFileIds(statuses));
      return hoodieWriteMetadata;
-    }).orElse(new HoodieWriteMetadata<>());
+    }).orElseGet(HoodieWriteMetadata::new);
  }
  public final HoodieWriteResult execute(Dataset<Row> records, boolean isTablePartitioned) {
diff --git a/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java b/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java
index 69dd8ea795a..9783113117c 100644
--- a/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java
+++ b/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java
@@ -277,7 +277,7 @@ public class HDFSParquetImporterUtils implements Serializable {
    HoodieCompactionConfig compactionConfig = compactionStrategyClass
        .map(strategy -> HoodieCompactionConfig.newBuilder().withInlineCompaction(false)
            .withCompactionStrategy(ReflectionUtils.loadClass(strategy)).build())
-        .orElse(HoodieCompactionConfig.newBuilder().withInlineCompaction(false).build());
+        .orElseGet(() -> HoodieCompactionConfig.newBuilder().withInlineCompaction(false).build());
    HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
        .withParallelism(parallelism, parallelism)
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java
index c2b739c9f8b..4a4226724f8 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/FileSliceHandler.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@@ -97,7 +97,7 @@ public class FileSliceHandler extends Handler {
  public List<FileSliceDTO> getLatestFileSlice(String basePath, String partitionPath, String fileId) {
    return viewManager.getFileSystemView(basePath).getLatestFileSlice(partitionPath, fileId)
-        .map(FileSliceDTO::fromFileSlice).map(Arrays::asList).orElse(new ArrayList<>());
+        .map(FileSliceDTO::fromFileSlice).map(Arrays::asList).orElse(Collections.emptyList());
  }
  public List<CompactionOpDTO> getPendingCompactionOperations(String basePath) {
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/TimelineHandler.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/TimelineHandler.java
index 5d788ac74fc..b9a721aae36 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/TimelineHandler.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/TimelineHandler.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
/**
@@ -43,7 +43,7 @@ public class TimelineHandler extends Handler {
public List<InstantDTO> getLastInstant(String basePath) {
    return viewManager.getFileSystemView(basePath).getLastInstant().map(InstantDTO::fromInstant)
- .map(Arrays::asList).orElse(new ArrayList<>());
+ .map(Arrays::asList).orElse(Collections.emptyList());
}
public TimelineDTO getTimeline(String basePath) {
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/converter/JsonToAvroSchemaConverter.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/converter/JsonToAvroSchemaConverter.java
index 794de225a5e..9f892ab8f0e 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/converter/JsonToAvroSchemaConverter.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/converter/JsonToAvroSchemaConverter.java
@@ -78,12 +78,12 @@ public class JsonToAvroSchemaConverter implements SchemaRegistryProvider.SchemaC
  }
  private static ArrayNode convertProperties(JsonNode jsonProperties, Set<String> required) {
-    List<JsonNode> avroFields = new ArrayList<>();
+    List<JsonNode> avroFields = new ArrayList<>(jsonProperties.size());
    jsonProperties.fieldNames().forEachRemaining(name -> avroFields.add(tryConvertNestedProperty(name, jsonProperties.get(name))
-        .or(tryConvertArrayProperty(name, jsonProperties.get(name)))
-        .or(tryConvertEnumProperty(name, jsonProperties.get(name)))
-        .orElse(convertProperty(name, jsonProperties.get(name), required.contains(name)))));
+        .or(() -> tryConvertArrayProperty(name, jsonProperties.get(name)))
+        .or(() -> tryConvertEnumProperty(name, jsonProperties.get(name)))
+        .orElseGet(() -> convertProperty(name, jsonProperties.get(name), required.contains(name)))));
    return MAPPER.createArrayNode().addAll(avroFields);
  }
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonDFSSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonDFSSource.java
index 64da4f4f50f..e658bde5853 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonDFSSource.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonDFSSource.java
@@ -47,7 +47,7 @@ public class JsonDFSSource extends JsonSource {
        pathSelector.getNextFilePathsAndMaxModificationTime(sparkContext, lastCkptStr, sourceLimit);
    return selPathsWithMaxModificationTime.getLeft()
        .map(pathStr -> new InputBatch<>(Option.of(fromFiles(pathStr)), selPathsWithMaxModificationTime.getRight()))
-        .orElse(new InputBatch<>(Option.empty(), selPathsWithMaxModificationTime.getRight()));
+        .orElseGet(() -> new InputBatch<>(Option.empty(), selPathsWithMaxModificationTime.getRight()));
  }
  private JavaRDD<String> fromFiles(String pathStr) {
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
index 260e4d48073..0407bea1935 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
@@ -615,7 +615,7 @@ public class StreamSync implements Serializable, Closeable {
          AvroConversionUtils.convertStructTypeToAvroSchema(df.schema(), getAvroRecordQualifiedName(cfg.targetTableName)));
      schemaProvider = incomingSchemaOpt.map(incomingSchema -> getDeducedSchemaProvider(incomingSchema, dataAndCheckpoint.getSchemaProvider(), metaClient))
-          .orElse(dataAndCheckpoint.getSchemaProvider());
+          .orElseGet(dataAndCheckpoint::getSchemaProvider);
      if (useRowWriter) {
        inputBatchForWriter = new InputBatch(transformed, checkpointStr, schemaProvider);
@@ -903,12 +903,12 @@ public class StreamSync implements Serializable, Closeable {
    instantTime = startCommit(instantTime, !autoGenerateRecordKeys);
    if (useRowWriter) {
-      Dataset<Row> df = (Dataset<Row>) inputBatch.getBatch().orElse(hoodieSparkContext.getSqlContext().emptyDataFrame());
+      Dataset<Row> df = (Dataset<Row>) inputBatch.getBatch().orElseGet(() -> hoodieSparkContext.getSqlContext().emptyDataFrame());
      HoodieWriteConfig hoodieWriteConfig = prepareHoodieConfigForRowWriter(inputBatch.getSchemaProvider().getTargetSchema());
      BaseDatasetBulkInsertCommitActionExecutor executor = new HoodieStreamerDatasetBulkInsertCommitActionExecutor(hoodieWriteConfig, writeClient, instantTime);
      writeClientWriteResult = new WriteClientWriteResult(executor.execute(df, !HoodieStreamerUtils.getPartitionColumns(props).isEmpty()).getWriteStatuses());
    } else {
-      JavaRDD<HoodieRecord> records = (JavaRDD<HoodieRecord>) inputBatch.getBatch().orElse(hoodieSparkContext.emptyRDD());
+      JavaRDD<HoodieRecord> records = (JavaRDD<HoodieRecord>) inputBatch.getBatch().orElseGet(() -> hoodieSparkContext.emptyRDD());
      // filter dupes if needed
      if (cfg.filterDupes) {
        records = DataSourceUtils.dropDuplicates(hoodieSparkContext.jsc(), records, writeClient.getConfig());
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/ChainedTransformer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/ChainedTransformer.java
index 367448533b3..4ff7dd6e1c2 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/ChainedTransformer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/ChainedTransformer.java
@@ -124,12 +124,8 @@ public class ChainedTransformer implements Transformer {
      throw new HoodieTransformPlanException("Either source schema or source dataset should be available to fetch the schema");
    }
    StructType incomingStruct = incomingStructOpt
-        .orElse(sourceSchemaOpt.isPresent() ? AvroConversionUtils.convertAvroSchemaToStructType(sourceSchemaOpt.get()) : rowDatasetOpt.get().schema());
-    try {
-      return transformerInfo.getTransformer().transformedSchema(jsc, sparkSession, incomingStruct, properties).asNullable();
-    } catch (Exception e) {
-      throw e;
-    }
+        .orElseGet(() -> sourceSchemaOpt.isPresent() ? AvroConversionUtils.convertAvroSchemaToStructType(sourceSchemaOpt.get()) : rowDatasetOpt.get().schema());
+    return transformerInfo.getTransformer().transformedSchema(jsc, sparkSession, incomingStruct, properties).asNullable();
}
@Override