This is an automated email from the ASF dual-hosted git repository.
yihua pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/master by this push:
new 889b2cda727 [MINOR] Replace 'obj == null' with Optional.ofNullable()
in HoodieTableSource, make the code more standardized (#8297)
889b2cda727 is described below
commit 889b2cda7272208c98df1e4d2bfb3f2c5de953be
Author: FlechazoW <[email protected]>
AuthorDate: Mon Sep 25 05:13:53 2023 +0800
[MINOR] Replace 'obj == null' with Optional.ofNullable() in
HoodieTableSource, make the code more standardized (#8297)
Co-authored-by: Y Ethan Guo <[email protected]>
---
.../java/org/apache/hudi/table/HoodieTableSource.java | 17 +++++++----------
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java
index 03eb3205e8c..e4b8db33516 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java
@@ -100,6 +100,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.Optional;
import java.util.Set;
import java.util.StringJoiner;
import java.util.concurrent.atomic.AtomicInteger;
@@ -120,7 +121,7 @@ public class HoodieTableSource implements
SupportsFilterPushDown {
   private static final Logger LOG = LoggerFactory.getLogger(HoodieTableSource.class);
- private static final int NO_LIMIT_CONSTANT = -1;
+ private static final long NO_LIMIT_CONSTANT = -1;
private final transient org.apache.hadoop.conf.Configuration hadoopConf;
private final transient HoodieTableMetaClient metaClient;
@@ -171,20 +172,16 @@ public class HoodieTableSource implements
this.partitionKeys = partitionKeys;
this.defaultPartName = defaultPartName;
this.conf = conf;
-    this.predicates = predicates == null ? Collections.emptyList() : predicates;
+    this.predicates = Optional.ofNullable(predicates).orElse(Collections.emptyList());
this.dataPruner = dataPruner;
this.partitionPruner = partitionPruner;
this.dataBucket = dataBucket;
- this.requiredPos = requiredPos == null
- ? IntStream.range(0, this.tableRowType.getFieldCount()).toArray()
- : requiredPos;
- this.limit = limit == null ? NO_LIMIT_CONSTANT : limit;
+    this.requiredPos = Optional.ofNullable(requiredPos).orElse(IntStream.range(0, this.tableRowType.getFieldCount()).toArray());
+ this.limit = Optional.ofNullable(limit).orElse(NO_LIMIT_CONSTANT);
this.hadoopConf = HadoopConfigurations.getHadoopConf(conf);
-    this.metaClient = metaClient == null ? StreamerUtil.metaClientForReader(conf, hadoopConf) : metaClient;
+    this.metaClient = Optional.ofNullable(metaClient).orElse(StreamerUtil.metaClientForReader(conf, hadoopConf));
     this.maxCompactionMemoryInBytes = StreamerUtil.getMaxCompactionMemoryInBytes(conf);
- this.internalSchemaManager = internalSchemaManager == null
- ? InternalSchemaManager.get(this.conf, this.metaClient)
- : internalSchemaManager;
+    this.internalSchemaManager = Optional.ofNullable(internalSchemaManager).orElse(InternalSchemaManager.get(this.conf, this.metaClient));
}
@Override