This is an automated email from the ASF dual-hosted git repository.

yihua pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 57fdd8b46c12 refactor: Add Lombok annotations to hudi-hadoop-common module (#17662)
57fdd8b46c12 is described below

commit 57fdd8b46c12c403cedadbd33dfbb09d2e5e3bc7
Author: voonhous <[email protected]>
AuthorDate: Tue Dec 23 05:39:10 2025 +0800

    refactor: Add Lombok annotations to hudi-hadoop-common module (#17662)
---
 .../HoodieRowDataParquetOutputStreamWriter.java    |  2 +-
 .../io/storage/HoodieSparkParquetStreamWriter.java |  2 +-
 hudi-hadoop-common/pom.xml                         |  6 ++
 .../common/config/DFSPropertiesConfiguration.java  | 16 +++--
 .../hudi/common/config/HoodieParquetConfig.java    | 49 ++--------------
 .../common/table/log/HoodieLogFormatWriter.java    | 30 ++++------
 .../apache/hudi/common/util/HadoopConfigUtils.java |  7 +--
 .../org/apache/hudi/common/util/ParquetUtils.java  |  8 +--
 .../org/apache/hudi/hadoop/fs/HadoopFSUtils.java   | 16 ++---
 .../hadoop/fs/HoodieSerializableFileStatus.java    | 68 ++--------------------
 .../hudi/hadoop/fs/HoodieWrapperFileSystem.java    | 18 ++----
 .../hudi/hadoop/fs/inline/InMemoryFileSystem.java  | 13 ++---
 .../hudi/io/hadoop/HoodieBaseParquetWriter.java    |  2 +-
 .../storage/hadoop/HoodieParquetStreamWriter.java  |  2 +-
 .../parquet/io/HoodieParquetBinaryCopyBase.java    | 18 +++---
 .../parquet/io/HoodieParquetFileBinaryCopier.java  | 13 ++---
 .../hudi/parquet/io/ParquetBinaryCopyChecker.java  | 40 ++++---------
 .../avro/HoodieAvroParquetSchemaConverter.java     |  7 +--
 .../lock/TestInProcessLockProvider.java            | 29 +++++----
 .../transaction/lock/TestNoopLockProvider.java     |  5 +-
 .../fs/TestFSUtilsWithRetryWrapperEnable.java      |  7 +--
 .../table/view/TestHoodieTableFileSystemView.java  |  8 +--
 .../table/view/TestIncrementalFSViewSync.java      | 15 +++--
 .../common/testutils/HoodieCommonTestHarness.java  | 15 +++--
 .../hudi/common/testutils/HoodieTestTable.java     | 21 +++----
 .../testutils/minicluster/HdfsTestService.java     | 20 +++----
 .../io/hadoop/TestHoodieBaseParquetWriter.java     | 17 ++----
 .../io/TestHoodieParquetFileBinaryCopier.java      | 20 ++-----
 .../parquet/io/TestParquetBinaryCopyChecker.java   |  4 +-
 29 files changed, 155 insertions(+), 323 deletions(-)

diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataParquetOutputStreamWriter.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataParquetOutputStreamWriter.java
index 46b93df8d22c..ca65291e2c69 100644
--- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataParquetOutputStreamWriter.java
+++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataParquetOutputStreamWriter.java
@@ -64,7 +64,7 @@ public class HoodieRowDataParquetOutputStreamWriter implements HoodieRowDataFile
     parquetWriterbuilder.withRowGroupSize(parquetConfig.getBlockSize());
     parquetWriterbuilder.withPageSize(parquetConfig.getPageSize());
     parquetWriterbuilder.withDictionaryPageSize(parquetConfig.getPageSize());
-    parquetWriterbuilder.withDictionaryEncoding(parquetConfig.dictionaryEnabled());
+    parquetWriterbuilder.withDictionaryEncoding(parquetConfig.isDictionaryEnabled());
     parquetWriterbuilder.withWriterVersion(ParquetWriter.DEFAULT_WRITER_VERSION);
     parquetWriterbuilder.withConf(parquetConfig.getStorageConf().unwrapAs(Configuration.class));
     this.writer = parquetWriterbuilder.build();
diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetStreamWriter.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetStreamWriter.java
index 4374227d8b20..ab3e4277e286 100644
--- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetStreamWriter.java
+++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetStreamWriter.java
@@ -48,7 +48,7 @@ public class HoodieSparkParquetStreamWriter implements HoodieSparkFileWriter, Au
         .withRowGroupSize(parquetConfig.getBlockSize())
         .withPageSize(parquetConfig.getPageSize())
         .withDictionaryPageSize(parquetConfig.getPageSize())
-        .withDictionaryEncoding(parquetConfig.dictionaryEnabled())
+        .withDictionaryEncoding(parquetConfig.isDictionaryEnabled())
         .withWriterVersion(ParquetWriter.DEFAULT_WRITER_VERSION)
         .withConf(parquetConfig.getHadoopConf())
         .build();
diff --git a/hudi-hadoop-common/pom.xml b/hudi-hadoop-common/pom.xml
index d12f68c29c3e..55ddf796036f 100644
--- a/hudi-hadoop-common/pom.xml
+++ b/hudi-hadoop-common/pom.xml
@@ -124,6 +124,12 @@
       <artifactId>kryo-shaded</artifactId>
     </dependency>
 
+    <!-- Lombok -->
+    <dependency>
+      <groupId>org.projectlombok</groupId>
+      <artifactId>lombok</artifactId>
+    </dependency>
+
     <dependency>
       <groupId>org.apache.hudi</groupId>
       <artifactId>hudi-tests-common</artifactId>
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java
index 046f82ea6b44..ecfc4661cdea 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java
@@ -28,9 +28,8 @@ import org.apache.hudi.storage.HoodieStorage;
 import org.apache.hudi.storage.HoodieStorageUtils;
 import org.apache.hudi.storage.StoragePath;
 
+import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.conf.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
@@ -57,10 +56,9 @@ import java.util.concurrent.atomic.AtomicReference;
  *
  * Note: Not reusing commons-configuration since it has too many conflicting runtime deps.
  */
+@Slf4j
 public class DFSPropertiesConfiguration extends PropertiesConfig {
 
-  private static final Logger LOG = LoggerFactory.getLogger(DFSPropertiesConfiguration.class);
-
   public static final String DEFAULT_PROPERTIES_FILE = "hudi-defaults.conf";
   public static final String CONF_FILE_DIR_ENV_NAME = "HUDI_CONF_DIR";
   public static final String DEFAULT_CONF_FILE_DIR = "file:/etc/hudi/conf";
@@ -125,7 +123,7 @@ public class DFSPropertiesConfiguration extends PropertiesConfig {
     try {
       conf.addPropsFromFile(DEFAULT_PATH);
     } catch (Exception e) {
-      LOG.warn("Cannot load default config file: {}", DEFAULT_PATH, e);
+      log.warn("Cannot load default config file: {}", DEFAULT_PATH, e);
     }
     Option<StoragePath> defaultConfPath = getConfPathFromEnv();
     if (defaultConfPath.isPresent() && !defaultConfPath.get().equals(DEFAULT_PATH)) {
@@ -160,7 +158,7 @@ public class DFSPropertiesConfiguration extends PropertiesConfig {
 
     try {
       if (filePath.equals(DEFAULT_PATH) && !storage.exists(filePath)) {
-        LOG.debug("Properties file {} not found. Ignoring to load props file", 
filePath);
+        log.debug("Properties file {} not found. Ignoring to load props file", 
filePath);
         return;
       }
     } catch (IOException ioe) {
@@ -171,7 +169,7 @@ public class DFSPropertiesConfiguration extends PropertiesConfig {
       visitedFilePaths.add(filePath.toString());
       addPropsFromStream(reader, filePath);
     } catch (IOException ioe) {
-      LOG.error("Error reading in properties from dfs from file " + filePath);
+      log.error("Error reading in properties from dfs from file " + filePath);
      throw new HoodieIOException("Cannot read properties from dfs from file " + filePath, ioe);
     }
   }
@@ -220,7 +218,7 @@ public class DFSPropertiesConfiguration extends PropertiesConfig {
     if (props == null) {
       TypedProperties loaded = loadGlobalProps();
       if (GlobalPropsHolder.INSTANCE.compareAndSet(null, loaded)) {
-        LOG.info("Loaded global properties from configuration");
+        log.info("Loaded global properties from configuration");
       }
       props = GlobalPropsHolder.INSTANCE.get();
     }
@@ -254,7 +252,7 @@ public class DFSPropertiesConfiguration extends PropertiesConfig {
   private static Option<StoragePath> getConfPathFromEnv() {
     String confDir = System.getenv(CONF_FILE_DIR_ENV_NAME);
     if (confDir == null) {
-      LOG.debug("Environment variable " + CONF_FILE_DIR_ENV_NAME + ", not set. 
If desired, set it to the folder containing: " + DEFAULT_PROPERTIES_FILE);
+      log.debug("Environment variable " + CONF_FILE_DIR_ENV_NAME + ", not set. 
If desired, set it to the folder containing: " + DEFAULT_PROPERTIES_FILE);
       return Option.empty();
     }
     if (StringUtils.isNullOrEmpty(URI.create(confDir).getScheme())) {
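
The LOG -> log renames above are a consequence of Lombok's @Slf4j, which generates the logger
field itself (named "log", hence the lowercase call sites). A minimal sketch of the expansion,
assuming standard Lombok behavior; the class below is illustrative, not from the patch:

    import lombok.extern.slf4j.Slf4j;

    @Slf4j
    public class SomeService {
      // Lombok injects the equivalent of the removed hand-written field:
      // private static final org.slf4j.Logger log =
      //     org.slf4j.LoggerFactory.getLogger(SomeService.class);
      void run() {
        log.warn("Cannot load default config file: {}", "some-path");
      }
    }
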
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/HoodieParquetConfig.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/HoodieParquetConfig.java
index 608b56234748..ab74452856f2 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/HoodieParquetConfig.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/HoodieParquetConfig.java
@@ -20,13 +20,18 @@ package org.apache.hudi.common.config;
 
 import org.apache.hudi.storage.StorageConfiguration;
 
+import lombok.AllArgsConstructor;
+import lombok.Getter;
 import org.apache.parquet.hadoop.metadata.CompressionCodecName;
 
 /**
  * Base ParquetConfig to hold config params for writing to Parquet.
  * @param <T>
  */
+@AllArgsConstructor
+@Getter
 public class HoodieParquetConfig<T> {
+
   private final T writeSupport;
   private final CompressionCodecName compressionCodecName;
   private final int blockSize;
@@ -35,48 +40,4 @@ public class HoodieParquetConfig<T> {
   private final StorageConfiguration<?> storageConf;
   private final double compressionRatio;
   private final boolean dictionaryEnabled;
-
-  public HoodieParquetConfig(T writeSupport, CompressionCodecName compressionCodecName, int blockSize, int pageSize,
-                             long maxFileSize, StorageConfiguration<?> storageConf, double compressionRatio, boolean dictionaryEnabled) {
-    this.writeSupport = writeSupport;
-    this.compressionCodecName = compressionCodecName;
-    this.blockSize = blockSize;
-    this.pageSize = pageSize;
-    this.maxFileSize = maxFileSize;
-    this.storageConf = storageConf;
-    this.compressionRatio = compressionRatio;
-    this.dictionaryEnabled = dictionaryEnabled;
-  }
-
-  public CompressionCodecName getCompressionCodecName() {
-    return compressionCodecName;
-  }
-
-  public int getBlockSize() {
-    return blockSize;
-  }
-
-  public int getPageSize() {
-    return pageSize;
-  }
-
-  public long getMaxFileSize() {
-    return maxFileSize;
-  }
-
-  public StorageConfiguration<?> getStorageConf() {
-    return storageConf;
-  }
-
-  public double getCompressionRatio() {
-    return compressionRatio;
-  }
-
-  public T getWriteSupport() {
-    return writeSupport;
-  }
-
-  public boolean dictionaryEnabled() {
-    return dictionaryEnabled;
-  }
 }
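
The call-site changes from dictionaryEnabled() to isDictionaryEnabled() earlier in this patch
follow from Lombok's accessor naming: @Getter generates get-prefixed getters, except for
primitive boolean fields, which get is-prefixed getters. A minimal sketch (standard Lombok
semantics; the Example class is illustrative, not from the patch):

    import lombok.AllArgsConstructor;
    import lombok.Getter;

    @AllArgsConstructor
    @Getter
    class Example {
      private final int blockSize;             // generated getter: getBlockSize()
      private final boolean dictionaryEnabled; // generated getter: isDictionaryEnabled()
    }

    // @AllArgsConstructor generates a constructor taking all fields in declaration order:
    // boolean enabled = new Example(128, true).isDictionaryEnabled();
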
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFormatWriter.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFormatWriter.java
index 38df6535a727..23a69699a43e 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFormatWriter.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFormatWriter.java
@@ -26,13 +26,13 @@ import org.apache.hudi.common.util.VisibleForTesting;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.storage.HoodieStorage;
 
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.ipc.RemoteException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -42,14 +42,15 @@ import java.util.List;
 /**
  * HoodieLogFormatWriter can be used to append blocks to a log file Use HoodieLogFormat.WriterBuilder to construct.
  */
+@Slf4j
 public class HoodieLogFormatWriter implements HoodieLogFormat.Writer {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HoodieLogFormatWriter.class);
-
+  @Getter
   private HoodieLogFile logFile;
   private FSDataOutputStream output;
 
   private final HoodieStorage storage;
+  @Getter
   private final long sizeThreshold;
   private final Integer bufferSize;
   private final Short replication;
@@ -76,15 +77,6 @@ public class HoodieLogFormatWriter implements HoodieLogFormat.Writer {
     addShutDownHook();
   }
 
-  @Override
-  public HoodieLogFile getLogFile() {
-    return logFile;
-  }
-
-  public long getSizeThreshold() {
-    return sizeThreshold;
-  }
-
   /**
    * Overrides the output stream, only for test purpose.
    */
@@ -108,14 +100,14 @@ public class HoodieLogFormatWriter implements HoodieLogFormat.Writer {
           }
           // Block size does not matter as we will always manually auto-flush
           createNewFile();
-          LOG.info("Created a new log file: {}", logFile);
+          log.info("Created a new log file: {}", logFile);
           created = true;
         } catch (FileAlreadyExistsException ignored) {
-          LOG.info("File {} already exists, rolling over", logFile.getPath());
+          log.info("File {} already exists, rolling over", logFile.getPath());
           rollOver();
         } catch (RemoteException re) {
          if (re.getClassName().contentEquals(AlreadyBeingCreatedException.class.getName())) {
-            LOG.warn("Another task executor writing to the same log file({}), rolling over", logFile);
+            log.warn("Another task executor writing to the same log file({}), rolling over", logFile);
            // Rollover the current log file (since cannot get a stream handle) and create new one
             rollOver();
           } else {
@@ -214,7 +206,7 @@ public class HoodieLogFormatWriter implements HoodieLogFormat.Writer {
   private void rolloverIfNeeded() throws IOException {
     // Roll over if the size is past the threshold
     if (getCurrentSize() > sizeThreshold) {
-      LOG.info("CurrentSize {} has reached threshold {}. Rolling over to the 
next version", getCurrentSize(), sizeThreshold);
+      log.info("CurrentSize {} has reached threshold {}. Rolling over to the 
next version", getCurrentSize(), sizeThreshold);
       rollOver();
     }
   }
@@ -280,10 +272,10 @@ public class HoodieLogFormatWriter implements HoodieLogFormat.Writer {
     shutdownThread = new Thread() {
       public void run() {
         try {
-          LOG.info("running HoodieLogFormatWriter shutdown hook to close 
output stream for log file: {}", logFile);
+          log.info("running HoodieLogFormatWriter shutdown hook to close 
output stream for log file: {}", logFile);
           closeStream();
         } catch (Exception e) {
-          LOG.warn("unable to close output stream for log file: {}", logFile, 
e);
+          log.warn("unable to close output stream for log file: {}", logFile, 
e);
           // fail silently for any sort of exception
         }
       }
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java
index bececbe7461d..021aecc4e705 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java
@@ -21,17 +21,16 @@ package org.apache.hudi.common.util;
 
 import org.apache.hudi.common.config.ConfigProperty;
 
+import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.conf.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.util.Properties;
 
 /**
  * Utils on Hadoop {@link Configuration}.
  */
+@Slf4j
 public class HadoopConfigUtils {
-  private static final Logger LOG = LoggerFactory.getLogger(HadoopConfigUtils.class);
 
   /**
    * Creates a Hadoop {@link Configuration} instance with the properties.
@@ -62,7 +61,7 @@ public class HadoopConfigUtils {
     for (String alternative : configProperty.getAlternatives()) {
       String altValue = conf.get(alternative);
       if (altValue != null) {
-        LOG.warn("The configuration key '{}' has been deprecated "
+        log.warn("The configuration key '{}' has been deprecated "
             + "and may be removed in the future. Please use the new key '{}' 
instead.",
             alternative, configProperty.key());
         return Option.of(altValue);
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java
index 1de5513e424d..0d8cdc3f9940 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java
@@ -41,6 +41,7 @@ import org.apache.hudi.stats.ValueType;
 import org.apache.hudi.storage.HoodieStorage;
 import org.apache.hudi.storage.StoragePath;
 
+import lombok.extern.slf4j.Slf4j;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.hadoop.conf.Configuration;
@@ -61,8 +62,6 @@ import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.OriginalType;
 import org.apache.parquet.schema.PrimitiveType;
 import org.apache.parquet.schema.Types;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nonnull;
 
@@ -94,10 +93,9 @@ import static org.apache.parquet.format.converter.ParquetMetadataConverter.SKIP_
 /**
  * Utility functions involving with parquet.
  */
+@Slf4j
 public class ParquetUtils extends FileFormatUtils {
 
-  private static final Logger LOG = LoggerFactory.getLogger(ParquetUtils.class);
-
   /**
   * Read the rowKey list matching the given filter, from the given parquet file. If the filter is empty, then this will
    * return all the rowkeys and corresponding positions.
@@ -258,7 +256,7 @@ public class ParquetUtils extends FileFormatUtils {
       MessageType schema = parquetUtils.readSchema(storage, parquetFilePath);
       return schema.hashCode();
     } catch (Exception e) {
-      LOG.warn("Failed to read schema hash from file: {}", parquetFilePath, e);
+      log.warn("Failed to read schema hash from file: {}", parquetFilePath, e);
       return 0;
     }
   }
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HadoopFSUtils.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HadoopFSUtils.java
index 02321d4bf7f7..270040507307 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HadoopFSUtils.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HadoopFSUtils.java
@@ -35,6 +35,7 @@ import org.apache.hudi.storage.StoragePathInfo;
 import org.apache.hudi.storage.StorageSchemes;
 import org.apache.hudi.storage.hadoop.HadoopStorageConfiguration;
 
+import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -45,8 +46,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -65,8 +64,9 @@ import static org.apache.hudi.common.fs.FSUtils.LOG_FILE_PATTERN;
 /**
  * Utility functions related to accessing the file storage on Hadoop.
  */
+@Slf4j
 public class HadoopFSUtils {
-  private static final Logger LOG = LoggerFactory.getLogger(HadoopFSUtils.class);
+
   private static final String HOODIE_ENV_PROPS_PREFIX = "HOODIE_ENV_";
   private static final int MAX_ATTEMPTS_RECOVER_LEASE = 10;
 
@@ -74,7 +74,7 @@ public class HadoopFSUtils {
     // look for all properties, prefixed to be picked up
     for (Map.Entry<String, String> prop : System.getenv().entrySet()) {
       if (prop.getKey().startsWith(HOODIE_ENV_PROPS_PREFIX)) {
-        LOG.info("Picking up value for hoodie env var : {}", prop.getKey());
+        log.info("Picking up value for hoodie env var : {}", prop.getKey());
        conf.set(prop.getKey().replace(HOODIE_ENV_PROPS_PREFIX, "").replaceAll("_DOT_", "."), prop.getValue());
       }
     }
@@ -141,10 +141,10 @@ public class HadoopFSUtils {
     File localFile = new File(path);
     if (!providedPath.isAbsolute() && localFile.exists()) {
       Path resolvedPath = new Path("file://" + localFile.getAbsolutePath());
-      LOG.info("Resolving file {} to be a local file.", path);
+      log.info("Resolving file {} to be a local file.", path);
       return resolvedPath;
     }
-    LOG.info("Resolving file {} to be a remote file.", path);
+    log.info("Resolving file {} to be a remote file.", path);
     return providedPath;
   }
 
@@ -471,11 +471,11 @@ public class HadoopFSUtils {
    */
  public static boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p)
       throws IOException, InterruptedException {
-    LOG.info("Recover lease on dfs file {}", p);
+    log.info("Recover lease on dfs file {}", p);
     // initiate the recovery
     boolean recovered = false;
    for (int nbAttempt = 0; nbAttempt < MAX_ATTEMPTS_RECOVER_LEASE; nbAttempt++) {
-      LOG.info("Attempt {} to recover lease on dfs file {}", nbAttempt, p);
+      log.info("Attempt {} to recover lease on dfs file {}", nbAttempt, p);
       recovered = dfs.recoverLease(p);
       if (recovered) {
         break;
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java
index 8a7b5f0d974f..9535fac5cedb 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java
@@ -19,6 +19,8 @@
 
 package org.apache.hudi.hadoop.fs;
 
+import lombok.AllArgsConstructor;
+import lombok.Getter;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -35,11 +37,13 @@ import java.util.stream.Collectors;
  * This class is needed because `hudi-hadoop-mr-bundle` relies on Avro 1.8.2,
  * and won't work well with `HoodieFileStatus`
  */
+@AllArgsConstructor
+@Getter
 public class HoodieSerializableFileStatus implements Serializable {
 
   private final Path path;
   private final long length;
-  private final Boolean isDir;
+  private final boolean dir;
   private final short blockReplication;
   private final long blockSize;
   private final long modificationTime;
@@ -49,66 +53,6 @@ public class HoodieSerializableFileStatus implements Serializable {
   private final String group;
   private final Path symlink;
 
-  HoodieSerializableFileStatus(Path path, long length, boolean isDir, short blockReplication,
-                               long blockSize, long modificationTime, long accessTime,
-                               FsPermission permission, String owner, String group, Path symlink) {
-    this.path = path;
-    this.length = length;
-    this.isDir = isDir;
-    this.blockReplication = blockReplication;
-    this.blockSize = blockSize;
-    this.modificationTime = modificationTime;
-    this.accessTime = accessTime;
-    this.permission = permission;
-    this.owner = owner;
-    this.group = group;
-    this.symlink = symlink;
-  }
-
-  public Path getPath() {
-    return path;
-  }
-
-  public long getLen() {
-    return length;
-  }
-
-  public Boolean isDirectory() {
-    return isDir;
-  }
-
-  public short getReplication() {
-    return blockReplication;
-  }
-
-  public long getBlockSize() {
-    return blockSize;
-  }
-
-  public long getModificationTime() {
-    return modificationTime;
-  }
-
-  public long getAccessTime() {
-    return accessTime;
-  }
-
-  public FsPermission getPermission() {
-    return permission;
-  }
-
-  public String getOwner() {
-    return owner;
-  }
-
-  public String getGroup() {
-    return group;
-  }
-
-  public Path getSymlink() {
-    return symlink;
-  }
-
  public static HoodieSerializableFileStatus fromFileStatus(FileStatus status) {
     Path symlink;
     try {
@@ -131,7 +75,7 @@ public class HoodieSerializableFileStatus implements Serializable {
   }
 
   public static FileStatus toFileStatus(HoodieSerializableFileStatus status) {
-    return new FileStatus(status.getLen(), status.isDirectory(), status.getReplication(),
+    return new FileStatus(status.getLength(), status.isDir(), status.getBlockReplication(),
         status.getBlockSize(), status.getModificationTime(), status.getAccessTime(), status.getPermission(),
         status.getOwner(), status.getGroup(), status.getSymlink(), status.getPath());
   }
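
The isDir -> dir field rename keeps Lombok's derived accessor names close to the old
hand-written API: @Getter derives names from fields, so under standard Lombok rules

    private final long length;            // -> getLength()            (was getLen())
    private final boolean dir;            // -> isDir()                (was isDirectory())
    private final short blockReplication; // -> getBlockReplication()  (was getReplication())

which is why toFileStatus() above now calls getLength()/isDir()/getBlockReplication().
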
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java
index 276b215fd18e..72e989ee9846 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java
@@ -28,6 +28,8 @@ import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.storage.StoragePath;
 import org.apache.hudi.storage.StorageSchemes;
 
+import lombok.Getter;
+import lombok.NoArgsConstructor;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
@@ -70,7 +72,9 @@ import static org.apache.hudi.hadoop.fs.HadoopFSUtils.convertToStoragePath;
  * HoodieWrapperFileSystem wraps the default file system. It holds state about the open streams in the file system to
  * support getting the written size to each of the open streams.
  */
+@NoArgsConstructor
 public class HoodieWrapperFileSystem extends FileSystem {
+
   public static final String HOODIE_SCHEME_PREFIX = "hoodie-";
 
   private static final String TMP_PATH_POSTFIX = ".tmp";
@@ -94,7 +98,9 @@ public class HoodieWrapperFileSystem extends FileSystem {
 
 
  private final ConcurrentMap<String, SizeAwareFSDataOutputStream> openStreams = new ConcurrentHashMap<>();
+  @Getter
   private FileSystem fileSystem;
+  @Getter
   private URI uri;
   private ConsistencyGuard consistencyGuard = new NoOpConsistencyGuard();
 
@@ -136,9 +142,6 @@ public class HoodieWrapperFileSystem extends FileSystem {
     return executeFuncWithTimeMetrics(metricName, p, func);
   }
 
-  public HoodieWrapperFileSystem() {
-  }
-
  public HoodieWrapperFileSystem(FileSystem fileSystem, ConsistencyGuard consistencyGuard) {
     this.fileSystem = fileSystem;
     this.uri = fileSystem.getUri();
@@ -198,11 +201,6 @@ public class HoodieWrapperFileSystem extends FileSystem {
     // fileSystem.setConf(conf);
   }
 
-  @Override
-  public URI getUri() {
-    return uri;
-  }
-
   @Override
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     return wrapInputStream(f, fileSystem.open(convertToDefaultPath(f), 
bufferSize));
@@ -1010,8 +1008,4 @@ public class HoodieWrapperFileSystem extends FileSystem {
     throw new IllegalArgumentException(
         file + " does not have a open stream. Cannot get the bytes written on 
the stream");
   }
-
-  public FileSystem getFileSystem() {
-    return fileSystem;
-  }
 }
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java
index 572e593f6d89..2e2c8b86624f 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java
@@ -19,6 +19,9 @@
 
 package org.apache.hudi.hadoop.fs.inline;
 
+import lombok.AccessLevel;
+import lombok.Getter;
+import lombok.NoArgsConstructor;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -37,17 +40,16 @@ import java.net.URISyntaxException;
  * A FileSystem which stores all content in memory and returns a byte[] when {@link #getFileAsBytes()} is called
  * This FileSystem is used only in write path. Does not support any read apis except {@link #getFileAsBytes()}.
  */
+@NoArgsConstructor(access = AccessLevel.PACKAGE)
 public class InMemoryFileSystem extends FileSystem {
 
  // TODO: this needs to be per path to support num_cores > 1, and we should release the buffer once done
   private ByteArrayOutputStream bos;
   private Configuration conf = null;
   public static final String SCHEME = "inmemfs";
+  @Getter
   private URI uri;
 
-  InMemoryFileSystem() {
-  }
-
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {
     super.initialize(name, conf);
@@ -55,11 +57,6 @@ public class InMemoryFileSystem extends FileSystem {
     this.uri = name;
   }
 
-  @Override
-  public URI getUri() {
-    return uri;
-  }
-
   public String getScheme() {
     return SCHEME;
   }
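
@NoArgsConstructor(access = AccessLevel.PACKAGE) stands in for the removed package-private
constructor; a sketch of what Lombok generates here, assuming standard semantics:

    // generated, equivalent to the removed hand-written constructor:
    InMemoryFileSystem() {
    }

The access element maps directly onto the Java modifier (PACKAGE -> no modifier,
PRIVATE -> private, and so on).
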
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java
index ffacac30ca95..b87701f27cd8 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java
@@ -73,7 +73,7 @@ public abstract class HoodieBaseParquetWriter<R> implements Closeable {
     parquetWriterbuilder.withRowGroupSize(parquetConfig.getBlockSize());
     parquetWriterbuilder.withPageSize(parquetConfig.getPageSize());
     parquetWriterbuilder.withDictionaryPageSize(parquetConfig.getPageSize());
-    parquetWriterbuilder.withDictionaryEncoding(parquetConfig.dictionaryEnabled());
+    parquetWriterbuilder.withDictionaryEncoding(parquetConfig.isDictionaryEnabled());
     parquetWriterbuilder.withValidation(ParquetWriter.DEFAULT_IS_VALIDATING_ENABLED);
     parquetWriterbuilder.withWriterVersion(ParquetWriter.DEFAULT_WRITER_VERSION);
     parquetWriterbuilder.withConf(HadoopFSUtils.registerFileSystem(file, hadoopConf));
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/storage/hadoop/HoodieParquetStreamWriter.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/storage/hadoop/HoodieParquetStreamWriter.java
index dacbb14ac2ba..9fea0c665d3b 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/storage/hadoop/HoodieParquetStreamWriter.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/storage/hadoop/HoodieParquetStreamWriter.java
@@ -55,7 +55,7 @@ public class HoodieParquetStreamWriter implements HoodieAvroFileWriter, AutoClos
         .withRowGroupSize(parquetConfig.getBlockSize())
         .withPageSize(parquetConfig.getPageSize())
         .withDictionaryPageSize(parquetConfig.getPageSize())
-        .withDictionaryEncoding(parquetConfig.dictionaryEnabled())
+        .withDictionaryEncoding(parquetConfig.isDictionaryEnabled())
         .withWriterVersion(ParquetWriter.DEFAULT_WRITER_VERSION)
         .withConf(parquetConfig.getStorageConf().unwrapAs(Configuration.class))
         .build();
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/HoodieParquetBinaryCopyBase.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/HoodieParquetBinaryCopyBase.java
index 46640b619e02..563f0ddf793a 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/HoodieParquetBinaryCopyBase.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/HoodieParquetBinaryCopyBase.java
@@ -22,6 +22,7 @@ import org.apache.hudi.common.model.HoodieRecord;
 import org.apache.hudi.common.util.VisibleForTesting;
 import org.apache.hudi.exception.HoodieException;
 
+import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.parquet.bytes.BytesInput;
@@ -66,8 +67,6 @@ import org.apache.parquet.schema.GroupType;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.PrimitiveType;
 import org.apache.parquet.schema.Type;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -93,10 +92,9 @@ import static org.apache.parquet.schema.OriginalType.MAP;
  *    3. We need to combine file metas added by hudi, such as 'hoodie_min_record_key'/'hoodie_max_record_key'/'org.apache.hudi.bloomfilter'
  *    4. We need to overwrite column '_hoodie_file_name' with the output file name
  */
+@Slf4j
 public abstract class HoodieParquetBinaryCopyBase implements Closeable {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HoodieParquetBinaryCopyBase.class);
-
   // Key to store original writer version in the file key-value metadata
   public static final String ORIGINAL_CREATED_BY_KEY = "original.created.by";
 
@@ -150,9 +148,9 @@ public abstract class HoodieParquetBinaryCopyBase implements Closeable {
           DEFAULT_STATISTICS_TRUNCATE_LENGTH,
           ParquetProperties.DEFAULT_PAGE_WRITE_CHECKSUM_ENABLED);
       writer.start();
-      LOG.info("init writer ");
+      log.info("init writer ");
     } catch (Exception e) {
-      LOG.error("failed to init parquet writer", e);
+      log.error("failed to init parquet writer", e);
       throw new HoodieException(e);
     }
   }
@@ -174,7 +172,7 @@ public abstract class HoodieParquetBinaryCopyBase implements Closeable {
       BlockMetaData block,
       String originalCreatedBy) throws IOException {
     if (store == null) {
-      LOG.info("stores is empty");
+      log.info("stores is empty");
       return;
     }
 
@@ -434,7 +432,7 @@ public abstract class HoodieParquetBinaryCopyBase implements Closeable {
           pageOrdinal++;
           break;
         default:
-          LOG.debug("skipping page of type {} of size {}", 
pageHeader.getType(), compressedPageSize);
+          log.debug("skipping page of type {} of size {}", 
pageHeader.getType(), compressedPageSize);
           break;
       }
     }
@@ -698,7 +696,7 @@ public abstract class HoodieParquetBinaryCopyBase implements Closeable {
             changed = true;
           }
         } catch (InvalidRecordException e) {
-          LOG.debug("field not found due to schema evolution, nothing need to 
do");
+          log.debug("field not found due to schema evolution, nothing need to 
do");
         }
       }
     }
@@ -719,7 +717,7 @@ public abstract class HoodieParquetBinaryCopyBase implements Closeable {
           changed = true;
         }
       } catch (Throwable e) {
-        LOG.debug("field not found due to schema evolution, nothing need to 
do");
+        log.debug("field not found due to schema evolution, nothing need to 
do");
       }
     }
     return changed;
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/HoodieParquetFileBinaryCopier.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/HoodieParquetFileBinaryCopier.java
index 7ca88182d83d..41aca115e254 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/HoodieParquetFileBinaryCopier.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/HoodieParquetFileBinaryCopier.java
@@ -18,10 +18,11 @@
 
 package org.apache.hudi.parquet.io;
 
-import org.apache.hudi.util.HoodieFileMetadataMerger;
 import org.apache.hudi.io.storage.HoodieFileBinaryCopier;
 import org.apache.hudi.storage.StoragePath;
+import org.apache.hudi.util.HoodieFileMetadataMerger;
 
+import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.parquet.HadoopReadOptions;
@@ -32,8 +33,6 @@ import org.apache.parquet.hadoop.metadata.FileMetaData;
 import org.apache.parquet.hadoop.util.CompressionConverter;
 import org.apache.parquet.hadoop.util.HadoopInputFile;
 import org.apache.parquet.schema.MessageType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -62,9 +61,9 @@ import java.util.Set;
  *    2) Updated row group offsets
  *    3) Validated schema consistency
  */
+@Slf4j
 public class HoodieParquetFileBinaryCopier extends HoodieParquetBinaryCopyBase implements HoodieFileBinaryCopier {
-  
-  private static final Logger LOG = LoggerFactory.getLogger(HoodieParquetFileBinaryCopier.class);
+
   private final CompressionCodecName codecName;
 
   // Reader and relevant states of the in-processing input file
@@ -145,7 +144,7 @@ public class HoodieParquetFileBinaryCopier extends HoodieParquetBinaryCopyBase i
   private void initNextReader() throws IOException {
     if (reader != null) {
       reader.close();
-      LOG.info("Finish binary copy input file: {}", reader.getFile());
+      log.info("Finish binary copy input file: {}", reader.getFile());
     }
 
     if (inputFiles.isEmpty()) {
@@ -154,6 +153,6 @@ public class HoodieParquetFileBinaryCopier extends HoodieParquetBinaryCopyBase i
     }
 
     reader = inputFiles.poll();
-    LOG.info("Merging input file: {}, remaining files: {}", reader.getFile(), 
inputFiles.size());
+    log.info("Merging input file: {}, remaining files: {}", reader.getFile(), 
inputFiles.size());
   }
 }
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/ParquetBinaryCopyChecker.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/ParquetBinaryCopyChecker.java
index 65362dd05124..3498c5b154da 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/ParquetBinaryCopyChecker.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/parquet/io/ParquetBinaryCopyChecker.java
@@ -20,6 +20,10 @@ package org.apache.hudi.parquet.io;
 
 import org.apache.hudi.exception.HoodieIOException;
 
+import lombok.AccessLevel;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+import lombok.NoArgsConstructor;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.parquet.format.converter.ParquetMetadataConverter;
@@ -45,12 +49,9 @@ import static org.apache.hudi.avro.HoodieBloomFilterWriteSupport.HOODIE_AVRO_BLO
 import static org.apache.hudi.avro.HoodieBloomFilterWriteSupport.HOODIE_BLOOM_FILTER_TYPE_CODE;
 import static org.apache.hudi.common.bloom.BloomFilterTypeCode.SIMPLE;
 
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
 public class ParquetBinaryCopyChecker {
 
-  private ParquetBinaryCopyChecker() {
-
-  }
-
   /**
    * Verify whether a set of files meet the conditions for binary stream copying
    *  1. All input parquet file schema support binary copy
@@ -60,7 +61,7 @@ public class ParquetBinaryCopyChecker {
    * @return
    */
   public static boolean verifyFiles(List<ParquetFileInfo> files) {
-    boolean schemaSupportBinaryCopy = files.stream().allMatch(ParquetFileInfo::canBinaryCopy);
+    boolean schemaSupportBinaryCopy = files.stream().allMatch(ParquetFileInfo::isBinaryCopyEnabled);
     if (!schemaSupportBinaryCopy) {
       return false;
     }
@@ -215,37 +216,18 @@ public class ParquetBinaryCopyChecker {
     return false;
   }
 
+  @AllArgsConstructor
+  @Getter
   public static class ParquetFileInfo implements Serializable {
-    private final boolean canBinaryCopy;
-    private final String bloomFilterTypeCode;
-    private final String schema;
-
-    public ParquetFileInfo(boolean canBinaryCopy, String hoodieBloomFilterTypeCode, String schema) {
-      this.canBinaryCopy = canBinaryCopy;
-      this.bloomFilterTypeCode = hoodieBloomFilterTypeCode;
-      this.schema = schema;
-    }
 
     /**
      * Current file can use binary copy or not
      * Following two case can not support
      *  1. two level List structure, because the result of parquet rewrite is three level List structure
      *  2. Decimal types stored via INT32/INT64/INT96, because it can not be read by parquet-avro
-     *
-     * @param parquetFields
-     * @return
      */
-    public boolean canBinaryCopy() {
-      return canBinaryCopy;
-    }
-
-    public String getBloomFilterTypeCode() {
-      return bloomFilterTypeCode;
-    }
-
-    public String getSchema() {
-      return schema;
-    }
+    private final boolean binaryCopyEnabled;
+    private final String bloomFilterTypeCode;
+    private final String schema;
   }
-
 }
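
For a static utility class like this one, @NoArgsConstructor(access = AccessLevel.PRIVATE)
replaces the removed hand-written private constructor that blocks instantiation; roughly:

    // generated by Lombok:
    private ParquetBinaryCopyChecker() {
    }

The canBinaryCopy -> binaryCopyEnabled field rename again serves Lombok's naming rules:
the generated boolean getter is isBinaryCopyEnabled(), matching the updated method
reference ParquetFileInfo::isBinaryCopyEnabled in verifyFiles().
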
diff --git a/hudi-hadoop-common/src/main/java/org/apache/parquet/avro/HoodieAvroParquetSchemaConverter.java b/hudi-hadoop-common/src/main/java/org/apache/parquet/avro/HoodieAvroParquetSchemaConverter.java
index 853b186f19df..a06b9bb0ef40 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/parquet/avro/HoodieAvroParquetSchemaConverter.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/parquet/avro/HoodieAvroParquetSchemaConverter.java
@@ -21,11 +21,10 @@ package org.apache.parquet.avro;
 
 import org.apache.hudi.common.util.ReflectionUtils;
 
+import lombok.extern.slf4j.Slf4j;
 import org.apache.avro.Schema;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.parquet.schema.MessageType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Parquet-Java AvroSchemaConverter doesn't support local timestamp types until version 1.14
@@ -37,15 +36,15 @@ import org.slf4j.LoggerFactory;
  * library AvroSchemaConverter in this case.
  *
  */
+@Slf4j
 public abstract class HoodieAvroParquetSchemaConverter {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HoodieAvroParquetSchemaConverter.class);
   public static HoodieAvroParquetSchemaConverter getAvroSchemaConverter(Configuration configuration) {
     try {
      return (HoodieAvroParquetSchemaConverter) ReflectionUtils.loadClass(AvroSchemaConverterWithTimestampNTZ.class.getName(),
           new Class<?>[] {Configuration.class}, configuration);
     } catch (Throwable t) {
-      LOG.debug("Failed to load AvroSchemaConverterWithTimestampNTZ, using 
NativeAvroSchemaConverter instead", t);
+      log.debug("Failed to load AvroSchemaConverterWithTimestampNTZ, using 
NativeAvroSchemaConverter instead", t);
+      return (HoodieAvroParquetSchemaConverter) ReflectionUtils.loadClass(NativeAvroSchemaConverter.class.getName(),
           new Class<?>[] {Configuration.class}, configuration);
     }
diff --git a/hudi-hadoop-common/src/test/java/org/apache/hudi/client/transaction/lock/TestInProcessLockProvider.java b/hudi-hadoop-common/src/test/java/org/apache/hudi/client/transaction/lock/TestInProcessLockProvider.java
index c7056374471c..aa1e78d0f863 100644
--- a/hudi-hadoop-common/src/test/java/org/apache/hudi/client/transaction/lock/TestInProcessLockProvider.java
+++ b/hudi-hadoop-common/src/test/java/org/apache/hudi/client/transaction/lock/TestInProcessLockProvider.java
@@ -24,10 +24,9 @@ import org.apache.hudi.common.config.TypedProperties;
 import org.apache.hudi.exception.HoodieLockException;
 import org.apache.hudi.storage.StorageConfiguration;
 
+import lombok.extern.slf4j.Slf4j;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -39,9 +38,9 @@ import static org.apache.hudi.common.testutils.HoodieTestUtils.getDefaultStorage
 import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
+@Slf4j
 public class TestInProcessLockProvider {
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestInProcessLockProvider.class);
   private final StorageConfiguration<?> storageConf = getDefaultStorageConf();
   private final LockConfiguration lockConfiguration1;
   private final LockConfiguration lockConfiguration2;
@@ -74,9 +73,9 @@ public class TestInProcessLockProvider {
 
     // Writer 1
     assertDoesNotThrow(() -> {
-      LOG.info("Writer 1 tries to acquire the lock.");
+      log.info("Writer 1 tries to acquire the lock.");
       lockProvider1.lock();
-      LOG.info("Writer 1 acquires the lock.");
+      log.info("Writer 1 acquires the lock.");
     });
     // Writer 2 thread in parallel, should block
     // and later acquire the lock once it is released
@@ -84,10 +83,10 @@ public class TestInProcessLockProvider {
      InProcessLockProvider lockProvider2 = new InProcessLockProvider(lockConfiguration1, storageConf);
       lockProviderList.add(lockProvider2);
       assertDoesNotThrow(() -> {
-        LOG.info("Writer 2 tries to acquire the lock.");
+        log.info("Writer 2 tries to acquire the lock.");
         writer2TryLock.set(true);
         lockProvider2.lock();
-        LOG.info("Writer 2 acquires the lock.");
+        log.info("Writer 2 acquires the lock.");
       });
       writer2Locked.set(true);
 
@@ -101,10 +100,10 @@ public class TestInProcessLockProvider {
 
       assertDoesNotThrow(() -> {
         lockProvider2.unlock();
-        LOG.info("Writer 2 releases the lock.");
+        log.info("Writer 2 releases the lock.");
       });
       lockProvider2.close();
-      LOG.info("Writer 2 closes the lock provider.");
+      log.info("Writer 2 closes the lock provider.");
       writer2Completed.set(true);
     });
 
@@ -126,18 +125,18 @@ public class TestInProcessLockProvider {
             + lockProvider3.getLock());
       }
       assertDoesNotThrow(() -> {
-        LOG.info("Writer 3 tries to acquire the lock.");
+        log.info("Writer 3 tries to acquire the lock.");
         writer3TryLock.set(true);
         lockProvider3.lock();
-        LOG.info("Writer 3 acquires the lock.");
+        log.info("Writer 3 acquires the lock.");
       });
 
       assertDoesNotThrow(() -> {
         lockProvider3.unlock();
-        LOG.info("Writer 3 releases the lock.");
+        log.info("Writer 3 releases the lock.");
       });
       lockProvider3.close();
-      LOG.info("Writer 3 closes the lock provider.");
+      log.info("Writer 3 closes the lock provider.");
       writer3Completed.set(true);
     });
 
@@ -150,9 +149,9 @@ public class TestInProcessLockProvider {
 
     assertDoesNotThrow(() -> {
       lockProvider1.unlock();
-      LOG.info("Writer 1 releases the lock.");
+      log.info("Writer 1 releases the lock.");
       lockProvider1.close();
-      LOG.info("Writer 1 closes the lock provider.");
+      log.info("Writer 1 closes the lock provider.");
       writer1Completed.set(true);
     });
 
diff --git a/hudi-hadoop-common/src/test/java/org/apache/hudi/client/transaction/lock/TestNoopLockProvider.java b/hudi-hadoop-common/src/test/java/org/apache/hudi/client/transaction/lock/TestNoopLockProvider.java
index e27da253eb7c..24cf37287637 100644
--- a/hudi-hadoop-common/src/test/java/org/apache/hudi/client/transaction/lock/TestNoopLockProvider.java
+++ b/hudi-hadoop-common/src/test/java/org/apache/hudi/client/transaction/lock/TestNoopLockProvider.java
@@ -23,10 +23,9 @@ import org.apache.hudi.common.config.LockConfiguration;
 import org.apache.hudi.common.config.TypedProperties;
 import org.apache.hudi.storage.StorageConfiguration;
 
+import lombok.extern.slf4j.Slf4j;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -36,9 +35,9 @@ import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 /**
  * Tests {@code NoopLockProvider}.
  */
+@Slf4j
 public class TestNoopLockProvider {
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestNoopLockProvider.class);
   private final StorageConfiguration<?> storageConf = getDefaultStorageConf();
   private final LockConfiguration lockConfiguration1;
   private final LockConfiguration lockConfiguration2;
diff --git a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/fs/TestFSUtilsWithRetryWrapperEnable.java b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/fs/TestFSUtilsWithRetryWrapperEnable.java
index 3026394011d4..8fc380cdc1b7 100644
--- a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/fs/TestFSUtilsWithRetryWrapperEnable.java
+++ b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/fs/TestFSUtilsWithRetryWrapperEnable.java
@@ -25,6 +25,7 @@ import org.apache.hudi.storage.HoodieStorage;
 import org.apache.hudi.storage.StoragePath;
 import org.apache.hudi.storage.hadoop.HoodieHadoopStorage;
 
+import lombok.Getter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -132,6 +133,7 @@ public class TestFSUtilsWithRetryWrapperEnable extends TestFSUtils {
     private FileSystem fs;
     private int count = 1;
     private int loop;
+    @Getter
     private short defaultReplication = 3;
 
     public FakeRemoteFileSystem(FileSystem fs, int retryLoop) {
@@ -257,11 +259,6 @@ public class TestFSUtilsWithRetryWrapperEnable extends TestFSUtils {
       return fs.getScheme();
     }
 
-    @Override
-    public short getDefaultReplication() {
-      return  defaultReplication;
-    }
-
     @Override
     public short getDefaultReplication(Path path) {
       return defaultReplication;
diff --git a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
index 858e5abbd9ba..f607cc0f0899 100644
--- a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
+++ b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java
@@ -65,6 +65,7 @@ import org.apache.hudi.storage.HoodieStorage;
 import org.apache.hudi.storage.StoragePath;
 import org.apache.hudi.storage.StoragePathInfo;
 
+import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.junit.jupiter.api.AfterEach;
@@ -75,8 +76,6 @@ import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.Arguments;
 import org.junit.jupiter.params.provider.MethodSource;
 import org.junit.jupiter.params.provider.ValueSource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -108,10 +107,9 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Tests hoodie table file system view {@link HoodieTableFileSystemView}.
  */
+@Slf4j
 @SuppressWarnings("ResultOfMethodCallIgnored")
 public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
-
-  private static final Logger LOG = LoggerFactory.getLogger(TestHoodieTableFileSystemView.class);
   private static final String TEST_NAME_WITH_PARAMS = "[{index}] Test with bootstrap enable={0}";
   private static final String TEST_NAME_WITH_PARAMS_2 = "[{index}] Test with bootstrap enable={0}, preTableVersion8={1}";
 
@@ -1200,7 +1198,7 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
     roView.getAllBaseFiles(partitionPath);
 
    fileSliceList = rtView.getLatestFileSlices(partitionPath).collect(Collectors.toList());
-    LOG.info("FILESLICE LIST=" + fileSliceList);
+    log.info("FILESLICE LIST=" + fileSliceList);
    dataFiles = fileSliceList.stream().map(FileSlice::getBaseFile).filter(Option::isPresent).map(Option::get)
        .collect(Collectors.toList());
    assertEquals(1, dataFiles.size(), "Expect only one data-files in latest view as there is only one file-group");
diff --git a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java
index d25d1b53ddea..d01669b507f3 100644
--- a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java
+++ b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java
@@ -55,13 +55,12 @@ import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.storage.StoragePath;
 
+import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.fs.Path;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.nio.file.Files;
@@ -92,9 +91,9 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Tests incremental file system view sync.
  */
+@Slf4j
 public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalFSViewSync.class);
   private static final int NUM_FILE_IDS_PER_PARTITION = 10;
   private static final String TEST_WRITE_TOKEN = "1-0-1";
  private static final List<String> PARTITIONS = Arrays.asList("2018/01/01", "2018/01/02", "2019/03/01");
@@ -528,7 +527,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
    final int netFilesAddedPerInstant = numFilesAddedPerInstant - numFilesReplacedPerInstant;
     assertEquals(newCleanerInstants.size(), cleanedInstants.size());
    long exp = PARTITIONS.stream().mapToLong(p1 -> view.getAllFileSlices(p1).count()).findAny().getAsLong();
-    LOG.info("Initial File Slices :" + exp);
+    log.info("Initial File Slices :" + exp);
     for (int idx = 0; idx < newCleanerInstants.size(); idx++) {
       String instant = cleanedInstants.get(idx);
       try {
@@ -545,8 +544,8 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
         assertEquals(State.COMPLETED, view.getLastInstant().get().getState());
        assertEquals(HoodieTimeline.CLEAN_ACTION, view.getLastInstant().get().getAction());
         PARTITIONS.forEach(p -> {
-          LOG.info("PARTITION : " + p);
-          LOG.info("\tFileSlices :" + 
view.getAllFileSlices(p).collect(Collectors.toList()));
+          log.info("PARTITION : " + p);
+          log.info("\tFileSlices :" + 
view.getAllFileSlices(p).collect(Collectors.toList()));
         });
 
         final int instantIdx = newCleanerInstants.size() - idx;
@@ -594,7 +593,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
            isDeltaCommit ? initialFileSlices : initialFileSlices - ((idx + 1) * (FILE_IDS_PER_PARTITION.size() - totalReplacedFileSlicesPerPartition));
         view.sync();
         assertTrue(view.getLastInstant().isPresent());
-        LOG.info("Last Instant is :" + view.getLastInstant().get());
+        log.info("Last Instant is :" + view.getLastInstant().get());
         if (isRestore) {
          assertEquals(newRestoreInstants.get(idx), view.getLastInstant().get().requestedTime());
          assertEquals(HoodieTimeline.RESTORE_ACTION, view.getLastInstant().get().getAction());
@@ -877,7 +876,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
     int multiple = begin;
     for (int idx = 0; idx < instants.size(); idx++) {
       String instant = instants.get(idx);
-      LOG.info("Adding instant=" + instant);
+      log.info("Adding instant=" + instant);
       HoodieInstant lastInstant = lastInstants.get(idx);
       // Add a non-empty ingestion to COW table
       List<String> filePaths = addInstant(metaClient, instant, deltaCommit);
diff --git a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarness.java b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarness.java
index fd903383343c..85f5efd6de76 100644
--- a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarness.java
+++ b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarness.java
@@ -50,13 +50,14 @@ import org.apache.hudi.storage.StorageConfiguration;
 import org.apache.hudi.storage.StoragePath;
 import org.apache.hudi.storage.hadoop.HadoopStorageConfiguration;
 
+import lombok.AccessLevel;
+import lombok.Setter;
+import lombok.extern.slf4j.Slf4j;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.IndexedRecord;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.junit.jupiter.api.io.TempDir;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.URI;
@@ -80,12 +81,14 @@ import static org.apache.hudi.common.config.HoodieStorageConfig.PARQUET_COMPRESS
 /**
  * The common hoodie test harness to provide the basic infrastructure.
  */
+@Slf4j
 public class HoodieCommonTestHarness {
-  private static final Logger LOG = LoggerFactory.getLogger(HoodieCommonTestHarness.class);
+
   protected static final String BASE_FILE_EXTENSION = HoodieTableConfig.BASE_FILE_FORMAT.defaultValue().getFileExtension();
   protected static ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = null;
   protected static final HoodieLogBlock.HoodieLogBlockType DEFAULT_DATA_BLOCK_TYPE = HoodieLogBlock.HoodieLogBlockType.AVRO_DATA_BLOCK;
 
+  @Setter(AccessLevel.PROTECTED)
   protected String tableName;
   protected String basePath;
   protected URI baseUri;
@@ -98,10 +101,6 @@ public class HoodieCommonTestHarness {
   protected StorageConfiguration<Configuration> storageConf;
   protected HoodieStorage storage;
 
-  protected void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
   /**
    * Initializes basePath.
    */
@@ -275,7 +274,7 @@ public class HoodieCommonTestHarness {
           semaphore.release();
         }
       } catch (Exception e) {
-        LOG.warn("Error in polling for timeline", e);
+        log.warn("Error in polling for timeline", e);
       }
     }, 0, 1, TimeUnit.SECONDS);
     int maxWaitInMinutes = 10;
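
The field-level @Setter(AccessLevel.PROTECTED) above replaces the removed
hand-written setter one-for-one; a sketch of what Lombok generates:

    @Setter(AccessLevel.PROTECTED)
    protected String tableName;

    // Generated by @Setter with protected access:
    protected void setTableName(String tableName) {
      this.tableName = tableName;
    }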
diff --git a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/HoodieTestTable.java b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/HoodieTestTable.java
index 976b9f828c3a..b81585275ac8 100644
--- a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/HoodieTestTable.java
+++ b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/HoodieTestTable.java
@@ -61,24 +61,26 @@ import org.apache.hudi.common.table.timeline.TimelineUtils;
 import org.apache.hudi.common.table.timeline.versioning.DefaultInstantGenerator;
 import org.apache.hudi.common.table.timeline.versioning.clean.CleanPlanV2MigrationHandler;
 import org.apache.hudi.common.util.CompactionUtils;
-import org.apache.hudi.io.util.FileIOUtils;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.StringUtils;
 import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.hadoop.fs.HadoopFSUtils;
+import org.apache.hudi.io.util.FileIOUtils;
 import org.apache.hudi.storage.HoodieStorage;
 import org.apache.hudi.storage.StoragePath;
 import org.apache.hudi.storage.StoragePathInfo;
 
+import lombok.AccessLevel;
+import lombok.Getter;
+import lombok.NoArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -144,12 +146,11 @@ import static org.apache.hudi.common.util.StringUtils.EMPTY_STRING;
 /**
  * Test Hoodie Table for testing only.
  */
+@Slf4j
 public class HoodieTestTable implements AutoCloseable {
 
   public static final String PHONY_TABLE_SCHEMA =
       "{\"namespace\": \"org.apache.hudi.avro.model\", \"type\": \"record\", 
\"name\": \"PhonyRecord\", \"fields\": []}";
-
-  private static final Logger LOG = 
LoggerFactory.getLogger(HoodieTestTable.class);
   private static final Random RANDOM = new Random();
 
   protected static HoodieTestTableState testTableState;
@@ -160,6 +161,7 @@ public class HoodieTestTable implements AutoCloseable {
   protected final FileSystem fs;
   protected HoodieTableMetaClient metaClient;
   protected String currentInstantTime;
+  @Getter
   private boolean isNonPartitioned = false;
   protected Option<HoodieEngineContext> context;
   protected final InstantGenerator instantGenerator = new DefaultInstantGenerator();
@@ -191,10 +193,6 @@ public class HoodieTestTable implements AutoCloseable {
     this.isNonPartitioned = true;
   }
 
-  public boolean isNonPartitioned() {
-    return this.isNonPartitioned;
-  }
-
   public static String makeNewCommitTime(int sequence, String instantFormat) {
     return String.format(instantFormat, sequence);
   }
@@ -1500,7 +1498,9 @@ public class HoodieTestTable implements AutoCloseable {
     }
   }
 
+  @NoArgsConstructor(access = AccessLevel.PACKAGE)
   static class HoodieTestTableState {
+
     /**
      * Map<commitTime, Map<partitionPath, List<filesToDelete>>>
      * Used in building CLEAN metadata.
@@ -1517,9 +1517,6 @@ public class HoodieTestTable implements AutoCloseable {
      */
     Map<String, Map<String, List<Pair<String, Integer[]>>>> commitsToPartitionToLogFileInfoStats = new HashMap<>();
 
-    HoodieTestTableState() {
-    }
-
     static HoodieTestTableState of() {
       return new HoodieTestTableState();
     }
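
Two Lombok rules do the work in this file. For a boolean field whose name already
starts with "is", @Getter does not add another prefix, so the public API keeps its
isNonPartitioned() accessor; and @NoArgsConstructor with package access reproduces
the deleted package-private constructor. A sketch of the generated members:

    @Getter
    private boolean isNonPartitioned = false;
    // Generated: public boolean isNonPartitioned() { return this.isNonPartitioned; }

    @NoArgsConstructor(access = AccessLevel.PACKAGE)
    static class HoodieTestTableState {
      // Generated: HoodieTestTableState() {}
    }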
diff --git a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/minicluster/HdfsTestService.java b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/minicluster/HdfsTestService.java
index 2c762b2def9a..2a763b77c676 100644
--- a/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/minicluster/HdfsTestService.java
+++ b/hudi-hadoop-common/src/test/java/org/apache/hudi/common/testutils/minicluster/HdfsTestService.java
@@ -20,11 +20,11 @@ package org.apache.hudi.common.testutils.minicluster;
 
 import org.apache.hudi.common.testutils.NetworkTestUtils;
 
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.BindException;
@@ -34,13 +34,13 @@ import java.util.Objects;
 /**
  * An HDFS minicluster service implementation.
  */
+@Slf4j
 public class HdfsTestService {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HdfsTestService.class);
-
   /**
    * Configuration settings.
    */
+  @Getter
   private final Configuration hadoopConf;
   private final java.nio.file.Path dfsBaseDirPath;
 
@@ -58,16 +58,12 @@ public class HdfsTestService {
     this.dfsBaseDirPath = Files.createTempDirectory("hdfs-test-service" + System.currentTimeMillis());
   }
 
-  public Configuration getHadoopConf() {
-    return hadoopConf;
-  }
-
   public MiniDFSCluster start(boolean format) throws IOException {
     Objects.requireNonNull(dfsBaseDirPath, "dfs base dir must be set before starting cluster.");
 
     // If clean, then remove the work dir so we can start fresh.
     if (format) {
-      LOG.info("Cleaning HDFS cluster data at: " + dfsBaseDirPath + " and 
starting fresh.");
+      log.info("Cleaning HDFS cluster data at: " + dfsBaseDirPath + " and 
starting fresh.");
       Files.deleteIfExists(dfsBaseDirPath);
     }
 
@@ -86,7 +82,7 @@ public class HdfsTestService {
             datanodePort, datanodeIpcPort, datanodeHttpPort);
         miniDfsCluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(1).format(format).checkDataNodeAddrConfig(true)
             .checkDataNodeHostConfig(true).build();
-        LOG.info("HDFS Minicluster service started.");
+        log.info("HDFS Minicluster service started.");
         return miniDfsCluster;
       } catch (BindException ex) {
         ++loop;
@@ -100,7 +96,7 @@ public class HdfsTestService {
   }
 
   public void stop() {
-    LOG.info("HDFS Minicluster service being shut down.");
+    log.info("HDFS Minicluster service being shut down.");
     if (miniDfsCluster != null) {
       miniDfsCluster.shutdown(true, true);
     }
@@ -118,7 +114,7 @@ public class HdfsTestService {
   private static Configuration configureDFSCluster(Configuration config, String dfsBaseDir, String bindIP,
       int namenodeRpcPort, int datanodePort, int datanodeIpcPort, int datanodeHttpPort) {
 
-    LOG.info("HDFS force binding to ip: " + bindIP);
+    log.info("HDFS force binding to ip: " + bindIP);
     config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + bindIP + ":" + namenodeRpcPort);
     config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, bindIP + ":" + datanodePort);
     config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, bindIP + ":" + datanodeIpcPort);
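
Here @Getter stands in for the removed getHadoopConf() accessor; Lombok derives the
getter name from the field, so the public surface is unchanged:

    @Getter
    private final Configuration hadoopConf;
    // Generated: public Configuration getHadoopConf() { return this.hadoopConf; }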
diff --git a/hudi-hadoop-common/src/test/java/org/apache/hudi/io/hadoop/TestHoodieBaseParquetWriter.java b/hudi-hadoop-common/src/test/java/org/apache/hudi/io/hadoop/TestHoodieBaseParquetWriter.java
index 6bcd2b473fb4..fdc994705d71 100644
--- a/hudi-hadoop-common/src/test/java/org/apache/hudi/io/hadoop/TestHoodieBaseParquetWriter.java
+++ b/hudi-hadoop-common/src/test/java/org/apache/hudi/io/hadoop/TestHoodieBaseParquetWriter.java
@@ -30,6 +30,8 @@ import org.apache.hudi.common.util.Option;
 import org.apache.hudi.storage.StorageConfiguration;
 import org.apache.hudi.storage.StoragePath;
 
+import lombok.Getter;
+import lombok.Setter;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.IndexedRecord;
 import org.apache.parquet.avro.AvroSchemaConverter;
@@ -49,8 +51,10 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestHoodieBaseParquetWriter {
 
+  @Setter
   private static class MockHoodieParquetWriter extends HoodieBaseParquetWriter<IndexedRecord> {
 
+    @Getter
     long writtenRecordCount = 0L;
     long currentDataSize = 0L;
 
@@ -64,19 +68,6 @@ public class TestHoodieBaseParquetWriter {
     public long getDataSize() {
       return currentDataSize;
     }
-
-    @Override
-    public long getWrittenRecordCount() {
-      return writtenRecordCount;
-    }
-
-    public void setWrittenRecordCount(long writtenCount) {
-      this.writtenRecordCount = writtenCount;
-    }
-
-    public void setCurrentDataSize(long currentDataSize) {
-      this.currentDataSize = currentDataSize;
-    }
   }
 
   @TempDir
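
A class-level @Setter generates a setter for every non-static, non-final field, so
the mock's two hand-written setters disappear, and the field-level @Getter yields
getWrittenRecordCount(), which still overrides the parent method even without an
explicit @Override annotation. Roughly what Lombok emits:

    // From @Setter on the class:
    public void setWrittenRecordCount(long writtenRecordCount) { this.writtenRecordCount = writtenRecordCount; }
    public void setCurrentDataSize(long currentDataSize) { this.currentDataSize = currentDataSize; }
    // From @Getter on the field:
    public long getWrittenRecordCount() { return this.writtenRecordCount; }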
diff --git a/hudi-hadoop-common/src/test/java/org/apache/hudi/parquet/io/TestHoodieParquetFileBinaryCopier.java b/hudi-hadoop-common/src/test/java/org/apache/hudi/parquet/io/TestHoodieParquetFileBinaryCopier.java
index 9d85f371f008..4bf897537332 100644
--- a/hudi-hadoop-common/src/test/java/org/apache/hudi/parquet/io/TestHoodieParquetFileBinaryCopier.java
+++ b/hudi-hadoop-common/src/test/java/org/apache/hudi/parquet/io/TestHoodieParquetFileBinaryCopier.java
@@ -18,9 +18,10 @@
 
 package org.apache.hudi.parquet.io;
 
-import org.apache.hudi.util.HoodieFileMetadataMerger;
 import org.apache.hudi.storage.StoragePath;
+import org.apache.hudi.util.HoodieFileMetadataMerger;
 
+import lombok.Value;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.parquet.HadoopReadOptions;
@@ -839,22 +840,11 @@ public class TestHoodieParquetFileBinaryCopier {
     }
   }
 
+  @Value
   public static class TestFile {
-    private final String fileName;
-    private final SimpleGroup[] fileContent;
-
-    public TestFile(String fileName, SimpleGroup[] fileContent) {
-      this.fileName = fileName;
-      this.fileContent = fileContent;
-    }
 
-    public String getFileName() {
-      return this.fileName;
-    }
-
-    public SimpleGroup[] getFileContent() {
-      return this.fileContent;
-    }
+    String fileName;
+    SimpleGroup[] fileContent;
   }
 
   private void verifyColumnConvert(
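
@Value condenses the immutable TestFile holder: Lombok marks the class final, makes
each field private final, and generates the all-args constructor, getters,
equals/hashCode, and toString. The replaced boilerplate corresponds to roughly:

    @Value
    public static class TestFile {
      String fileName;           // becomes: private final String fileName;
      SimpleGroup[] fileContent; // becomes: private final SimpleGroup[] fileContent;
    }
    // Generated: TestFile(String, SimpleGroup[]), getFileName(), getFileContent(),
    // plus equals(), hashCode(), and toString()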
diff --git a/hudi-hadoop-common/src/test/java/org/apache/hudi/parquet/io/TestParquetBinaryCopyChecker.java b/hudi-hadoop-common/src/test/java/org/apache/hudi/parquet/io/TestParquetBinaryCopyChecker.java
index 98ceb4fbadad..da66905fb6db 100644
--- a/hudi-hadoop-common/src/test/java/org/apache/hudi/parquet/io/TestParquetBinaryCopyChecker.java
+++ b/hudi-hadoop-common/src/test/java/org/apache/hudi/parquet/io/TestParquetBinaryCopyChecker.java
@@ -306,7 +306,7 @@ public class TestParquetBinaryCopyChecker {
     String testFile = makeTestFile(schema, "simple");
     ParquetFileInfo info = ParquetBinaryCopyChecker.collectFileInfo(conf, testFile);
     Assertions.assertNotNull(info);
-    Assertions.assertTrue(info.canBinaryCopy());
+    Assertions.assertTrue(info.isBinaryCopyEnabled());
     String schemaString = schema.toString();
     assertFileInfo(
         info,
@@ -381,7 +381,7 @@ public class TestParquetBinaryCopyChecker {
 
   private void assertFileInfo(ParquetFileInfo info, boolean support, String codeType, String schema) {
     Assertions.assertNotNull(info);
-    Assertions.assertEquals(support, info.canBinaryCopy());
+    Assertions.assertEquals(support, info.isBinaryCopyEnabled());
     if (support) {
       Assertions.assertEquals(codeType, info.getBloomFilterTypeCode());
       Assertions.assertEquals(schema, info.getSchema().toString());
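
The assertion updates track Lombok's boolean-getter naming in the production class:
@Getter on a boolean field produces an is-prefixed accessor, so the former
canBinaryCopy() method gives way to isBinaryCopyEnabled(). As a sketch, assuming
ParquetFileInfo holds a boolean field named binaryCopyEnabled:

    @Getter
    private boolean binaryCopyEnabled;
    // Generated: public boolean isBinaryCopyEnabled() { return this.binaryCopyEnabled; }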
