This is an automated email from the ASF dual-hosted git repository.

schang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 3dfe2cda88d2 refactor: Add Lombok annotations to hudi-common module 
(part 2) (#17655)
3dfe2cda88d2 is described below

commit 3dfe2cda88d2f0b1dde913c0791bd69a27764784
Author: voonhous <[email protected]>
AuthorDate: Sat Jan 10 03:50:35 2026 +0800

    refactor: Add Lombok annotations to hudi-common module (part 2) (#17655)
    
    * refactor: Add Lombok annotations to hudi-common module (part 2)
    
    * Address comments
    
    * Address comments
    
    * Ensure that HoodieColumnRangeMetadata does not violate any 
equals/hashCode contract.
---
 .../metadata/HoodieBackedTableMetadataWriter.java  |   6 +-
 .../hudi/internal/schema/InternalSchema.java       |  24 +---
 .../internal/schema/InternalSchemaBuilder.java     |   8 +-
 .../java/org/apache/hudi/internal/schema/Type.java |  11 +-
 .../org/apache/hudi/internal/schema/Types.java     |  25 ++---
 .../hudi/internal/schema/action/TableChange.java   |  30 ++---
 .../hudi/internal/schema/action/TableChanges.java  |  24 ++--
 .../internal/schema/action/TableChangesHelper.java |   3 +-
 .../io/FileBasedInternalSchemaStorageManager.java  |  13 +--
 .../internal/schema/utils/InternalSchemaUtils.java |   7 +-
 .../internal/schema/utils/SchemaChangeUtils.java   |   7 +-
 .../hudi/internal/schema/utils/SerDeHelper.java    |   6 +-
 .../apache/hudi/io/storage/HoodieHFileConfig.java  |  48 ++------
 .../io/storage/HoodieNativeAvroHFileReader.java    |  10 +-
 .../apache/hudi/io/storage/HoodieOrcConfig.java    |  41 +------
 .../org/apache/hudi/keygen/BaseKeyGenerator.java   |  11 +-
 .../hudi/keygen/constant/KeyGeneratorType.java     |  20 ++--
 .../apache/hudi/metadata/BaseTableMetadata.java    |  37 +++----
 .../hudi/metadata/BloomFilterIndexRawKey.java      |  44 ++------
 .../metadata/ColumnStatsIndexPrefixRawKey.java     |  50 ++-------
 .../hudi/metadata/ColumnStatsIndexRawKey.java      |  55 ++-------
 .../EmptyHoodieRecordPayloadWithPartition.java     |  14 +--
 .../org/apache/hudi/metadata/FilesIndexRawKey.java |  36 +-----
 .../hudi/metadata/HoodieBackedTableMetadata.java   |  22 ++--
 .../hudi/metadata/HoodieDataCleanupManager.java    |  21 ++--
 .../hudi/metadata/HoodieMetadataMetrics.java       |   8 +-
 .../hudi/metadata/HoodieTableMetadataUtil.java     | 102 +++++++----------
 .../hudi/metadata/MetadataPartitionType.java       |  14 +--
 .../apache/hudi/metadata/RecordIndexRawKey.java    |  36 +-----
 .../hudi/metadata/SecondaryIndexPrefixRawKey.java  |  34 +-----
 .../hudi/metrics/ConsoleMetricsReporter.java       |  10 +-
 .../java/org/apache/hudi/metrics/HoodieGauge.java  |  31 ++----
 .../apache/hudi/metrics/JmxMetricsReporter.java    |  18 ++-
 .../org/apache/hudi/metrics/JmxReporterServer.java |   6 +-
 .../main/java/org/apache/hudi/metrics/Metrics.java |  33 +++---
 .../hudi/metrics/MetricsGraphiteReporter.java      |  10 +-
 .../hudi/metrics/MetricsReporterFactory.java       |  10 +-
 .../apache/hudi/metrics/Slf4jMetricsReporter.java  |  14 +--
 .../custom/CustomizableMetricsReporter.java        |  18 +--
 .../hudi/metrics/datadog/DatadogHttpClient.java    |  24 ++--
 .../hudi/metrics/datadog/DatadogReporter.java      |   8 +-
 .../apache/hudi/metrics/m3/M3MetricsReporter.java  |   9 +-
 .../metrics/prometheus/PrometheusReporter.java     |  48 +++-----
 .../metrics/prometheus/PushGatewayReporter.java    |   9 +-
 .../hudi/stats/HoodieColumnRangeMetadata.java      | 123 +++------------------
 .../java/org/apache/hudi/stats/ValueMetadata.java  |  25 ++---
 .../hudi/timeline/TimelineServiceClient.java       |   8 +-
 .../hudi/timeline/TimelineServiceClientBase.java   |  24 +---
 48 files changed, 337 insertions(+), 858 deletions(-)

diff --git 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java
 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java
index dd19afd9aa36..f278fc13fde2 100644
--- 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java
+++ 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java
@@ -431,7 +431,7 @@ public abstract class HoodieBackedTableMetadataWriter<I, O> 
implements HoodieTab
     Map<String, Map<String, Long>> partitionIdToAllFilesMap = 
partitionInfoList.stream()
         .map(p -> {
           String partitionName = 
HoodieTableMetadataUtil.getPartitionIdentifierForFilesPartition(p.getRelativePath());
-          return Pair.of(partitionName, p.getFileNameToSizeMap());
+          return Pair.of(partitionName, p.getFilenameToSizeMap());
         })
         .collect(Collectors.toMap(Pair::getKey, Pair::getValue));
 
@@ -2134,13 +2134,13 @@ public abstract class 
HoodieBackedTableMetadataWriter<I, O> implements HoodieTab
         }
       } else {
         // Some files need to be cleaned and some to be added in the partition
-        Map<String, Long> fsFiles = 
dirInfoMap.get(partition).getFileNameToSizeMap();
+        Map<String, Long> fsFiles = 
dirInfoMap.get(partition).getFilenameToSizeMap();
         List<String> mdtFiles = metadataFiles.stream().map(mdtFile -> 
mdtFile.getPath().getName()).collect(Collectors.toList());
         List<String> filesDeleted = metadataFiles.stream().map(f -> 
f.getPath().getName())
             .filter(n -> !fsFiles.containsKey(n)).collect(Collectors.toList());
         Map<String, Long> filesToAdd = new HashMap<>();
         // new files could be added to DT due to restore that just happened 
which may not be tracked in RestoreMetadata.
-        dirInfoMap.get(partition).getFileNameToSizeMap().forEach((k, v) -> {
+        dirInfoMap.get(partition).getFilenameToSizeMap().forEach((k, v) -> {
           if (!mdtFiles.contains(k)) {
             filesToAdd.put(k, v);
           }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchema.java 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchema.java
index 5ed49dd65379..dfae50385c8d 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchema.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchema.java
@@ -22,6 +22,9 @@ import org.apache.hudi.common.util.StringUtils;
 import org.apache.hudi.internal.schema.Types.Field;
 import org.apache.hudi.internal.schema.Types.RecordType;
 
+import lombok.Getter;
+import lombok.Setter;
+
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -45,8 +48,11 @@ public class InternalSchema implements Serializable {
 
   private static final long DEFAULT_VERSION_ID = 0;
 
+  @Getter
   private final RecordType record;
 
+  @Getter
+  @Setter
   private int maxColumnId;
   private long versionId;
 
@@ -84,10 +90,6 @@ public class InternalSchema implements Serializable {
     this.maxColumnId = idToName.isEmpty() ? -1 : 
idToName.keySet().stream().max(Comparator.comparing(Integer::valueOf)).get();
   }
 
-  public RecordType getRecord() {
-    return record;
-  }
-
   private static Map<Integer, String> buildIdToName(RecordType record) {
     return record.fields().isEmpty()
         ? Collections.emptyMap()
@@ -144,20 +146,6 @@ public class InternalSchema implements Serializable {
     return this.versionId;
   }
 
-  /**
-   * Set the version ID for this schema.
-   */
-  public void setMaxColumnId(int maxColumnId) {
-    this.maxColumnId = maxColumnId;
-  }
-
-  /**
-   * Returns the max column id for this schema.
-   */
-  public int getMaxColumnId() {
-    return this.maxColumnId;
-  }
-
   /**
    * Returns a List of the {@link Field columns} in this Schema.
    */
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java
 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java
index 7674caf971bc..b61a9acff2ba 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java
@@ -22,6 +22,9 @@ import 
org.apache.hudi.internal.schema.visitor.InternalSchemaVisitor;
 import org.apache.hudi.internal.schema.visitor.NameToIDVisitor;
 import org.apache.hudi.internal.schema.visitor.NameToPositionVisitor;
 
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Deque;
@@ -34,16 +37,15 @@ import java.util.concurrent.atomic.AtomicInteger;
 /**
  * A build class to help build fields for InternalSchema
  */
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
 public class InternalSchemaBuilder implements Serializable {
+
   private static final InternalSchemaBuilder INSTANCE = new 
InternalSchemaBuilder();
 
   public static InternalSchemaBuilder getBuilder() {
     return INSTANCE;
   }
 
-  private InternalSchemaBuilder() {
-  }
-
   /**
    * Build a mapping from id to full field name for a internal Type.
    * if a field y belong to a struct filed x, then the full name of y is x.y
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/Type.java 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/Type.java
index 31231bf96f59..da67b952258e 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/Type.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/Type.java
@@ -20,6 +20,8 @@ package org.apache.hudi.internal.schema;
 
 import org.apache.hudi.common.util.PartitionPathEncodeUtils;
 
+import lombok.Getter;
+
 import java.io.Serializable;
 import java.math.BigDecimal;
 import java.nio.ByteBuffer;
@@ -48,6 +50,7 @@ public interface Type extends Serializable {
   /**
    * Enums for type names.
    */
+  @Getter
   enum TypeID {
     RECORD(Types.RecordType.class),
     ARRAY(List.class),
@@ -79,14 +82,6 @@ public interface Type extends Serializable {
       this.name = this.name().toLowerCase(Locale.ROOT);
       this.classTag = classTag;
     }
-
-    public String getName() {
-      return name;
-    }
-
-    public Class<?> getClassTag() {
-      return classTag;
-    }
   }
 
   static TypeID fromValue(String value) {
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java
index 9ae3bf6bdbfa..86ec29da9c45 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java
@@ -21,6 +21,10 @@ package org.apache.hudi.internal.schema;
 import org.apache.hudi.internal.schema.Type.NestedType;
 import org.apache.hudi.internal.schema.Type.PrimitiveType;
 
+import lombok.AccessLevel;
+import lombok.Getter;
+import lombok.NoArgsConstructor;
+
 import java.io.Serializable;
 import java.util.Arrays;
 import java.util.Collections;
@@ -33,9 +37,8 @@ import java.util.stream.Collectors;
 /**
  * Types supported in schema evolution.
  */
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
 public class Types {
-  private Types() {
-  }
 
   /**
    * Boolean primitive type.
@@ -166,6 +169,7 @@ public class Types {
   /**
    * Time primitive type.
    */
+  @NoArgsConstructor(access = AccessLevel.PRIVATE)
   public static class TimeType extends PrimitiveType {
     private static final TimeType INSTANCE = new TimeType();
 
@@ -173,9 +177,6 @@ public class Types {
       return INSTANCE;
     }
 
-    private TimeType() {
-    }
-
     @Override
     public TypeID typeId() {
       return TypeID.TIME;
@@ -190,6 +191,7 @@ public class Types {
   /**
    * Time primitive type.
    */
+  @NoArgsConstructor(access = AccessLevel.PRIVATE)
   public static class TimestampType extends PrimitiveType {
     private static final TimestampType INSTANCE = new TimestampType();
 
@@ -197,9 +199,6 @@ public class Types {
       return INSTANCE;
     }
 
-    private TimestampType() {
-    }
-
     @Override
     public TypeID typeId() {
       return TypeID.TIMESTAMP;
@@ -583,12 +582,14 @@ public class Types {
       return new Field(true, id, name, type, null, null);
     }
 
+    @Getter
     private final boolean isOptional;
     private final int id;
     private final String name;
     private final Type type;
     private final String doc;
     // Experimental properties
+    @Getter
     private final Object defaultValue;
 
     private Field(boolean isOptional, int id, String name, Type type, String 
doc, Object defaultValue) {
@@ -600,14 +601,6 @@ public class Types {
       this.defaultValue = defaultValue;
     }
 
-    public Object getDefaultValue() {
-      return defaultValue;
-    }
-
-    public boolean isOptional() {
-      return isOptional;
-    }
-
     public int fieldId() {
       return id;
     }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java
 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java
index e9932266b657..bb70015cd22a 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java
@@ -25,6 +25,9 @@ import org.apache.hudi.internal.schema.InternalSchema;
 import org.apache.hudi.internal.schema.InternalSchemaBuilder;
 import org.apache.hudi.internal.schema.Types;
 
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -40,17 +43,15 @@ public interface TableChange {
   /**
    * The action Type of schema change.
    */
+  @Getter
   enum ColumnChangeID {
     ADD, UPDATE, DELETE, PROPERTY_CHANGE, REPLACE;
+
     private final String name;
 
     ColumnChangeID() {
       this.name = this.name().toLowerCase(Locale.ROOT);
     }
-
-    public String getName() {
-      return name;
-    }
   }
 
   static ColumnChangeID fromValue(String value) {
@@ -185,7 +186,10 @@ public interface TableChange {
    * AFTER/BEFORE means the given columns should in the same struct;
    * FIRST means this field should be the first one within the struct.
    */
+  @AllArgsConstructor
+  @Getter
   class ColumnPositionChange {
+
     public enum ColumnPositionType {
       FIRST,
       BEFORE,
@@ -241,23 +245,5 @@ public interface TableChange {
           throw new IllegalArgumentException(String.format("only support 
first/before/after but found: %s", type));
       }
     }
-
-    private ColumnPositionChange(int srcId, int dsrId, ColumnPositionType 
type) {
-      this.srcId = srcId;
-      this.dsrId = dsrId;
-      this.type = type;
-    }
-
-    public int getSrcId() {
-      return srcId;
-    }
-
-    public int getDsrId() {
-      return dsrId;
-    }
-
-    public ColumnPositionType type() {
-      return type;
-    }
   }
 }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChanges.java
 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChanges.java
index cd1cdb289951..05d9e0bfa3f3 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChanges.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChanges.java
@@ -26,6 +26,8 @@ import org.apache.hudi.internal.schema.Type;
 import org.apache.hudi.internal.schema.Types;
 import org.apache.hudi.internal.schema.utils.SchemaChangeUtils;
 
+import lombok.Getter;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -42,6 +44,8 @@ public class TableChanges {
 
   /** Deal with update columns changes for table. */
   public static class ColumnUpdateChange extends TableChange.BaseColumnChange {
+
+    @Getter
     private final Map<Integer, Types.Field> updates = new HashMap<>();
 
     private ColumnUpdateChange(InternalSchema schema) {
@@ -72,10 +76,6 @@ public class TableChanges {
       return type;
     }
 
-    public Map<Integer, Types.Field> getUpdates() {
-      return updates;
-    }
-
     /**
      * Update a column in the schema to a new type.
      * only support update primitive type.
@@ -242,6 +242,7 @@ public class TableChanges {
 
   /** Deal with delete columns changes for table. */
   public static class ColumnDeleteChange extends TableChange.BaseColumnChange {
+    @Getter
     private final Set<Integer> deletes = new HashSet<>();
 
     @Override
@@ -284,10 +285,6 @@ public class TableChanges {
       return type;
     }
 
-    public Set<Integer> getDeletes() {
-      return deletes;
-    }
-
     @Override
     protected Integer findIdByFullName(String fullName) {
       throw new UnsupportedOperationException("delete change cannot support 
this method");
@@ -298,7 +295,9 @@ public class TableChanges {
    * Deal with add columns changes for table.
    */
   public static class ColumnAddChange extends TableChange.BaseColumnChange {
+    @Getter
     private final Map<String, Integer> fullColName2Id = new HashMap<>();
+    @Getter
     private final Map<Integer, ArrayList<Types.Field>> parentId2AddCols = new 
HashMap<>();
     private int nextId;
 
@@ -376,19 +375,10 @@ public class TableChanges {
       this.nextId = internalSchema.getMaxColumnId() + 1;
     }
 
-    public Map<Integer, ArrayList<Types.Field>> getParentId2AddCols() {
-      return parentId2AddCols;
-    }
-
     public Map<Integer, ArrayList<ColumnPositionChange>> 
getPositionChangeMap() {
       return positionChangeMap;
     }
 
-    // expose to test
-    public Map<String, Integer> getFullColName2Id() {
-      return fullColName2Id;
-    }
-
     protected Integer findIdByFullName(String fullName) {
       Types.Field field = internalSchema.findField(fullName);
       if (field != null) {
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChangesHelper.java
 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChangesHelper.java
index 80b9c6298dd8..27d99c119443 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChangesHelper.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChangesHelper.java
@@ -28,6 +28,7 @@ import java.util.List;
  * Helper class to support Table schema changes.
  */
 public class TableChangesHelper {
+
   /**
    * Apply add operation and column position change operation.
    *
@@ -52,7 +53,7 @@ public class TableChangesHelper {
         Types.Field dsrField = result.stream().filter(f -> f.fieldId() == 
pchange.getDsrId()).findFirst().orElse(null);
         // we remove srcField first
         result.remove(srcField);
-        switch (pchange.type()) {
+        switch (pchange.getType()) {
           case AFTER:
             // add srcField after dsrField
             result.add(result.indexOf(dsrField) + 1, srcField);
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/io/FileBasedInternalSchemaStorageManager.java
 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/io/FileBasedInternalSchemaStorageManager.java
index 9aa964e5aa66..f3a591d6fbd0 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/io/FileBasedInternalSchemaStorageManager.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/io/FileBasedInternalSchemaStorageManager.java
@@ -22,19 +22,18 @@ import 
org.apache.hudi.common.config.HoodieTimeGeneratorConfig;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
 import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
-import org.apache.hudi.io.util.FileIOUtils;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.internal.schema.InternalSchema;
 import org.apache.hudi.internal.schema.utils.InternalSchemaUtils;
 import org.apache.hudi.internal.schema.utils.SerDeHelper;
+import org.apache.hudi.io.util.FileIOUtils;
 import org.apache.hudi.storage.HoodieInstantWriter;
 import org.apache.hudi.storage.HoodieStorage;
 import org.apache.hudi.storage.StoragePath;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.extern.slf4j.Slf4j;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -51,8 +50,8 @@ import static 
org.apache.hudi.common.util.StringUtils.getUTF8Bytes;
 /**
  * {@link AbstractInternalSchemaStorageManager} implementation based on the 
schema files.
  */
+@Slf4j
 public class FileBasedInternalSchemaStorageManager extends 
AbstractInternalSchemaStorageManager {
-  private static final Logger LOG = 
LoggerFactory.getLogger(FileBasedInternalSchemaStorageManager.class);
 
   public static final String SCHEMA_NAME = ".schema";
   private final StoragePath baseSchemaPath;
@@ -96,7 +95,7 @@ public class FileBasedInternalSchemaStorageManager extends 
AbstractInternalSchem
     timeline.saveAsComplete(false, metaClient.createNewInstant(
         HoodieInstant.State.INFLIGHT, hoodieInstant.getAction(), 
hoodieInstant.requestedTime()),
         Option.of(HoodieInstantWriter.convertByteArrayToWriter(writeContent)));
-    LOG.info(String.format("persist history schema success on commit time: 
%s", instantTime));
+    log.info("persist history schema success on commit time: {}", instantTime);
   }
 
   private void cleanResidualFiles() {
@@ -167,7 +166,7 @@ public class FileBasedInternalSchemaStorageManager extends 
AbstractInternalSchem
           byte[] content;
           try (InputStream is = storage.open(latestFilePath)) {
             content = FileIOUtils.readAsByteArray(is);
-            LOG.info(String.format("read history schema success from file : 
%s", latestFilePath));
+            log.info("read history schema success from file: {}", 
latestFilePath);
             return fromUTF8Bytes(content);
           } catch (IOException e) {
             throw new HoodieIOException("Could not read history schema from " 
+ latestFilePath, e);
@@ -177,7 +176,7 @@ public class FileBasedInternalSchemaStorageManager extends 
AbstractInternalSchem
     } catch (IOException io) {
       throw new HoodieException(io);
     }
-    LOG.info("failed to read history schema");
+    log.info("failed to read history schema");
     return "";
   }
 
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java
 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java
index c1fbc902f758..46538c02df90 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java
@@ -25,6 +25,9 @@ import org.apache.hudi.internal.schema.Type;
 import org.apache.hudi.internal.schema.Types;
 import org.apache.hudi.internal.schema.Types.Field;
 
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+
 import java.util.ArrayList;
 import java.util.Deque;
 import java.util.HashMap;
@@ -39,11 +42,9 @@ import java.util.stream.Collectors;
  * Util methods to help us do some operations on InternalSchema.
  * eg: column prune, filter rebuild for query engine...
  */
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
 public class InternalSchemaUtils {
 
-  private InternalSchemaUtils() {
-  }
-
   /**
    * Create project internalSchema, based on the project names which produced 
by query engine.
    * support nested project.
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SchemaChangeUtils.java
 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SchemaChangeUtils.java
index 65f452d975f7..f02407b986ed 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SchemaChangeUtils.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SchemaChangeUtils.java
@@ -24,16 +24,17 @@ import org.apache.hudi.internal.schema.Types;
 import org.apache.hudi.internal.schema.action.TableChanges;
 import org.apache.hudi.internal.schema.action.TableChangesHelper;
 
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+
 import java.util.ArrayList;
 import java.util.List;
 
 /**
  * Helper methods for schema Change.
  */
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
 public class SchemaChangeUtils {
-  private SchemaChangeUtils() {
-
-  }
 
   /**
    * Whether to allow the column type to be updated.
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java
 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java
index 9dc86178bb32..953bf60f93a5 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java
@@ -29,6 +29,8 @@ import org.apache.hudi.internal.schema.Types;
 import com.fasterxml.jackson.core.JsonFactory;
 import com.fasterxml.jackson.core.JsonGenerator;
 import com.fasterxml.jackson.databind.JsonNode;
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
 
 import java.io.IOException;
 import java.io.StringWriter;
@@ -44,10 +46,8 @@ import java.util.regex.Pattern;
 /**
  * Utils of serialization and deserialization.
  */
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
 public class SerDeHelper {
-  private SerDeHelper() {
-
-  }
 
   public static final String LATEST_SCHEMA = "latest_schema";
   public static final String SCHEMAS = "schemas";
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieHFileConfig.java 
b/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieHFileConfig.java
index e175b95f2724..b459e5ff8ff6 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieHFileConfig.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieHFileConfig.java
@@ -23,53 +23,21 @@ import org.apache.hudi.common.bloom.BloomFilter;
 import org.apache.hudi.io.compress.CompressionCodec;
 import org.apache.hudi.storage.StorageConfiguration;
 
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+
+@AllArgsConstructor
+@Getter
 public class HoodieHFileConfig {
+
+  private final StorageConfiguration storageConf;
   private final CompressionCodec compressionCodec;
   private final int blockSize;
   private final long maxFileSize;
-  private final StorageConfiguration storageConf;
-  private final BloomFilter bloomFilter;
   private final String keyFieldName;
-
-  public HoodieHFileConfig(StorageConfiguration storageConf,
-                           CompressionCodec compressionCodec,
-                           int blockSize,
-                           long maxFileSize,
-                           String keyFieldName,
-                           BloomFilter bloomFilter) {
-    this.storageConf = storageConf;
-    this.compressionCodec = compressionCodec;
-    this.blockSize = blockSize;
-    this.maxFileSize = maxFileSize;
-    this.bloomFilter = bloomFilter;
-    this.keyFieldName = keyFieldName;
-  }
-
-  public StorageConfiguration getStorageConf() {
-    return storageConf;
-  }
-
-  public CompressionCodec getCompressionCodec() {
-    return compressionCodec;
-  }
-
-  public int getBlockSize() {
-    return blockSize;
-  }
-
-  public long getMaxFileSize() {
-    return maxFileSize;
-  }
+  private final BloomFilter bloomFilter;
 
   public boolean useBloomFilter() {
     return bloomFilter != null;
   }
-
-  public BloomFilter getBloomFilter() {
-    return bloomFilter;
-  }
-
-  public String getKeyFieldName() {
-    return keyFieldName;
-  }
 }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieNativeAvroHFileReader.java
 
b/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieNativeAvroHFileReader.java
index 9ab010fc7d88..1358ed417235 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieNativeAvroHFileReader.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieNativeAvroHFileReader.java
@@ -42,11 +42,10 @@ import org.apache.hudi.io.hfile.UTF8StringKey;
 import org.apache.hudi.storage.StoragePath;
 import org.apache.hudi.util.Lazy;
 
+import lombok.extern.slf4j.Slf4j;
 import org.apache.avro.generic.GenericDatumReader;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.generic.IndexedRecord;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -70,8 +69,9 @@ import static 
org.apache.hudi.io.hfile.HFileUtils.isPrefixOfKey;
 /**
  * An implementation of {@link HoodieAvroHFileReaderImplBase} using native 
{@link HFileReader}.
  */
+@Slf4j
 public class HoodieNativeAvroHFileReader extends HoodieAvroHFileReaderImplBase 
{
-  private static final Logger LOG = 
LoggerFactory.getLogger(HoodieNativeAvroHFileReader.class);
+
   // Keys of the meta info that should be preloaded on demand from the HFile
   private static final Set<String> PRELOADED_META_INFO_KEYS = new HashSet<>(
       Arrays.asList(KEY_MIN_RECORD, KEY_MAX_RECORD, SCHEMA_KEY));
@@ -140,7 +140,7 @@ public class HoodieNativeAvroHFileReader extends 
HoodieAvroHFileReaderImplBase {
             try {
               return reader.seekTo(new UTF8StringKey(k)) == 
HFileReader.SEEK_TO_FOUND;
             } catch (IOException e) {
-              LOG.error("Failed to check key availability: " + k);
+              log.error("Failed to check key availability: {}", k);
               return false;
             }
           })
@@ -426,7 +426,7 @@ public class HoodieNativeAvroHFileReader extends 
HoodieAvroHFileReaderImplBase {
         try {
           bloomFilter = readBloomFilter(reader);
         } catch (HoodieException e) {
-          LOG.warn("Unable to read bloom filter from HFile", e);
+          log.warn("Unable to read bloom filter from HFile", e);
         }
       }
       this.bloomFilterOption = Option.ofNullable(bloomFilter);
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieOrcConfig.java 
b/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieOrcConfig.java
index 7cac57fa9195..b4e5821996b8 100644
--- a/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieOrcConfig.java
+++ b/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieOrcConfig.java
@@ -21,53 +21,24 @@ package org.apache.hudi.io.storage;
 import org.apache.hudi.common.bloom.BloomFilter;
 import org.apache.hudi.storage.StorageConfiguration;
 
+import lombok.AllArgsConstructor;
+import lombok.Getter;
 import org.apache.orc.CompressionKind;
 
+@AllArgsConstructor
+@Getter
 public class HoodieOrcConfig {
+
   public static final String AVRO_SCHEMA_METADATA_KEY = "orc.avro.schema";
 
+  private final StorageConfiguration<?> storageConf;
   private final CompressionKind compressionKind;
   private final int stripeSize;
   private final int blockSize;
   private final long maxFileSize;
-  private final StorageConfiguration<?> storageConf;
   private final BloomFilter bloomFilter;
 
-  public HoodieOrcConfig(StorageConfiguration<?> storageConf, CompressionKind 
compressionKind, int stripeSize,
-      int blockSize, long maxFileSize, BloomFilter bloomFilter) {
-    this.storageConf = storageConf;
-    this.compressionKind = compressionKind;
-    this.stripeSize = stripeSize;
-    this.blockSize = blockSize;
-    this.maxFileSize = maxFileSize;
-    this.bloomFilter = bloomFilter;
-  }
-
-  public StorageConfiguration<?> getStorageConf() {
-    return storageConf;
-  }
-
-  public CompressionKind getCompressionKind() {
-    return compressionKind;
-  }
-
-  public int getStripeSize() {
-    return stripeSize;
-  }
-
-  public int getBlockSize() {
-    return blockSize;
-  }
-
-  public long getMaxFileSize() {
-    return maxFileSize;
-  }
-
   public boolean useBloomFilter() {
     return bloomFilter != null;
   }
-
-  public BloomFilter getBloomFilter() {
-    return bloomFilter;
-  }
 }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/keygen/BaseKeyGenerator.java 
b/hudi-common/src/main/java/org/apache/hudi/keygen/BaseKeyGenerator.java
index 76ed82883b73..3c19ae5cfe9d 100644
--- a/hudi-common/src/main/java/org/apache/hudi/keygen/BaseKeyGenerator.java
+++ b/hudi-common/src/main/java/org/apache/hudi/keygen/BaseKeyGenerator.java
@@ -22,6 +22,7 @@ import org.apache.hudi.common.model.HoodieKey;
 import org.apache.hudi.exception.HoodieKeyException;
 import org.apache.hudi.keygen.constant.KeyGeneratorOptions;
 
+import lombok.Getter;
 import org.apache.avro.generic.GenericRecord;
 
 import java.util.List;
@@ -36,9 +37,11 @@ public abstract class BaseKeyGenerator extends KeyGenerator {
   public static final String CUSTOM_KEY_GENERATOR_SPLIT_REGEX = ":";
   public static final String FIELD_SEPARATOR = ",";
   protected List<String> recordKeyFields;
+  @Getter
   protected List<String> partitionPathFields;
   protected final boolean encodePartitionPath;
   protected final boolean hiveStylePartitioning;
+  @Getter
   protected final boolean consistentLogicalTimestampEnabled;
 
   protected BaseKeyGenerator(TypedProperties config) {
@@ -76,12 +79,4 @@ public abstract class BaseKeyGenerator extends KeyGenerator {
   public List<String> getRecordKeyFieldNames() {
     return recordKeyFields;
   }
-
-  public List<String> getPartitionPathFields() {
-    return partitionPathFields;
-  }
-
-  public boolean isConsistentLogicalTimestampEnabled() {
-    return consistentLogicalTimestampEnabled;
-  }
 }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorType.java
 
b/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorType.java
index 7d7c0194dbe3..a5e5e0b2b3e2 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorType.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/keygen/constant/KeyGeneratorType.java
@@ -25,8 +25,10 @@ import org.apache.hudi.common.config.TypedProperties;
 import org.apache.hudi.common.util.ConfigUtils;
 import org.apache.hudi.common.util.StringUtils;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.AccessLevel;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
 
 import javax.annotation.Nullable;
 
@@ -41,8 +43,11 @@ import static 
org.apache.hudi.common.table.HoodieTableConfig.KEY_GENERATOR_TYPE;
 /**
  * Types of {@link org.apache.hudi.keygen.KeyGenerator}.
  */
+@AllArgsConstructor(access = AccessLevel.PACKAGE)
+@Getter
 @EnumDescription("Key generator type, indicating the key generator class to 
use, that implements "
     + "`org.apache.hudi.keygen.KeyGenerator`.")
+@Slf4j
 public enum KeyGeneratorType {
 
   @EnumFieldDescription("Simple key generator, which takes names of fields to 
be used for recordKey and partitionPath as configs.")
@@ -104,15 +109,6 @@ public enum KeyGeneratorType {
   USER_PROVIDED(StringUtils.EMPTY_STRING);
 
   private String className;
-  private static final Logger LOG = 
LoggerFactory.getLogger(KeyGeneratorType.class);
-
-  KeyGeneratorType(String className) {
-    this.className = className;
-  }
-
-  public String getClassName() {
-    return className;
-  }
 
   public static KeyGeneratorType fromClassName(String className) {
     if (StringUtils.isNullOrEmpty(className)) {
@@ -150,7 +146,7 @@ public enum KeyGeneratorType {
       return keyGeneratorType.getClassName();
     }
     // No key generator information is provided.
-    LOG.info("No key generator type is set properly");
+    log.info("No key generator type is set properly");
     return null;
   }
 
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java 
b/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java
index e3cf7d528654..a567a8053c06 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metadata/BaseTableMetadata.java
@@ -44,8 +44,8 @@ import org.apache.hudi.storage.StorageConfiguration;
 import org.apache.hudi.storage.StoragePath;
 import org.apache.hudi.storage.StoragePathInfo;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -62,14 +62,15 @@ import java.util.stream.Collectors;
 /**
  * Abstract class for implementing common table metadata operations.
  */
+@Slf4j
 public abstract class BaseTableMetadata extends AbstractHoodieTableMetadata {
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(BaseTableMetadata.class);
-
   protected final HoodieTableMetaClient dataMetaClient;
   protected final Option<HoodieMetadataMetrics> metrics;
+  @Getter
   protected final HoodieMetadataConfig metadataConfig;
 
+  @Getter
   protected boolean isMetadataTableInitialized;
   protected final boolean hiveStylePartitioningEnabled;
   protected final boolean urlEncodePartitioningEnabled;
@@ -164,14 +165,14 @@ public abstract class BaseTableMetadata extends 
AbstractHoodieTableMetadata {
   @Override
   public Option<BloomFilter> getBloomFilter(final String partitionName, final 
String fileName, final String metadataPartitionName) throws 
HoodieMetadataException {
     if 
(!dataMetaClient.getTableConfig().getMetadataPartitions().contains(metadataPartitionName))
 {
-      LOG.error("Metadata bloom filter index is disabled!");
+      log.error("Metadata bloom filter index is disabled!");
       return Option.empty();
     }
 
     final Pair<String, String> partitionFileName = Pair.of(partitionName, 
fileName);
     Map<Pair<String, String>, BloomFilter> bloomFilters = 
getBloomFilters(Collections.singletonList(partitionFileName), 
metadataPartitionName);
     if (bloomFilters.isEmpty()) {
-      LOG.error("Meta index: missing bloom filter for partition: {}, file: 
{}", partitionName, fileName);
+      log.error("Meta index: missing bloom filter for partition: {}, file: 
{}", partitionName, fileName);
       return Option.empty();
     }
 
@@ -183,7 +184,7 @@ public abstract class BaseTableMetadata extends 
AbstractHoodieTableMetadata {
   public Map<Pair<String, String>, BloomFilter> getBloomFilters(final 
List<Pair<String, String>> partitionNameFileNameList, final String 
metadataPartitionName)
       throws HoodieMetadataException {
     if 
(!dataMetaClient.getTableConfig().getMetadataPartitions().contains(metadataPartitionName))
 {
-      LOG.error("Metadata bloom filter index is disabled!");
+      log.error("Metadata bloom filter index is disabled!");
       return Collections.emptyMap();
     }
     if (partitionNameFileNameList.isEmpty()) {
@@ -228,7 +229,7 @@ public abstract class BaseTableMetadata extends 
AbstractHoodieTableMetadata {
           partitionFileToBloomFilterMap.put(fileToKeyMap.get(entry.getKey()), 
bloomFilter);
         }
       } else {
-        LOG.error("Meta index bloom filter missing for: {}", 
fileToKeyMap.get(entry.getKey()));
+        log.error("Meta index bloom filter missing for: {}", 
fileToKeyMap.get(entry.getKey()));
       }
     }
     return partitionFileToBloomFilterMap;
@@ -246,7 +247,7 @@ public abstract class BaseTableMetadata extends 
AbstractHoodieTableMetadata {
   public Map<Pair<String, String>, List<HoodieMetadataColumnStats>> 
getColumnStats(List<Pair<String, String>> partitionNameFileNameList, 
List<String> columnNames)
       throws HoodieMetadataException {
     if 
(!dataMetaClient.getTableConfig().isMetadataPartitionAvailable(MetadataPartitionType.COLUMN_STATS))
 {
-      LOG.error("Metadata column stats index is disabled!");
+      log.error("Metadata column stats index is disabled!");
       return Collections.emptyMap();
     }
 
@@ -276,7 +277,7 @@ public abstract class BaseTableMetadata extends 
AbstractHoodieTableMetadata {
     })
         .orElse(Collections.emptyList());
 
-    LOG.info("Listed partitions from metadata: #partitions={}", 
partitions.size());
+    log.info("Listed partitions from metadata: #partitions={}", 
partitions.size());
     return partitions;
   }
 
@@ -305,7 +306,7 @@ public abstract class BaseTableMetadata extends 
AbstractHoodieTableMetadata {
         })
         .orElseGet(Collections::emptyList);
 
-    LOG.debug("Listed file in partition from metadata: partition={}, 
#files={}", relativePartitionPath, pathInfoList.size());
+    log.debug("Listed file in partition from metadata: partition={}, 
#files={}", relativePartitionPath, pathInfoList.size());
     return pathInfoList;
   }
 
@@ -350,7 +351,7 @@ public abstract class BaseTableMetadata extends 
AbstractHoodieTableMetadata {
             })
             .collect(Collectors.toMap(Pair::getKey, Pair::getValue));
 
-    LOG.info("Listed files in {} partitions from metadata", 
partitionPaths.size());
+    log.info("Listed files in {} partitions from metadata", 
partitionPaths.size());
 
     return partitionPathToFilesMap;
   }
@@ -409,7 +410,7 @@ public abstract class BaseTableMetadata extends 
AbstractHoodieTableMetadata {
         final Pair<String, String> partitionFileNamePair = 
columnStatKeyToFileNameMap.get(entry.getKey());
         fileToColumnStatMap.computeIfAbsent(partitionFileNamePair, k -> new 
ArrayList<>()).add(columnStatMetadata.get());
       } else {
-        LOG.error("Meta index column stats missing for {}", entry.getKey());
+        log.error("Meta index column stats missing for {}", entry.getKey());
       }
     }
     return fileToColumnStatMap;
@@ -421,7 +422,7 @@ public abstract class BaseTableMetadata extends 
AbstractHoodieTableMetadata {
   private void checkForSpuriousDeletes(HoodieMetadataPayload metadataPayload, 
String partitionName) {
     if (!metadataPayload.getDeletions().isEmpty()) {
       if (metadataConfig.shouldIgnoreSpuriousDeletes()) {
-        LOG.warn("Metadata record for {} encountered some files to be deleted 
which were not added before."
+        log.warn("Metadata record for {} encountered some files to be deleted 
which were not added before."
                 + " Ignoring the spurious deletes as the `{}` config is set to 
true",
             partitionName, HoodieMetadataConfig.IGNORE_SPURIOUS_DELETES.key());
       } else {
@@ -472,10 +473,6 @@ public abstract class BaseTableMetadata extends 
AbstractHoodieTableMetadata {
    */
   public abstract HoodiePairData<String, String> 
readSecondaryIndexDataTableRecordKeysWithKeys(HoodieData<String> keys, String 
partitionName);
 
-  public HoodieMetadataConfig getMetadataConfig() {
-    return metadataConfig;
-  }
-
   protected StorageConfiguration<?> getStorageConf() {
     return dataMetaClient.getStorageConf();
   }
@@ -484,8 +481,4 @@ public abstract class BaseTableMetadata extends 
AbstractHoodieTableMetadata {
     return 
dataMetaClient.getActiveTimeline().filterCompletedInstants().lastInstant()
         .map(HoodieInstant::requestedTime).orElse(SOLO_COMMIT_TIMESTAMP);
   }
-
-  public boolean isMetadataTableInitialized() {
-    return isMetadataTableInitialized;
-  }
 }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/BloomFilterIndexRawKey.java
 
b/hudi-common/src/main/java/org/apache/hudi/metadata/BloomFilterIndexRawKey.java
index 911a3d78380d..1808494f8afc 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metadata/BloomFilterIndexRawKey.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metadata/BloomFilterIndexRawKey.java
@@ -22,19 +22,19 @@ package org.apache.hudi.metadata;
 import org.apache.hudi.common.util.hash.FileIndexID;
 import org.apache.hudi.common.util.hash.PartitionIndexID;
 
-import java.util.Objects;
+import lombok.NonNull;
+import lombok.Value;
 
 /**
  * Represents a raw key for bloom filter metadata.
  */
+@Value
 public class BloomFilterIndexRawKey implements RawKey {
-  private final String partitionName;
-  private final String fileName;
 
-  public BloomFilterIndexRawKey(String partitionName, String fileName) {
-    this.partitionName = Objects.requireNonNull(partitionName);
-    this.fileName = Objects.requireNonNull(fileName);
-  }
+  @NonNull
+  String partitionName;
+  @NonNull
+  String fileName;
 
   @Override
   public String encode() {
@@ -42,34 +42,4 @@ public class BloomFilterIndexRawKey implements RawKey {
         new 
PartitionIndexID(HoodieTableMetadataUtil.getBloomFilterIndexPartitionIdentifier(partitionName)),
         new FileIndexID(fileName));
   }
-
-  public String getPartitionName() {
-    return partitionName;
-  }
-
-  public String getFileName() {
-    return fileName;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    BloomFilterIndexRawKey that = (BloomFilterIndexRawKey) o;
-    return Objects.equals(partitionName, that.partitionName) && 
Objects.equals(fileName, that.fileName);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(partitionName, fileName);
-  }
-
-  @Override
-  public String toString() {
-    return "BloomFilterRawKey{" + "partitionName='" + partitionName + '\'' + 
", fileName='" + fileName + '\'' + '}';
-  }
 }
\ No newline at end of file
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/ColumnStatsIndexPrefixRawKey.java
 
b/hudi-common/src/main/java/org/apache/hudi/metadata/ColumnStatsIndexPrefixRawKey.java
index cb20d6201953..032b631f2829 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metadata/ColumnStatsIndexPrefixRawKey.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metadata/ColumnStatsIndexPrefixRawKey.java
@@ -22,16 +22,23 @@ import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.hash.ColumnIndexID;
 import org.apache.hudi.common.util.hash.PartitionIndexID;
 
-import java.util.Objects;
+import lombok.AllArgsConstructor;
+import lombok.NonNull;
+import lombok.Value;
 
 /**
  * Represents a raw key for column stats index consisting of column name and 
optional partition name.
  */
+@AllArgsConstructor
+@Value
 public class ColumnStatsIndexPrefixRawKey implements RawKey {
+
   private static final long serialVersionUID = 1L;
-  
-  private final String columnName;
-  private final Option<String> partitionName;
+
+  @NonNull
+  String columnName;
+  @NonNull
+  Option<String> partitionName;
   
   public ColumnStatsIndexPrefixRawKey(String columnName) {
     this(columnName, Option.empty());
@@ -41,19 +48,6 @@ public class ColumnStatsIndexPrefixRawKey implements RawKey {
     this(columnName, Option.of(partitionName));
   }
   
-  public ColumnStatsIndexPrefixRawKey(String columnName, Option<String> 
partitionName) {
-    this.columnName = Objects.requireNonNull(columnName, "Column name cannot 
be null");
-    this.partitionName = Objects.requireNonNull(partitionName, "Partition name 
option cannot be null");
-  }
-  
-  public String getColumnName() {
-    return columnName;
-  }
-  
-  public Option<String> getPartitionName() {
-    return partitionName;
-  }
-  
   @Override
   public String encode() {
     ColumnIndexID columnIndexID = new ColumnIndexID(columnName);
@@ -70,26 +64,4 @@ public class ColumnStatsIndexPrefixRawKey implements RawKey {
     
     return encodedValue;
   }
-  
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    ColumnStatsIndexPrefixRawKey that = (ColumnStatsIndexPrefixRawKey) o;
-    return Objects.equals(columnName, that.columnName) && 
Objects.equals(partitionName, that.partitionName);
-  }
-  
-  @Override
-  public int hashCode() {
-    return Objects.hash(columnName, partitionName);
-  }
-  
-  @Override
-  public String toString() {
-    return "ColumnStatsIndexKey{columnName='" + columnName + "', 
partitionName=" + partitionName + "}";
-  }
 }
\ No newline at end of file
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/ColumnStatsIndexRawKey.java
 
b/hudi-common/src/main/java/org/apache/hudi/metadata/ColumnStatsIndexRawKey.java
index 3cab656a8275..3553c2278645 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metadata/ColumnStatsIndexRawKey.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metadata/ColumnStatsIndexRawKey.java
@@ -23,22 +23,22 @@ import org.apache.hudi.common.util.hash.ColumnIndexID;
 import org.apache.hudi.common.util.hash.FileIndexID;
 import org.apache.hudi.common.util.hash.PartitionIndexID;
 
-import java.util.Objects;
+import lombok.NonNull;
+import lombok.Value;
 
 /**
  * Represents a raw key for column stats indexed by partition, file and column.
  * This is different from ColumnStatsIndexPrefixRawKey which is used for 
prefix lookups.
  */
+@Value
 public class ColumnStatsIndexRawKey implements RawKey {
-  private final String partitionName;
-  private final String fileName;
-  private final String columnName;
 
-  public ColumnStatsIndexRawKey(String partitionName, String fileName, String 
columnName) {
-    this.partitionName = Objects.requireNonNull(partitionName);
-    this.fileName = Objects.requireNonNull(fileName);
-    this.columnName = Objects.requireNonNull(columnName);
-  }
+  @NonNull
+  String partitionName;
+  @NonNull
+  String fileName;
+  @NonNull
+  String columnName;
 
   @Override
   public String encode() {
@@ -47,41 +47,4 @@ public class ColumnStatsIndexRawKey implements RawKey {
         new FileIndexID(fileName),
         new ColumnIndexID(columnName));
   }
-
-  public String getPartitionName() {
-    return partitionName;
-  }
-
-  public String getFileName() {
-    return fileName;
-  }
-
-  public String getColumnName() {
-    return columnName;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    ColumnStatsIndexRawKey that = (ColumnStatsIndexRawKey) o;
-    return Objects.equals(partitionName, that.partitionName)
-        && Objects.equals(fileName, that.fileName)
-        && Objects.equals(columnName, that.columnName);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(partitionName, fileName, columnName);
-  }
-
-  @Override
-  public String toString() {
-    return "ColumnStatsFileRawKey{" + "partitionName='" + partitionName + '\''
-        + ", fileName='" + fileName + '\'' + ", columnName='" + columnName + 
'\'' + '}';
-  }
 }
\ No newline at end of file
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/EmptyHoodieRecordPayloadWithPartition.java
 
b/hudi-common/src/main/java/org/apache/hudi/metadata/EmptyHoodieRecordPayloadWithPartition.java
index ec55d0aedf19..023c1b9fae90 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metadata/EmptyHoodieRecordPayloadWithPartition.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metadata/EmptyHoodieRecordPayloadWithPartition.java
@@ -21,6 +21,9 @@ package org.apache.hudi.metadata;
 
 import org.apache.hudi.common.model.EmptyHoodieRecordPayload;
 
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+
 /**
 * NOTE: This payload is intended to be used in-memory ONLY. DO NOT use this 
in any serializing
  * Used to store the data table partition for delete records. The hoodie 
record stores the recordkey and
@@ -29,14 +32,9 @@ import org.apache.hudi.common.model.EmptyHoodieRecordPayload;
  * data table partition path will be in the metadata payload, but obviously 
the empty payload doesn't store
  * that info. Hence, this wrapper class
  */
+@AllArgsConstructor
+@Getter
 public class EmptyHoodieRecordPayloadWithPartition extends 
EmptyHoodieRecordPayload {
-  private final String partitionPath;
 
-  public EmptyHoodieRecordPayloadWithPartition(String partitionPath) {
-    this.partitionPath = partitionPath;
-  }
-
-  public String getPartitionPath() {
-    return partitionPath;
-  }
+  private final String partitionPath;
 }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/FilesIndexRawKey.java 
b/hudi-common/src/main/java/org/apache/hudi/metadata/FilesIndexRawKey.java
index 78fb9a70536a..2389fc0ee956 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metadata/FilesIndexRawKey.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metadata/FilesIndexRawKey.java
@@ -19,48 +19,22 @@
 
 package org.apache.hudi.metadata;
 
-import java.util.Objects;
+import lombok.NonNull;
+import lombok.Value;
 
 /**
  * Represents a raw key for the FILES partition in the metadata table.
  * This uses identity encoding - the key is used as-is without transformation.
  */
+@Value
 public class FilesIndexRawKey implements RawKey {
-  private final String key;
 
-  public FilesIndexRawKey(String key) {
-    this.key = Objects.requireNonNull(key);
-  }
+  @NonNull
+  String key;
 
   @Override
   public String encode() {
     // Identity encoding - return the key as-is
     return key;
   }
-
-  public String getKey() {
-    return key;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    FilesIndexRawKey that = (FilesIndexRawKey) o;
-    return Objects.equals(key, that.key);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(key);
-  }
-
-  @Override
-  public String toString() {
-    return "FilesPartitionRawKey{" + "key='" + key + '\'' + '}';
-  }
 }
\ No newline at end of file
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java
 
b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java
index 5f83a5a6cc42..61c34915dcc2 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java
@@ -73,10 +73,10 @@ import org.apache.hudi.storage.HoodieStorage;
 import org.apache.hudi.storage.StoragePath;
 import org.apache.hudi.storage.StoragePathInfo;
 
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.generic.IndexedRecord;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -107,14 +107,14 @@ import static 
org.apache.hudi.metadata.MetadataPartitionType.RECORD_INDEX;
 /**
  * Table metadata provided by an internal DFS backed Hudi metadata table.
  */
+@Slf4j
 public class HoodieBackedTableMetadata extends BaseTableMetadata {
-
-  private static final Logger LOG = 
LoggerFactory.getLogger(HoodieBackedTableMetadata.class);
   private static final HoodieSchema SCHEMA = 
HoodieSchemaUtils.addMetadataFields(HoodieSchema.fromAvroSchema(HoodieMetadataRecord.getClassSchema()));
 
   private final String metadataBasePath;
   private final HoodieDataCleanupManager dataCleanupManager = new 
HoodieDataCleanupManager();
 
+  @Getter
   private HoodieTableMetaClient metadataMetaClient;
   private Set<String> validInstantTimestamps = null;
   private HoodieTableFileSystemView metadataFileSystemView;
@@ -148,7 +148,7 @@ public class HoodieBackedTableMetadata extends 
BaseTableMetadata {
   private void initIfNeeded() {
     if (!isMetadataTableInitialized) {
       if (!HoodieTableMetadata.isMetadataTable(metadataBasePath)) {
-        LOG.info("Metadata table is disabled.");
+        log.info("Metadata table is disabled.");
       }
     } else if (this.metadataMetaClient == null) {
       try {
@@ -157,13 +157,13 @@ public class HoodieBackedTableMetadata extends 
BaseTableMetadata {
             .setBasePath(metadataBasePath)
             .build();
       } catch (TableNotFoundException e) {
-        LOG.warn("Metadata table was not found at path {}", metadataBasePath);
+        log.warn("Metadata table was not found at path {}", metadataBasePath);
         this.isMetadataTableInitialized = false;
         this.metadataMetaClient = null;
         this.metadataFileSystemView = null;
         this.validInstantTimestamps = null;
       } catch (Exception e) {
-        LOG.error("Failed to initialize metadata table at path {}", 
metadataBasePath, e);
+        log.error("Failed to initialize metadata table at path {}", 
metadataBasePath, e);
         this.isMetadataTableInitialized = false;
         this.metadataMetaClient = null;
         this.metadataFileSystemView = null;
@@ -482,10 +482,10 @@ public class HoodieBackedTableMetadata extends 
BaseTableMetadata {
       parallelism = (int) keys.map(k -> mappingFunction.apply(k, 
numFileSlices)).distinct().count();
       // In case of empty lookup set, we should avoid RDD with 0 partitions.
       parallelism = Math.max(parallelism, 1);
-      LOG.info("Repartitioning keys for partition {} from list data with 
parallelism: {}", partitionName, parallelism);
+      log.info("Repartitioning keys for partition {} from list data with 
parallelism: {}", partitionName, parallelism);
       keys = getEngineContext().parallelize(keys.collectAsList(), parallelism);
     } else if (keys.getNumPartitions() < 
metadataConfig.getRepartitionMinPartitionsThreshold()) {
-      LOG.info("Repartitioning keys for partition {} to {} partitions", 
partitionName, metadataConfig.getRepartitionDefaultPartitions());
+      log.info("Repartitioning keys for partition {} to {} partitions", 
partitionName, metadataConfig.getRepartitionDefaultPartitions());
       keys = 
keys.repartition(metadataConfig.getRepartitionDefaultPartitions());
     }
     return keys;
@@ -718,10 +718,6 @@ public class HoodieBackedTableMetadata extends 
BaseTableMetadata {
     return isMetadataTableInitialized;
   }
 
-  public HoodieTableMetaClient getMetadataMetaClient() {
-    return metadataMetaClient;
-  }
-
   public HoodieTableFileSystemView getMetadataFileSystemView() {
     if (metadataFileSystemView == null) {
       metadataFileSystemView = 
getFileSystemViewForMetadataTable(metadataMetaClient);
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieDataCleanupManager.java
 
b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieDataCleanupManager.java
index c5181b051cd4..1b466955e82f 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieDataCleanupManager.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieDataCleanupManager.java
@@ -22,8 +22,9 @@ import org.apache.hudi.common.data.HoodieData;
 import org.apache.hudi.common.data.HoodiePairData;
 import org.apache.hudi.common.function.SerializableFunctionUnchecked;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.AccessLevel;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
 
 import java.io.Serializable;
 import java.util.ArrayList;
@@ -35,10 +36,11 @@ import java.util.concurrent.ConcurrentHashMap;
  * This class provides thread-safe tracking of persisted data objects and 
ensures they are
  * properly cleaned up when exceptions occur.
  */
+@Slf4j
 public class HoodieDataCleanupManager implements Serializable {
-  private static final Logger LOG = 
LoggerFactory.getLogger(HoodieDataCleanupManager.class);
-  
+
   // Thread-local tracking of persisted data for cleanup on exceptions
+  @Getter(AccessLevel.PACKAGE)
   private final ConcurrentHashMap<Long, List<Object>> threadPersistedData = 
new ConcurrentHashMap<>();
   
   /**
@@ -98,7 +100,7 @@ public class HoodieDataCleanupManager implements 
Serializable {
             ((HoodieData<?>) data).unpersistWithDependencies();
           }
         } catch (Exception e) {
-          LOG.warn("Failed to unpersist data on exception cleanup", e);
+          log.warn("Failed to unpersist data on exception cleanup", e);
         }
       }
     }
@@ -111,13 +113,4 @@ public class HoodieDataCleanupManager implements 
Serializable {
     long threadId = Thread.currentThread().getId();
     threadPersistedData.remove(threadId);
   }
-  
-  /**
-   * Get the thread-persisted data map for testing purposes.
-   *
-   * @return The concurrent map tracking persisted data by thread ID
-   */
-  ConcurrentHashMap<Long, List<Object>> getThreadPersistedData() {
-    return threadPersistedData;
-  }
 }
\ No newline at end of file
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieMetadataMetrics.java 
b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieMetadataMetrics.java
index 49931975691a..32754a158725 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieMetadataMetrics.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieMetadataMetrics.java
@@ -31,8 +31,7 @@ import org.apache.hudi.metrics.Metrics;
 import org.apache.hudi.storage.HoodieStorage;
 
 import com.codahale.metrics.MetricRegistry;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.extern.slf4j.Slf4j;
 
 import java.io.IOException;
 import java.io.Serializable;
@@ -46,6 +45,7 @@ import java.util.stream.Collectors;
 /**
  * Metrics for metadata.
  */
+@Slf4j
 public class HoodieMetadataMetrics implements Serializable {
 
   // Metric names
@@ -80,8 +80,6 @@ public class HoodieMetadataMetrics implements Serializable {
   public static final String TABLE_SERVICE_EXECUTION_DURATION = 
"table_service_execution_duration";
   public static final String ASYNC_INDEXER_CATCHUP_TIME = 
"async_indexer_catchup_time";
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(HoodieMetadataMetrics.class);
-
   private final transient MetricRegistry metricsRegistry;
   private final transient Metrics metrics;
 
@@ -159,7 +157,7 @@ public class HoodieMetadataMetrics implements Serializable {
   }
 
   protected void incrementMetric(String action, long value) {
-    LOG.debug("Updating metadata metrics ({}={}) in {}", action, value, 
metricsRegistry);
+    log.debug("Updating metadata metrics ({}={}) in {}", action, value, 
metricsRegistry);
     Option<HoodieGauge<Long>> gaugeOpt = metrics.registerGauge(action);
     gaugeOpt.ifPresent(gauge -> gauge.setValue(gauge.getValue() + value));
   }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java
 
b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java
index 791654883473..b2839706ae78 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java
@@ -122,12 +122,14 @@ import org.apache.hudi.storage.StoragePath;
 import org.apache.hudi.storage.StoragePathInfo;
 import org.apache.hudi.util.Lazy;
 
+import lombok.AccessLevel;
+import lombok.Getter;
+import lombok.NoArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
 import org.apache.avro.AvroTypeException;
 import org.apache.avro.LogicalTypes;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.IndexedRecord;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nonnull;
 
@@ -198,10 +200,10 @@ import static 
org.apache.hudi.stats.ValueMetadata.getValueMetadata;
 /**
  * A utility to convert timeline information to metadata table records.
  */
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
+@Slf4j
 public class HoodieTableMetadataUtil {
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(HoodieTableMetadataUtil.class);
-
   public static final String PARTITION_NAME_FILES = "files";
   public static final String PARTITION_NAME_PARTITION_STATS = 
"partition_stats";
   public static final String PARTITION_NAME_COLUMN_STATS = "column_stats";
@@ -224,9 +226,6 @@ public class HoodieTableMetadataUtil {
 
   private static final DeleteContext DELETE_CONTEXT = 
DeleteContext.fromRecordSchema(CollectionUtils.emptyProps(), 
HoodieSchema.fromAvroSchema(HoodieMetadataRecord.getClassSchema()));
 
-  private HoodieTableMetadataUtil() {
-  }
-
   public static final Set<Class<?>> COLUMN_STATS_RECORD_SUPPORTED_TYPES = new 
HashSet<>(Arrays.asList(
       IntWrapper.class, BooleanWrapper.class, DateWrapper.class,
       DoubleWrapper.class, FloatWrapper.class, LongWrapper.class,
@@ -397,7 +396,7 @@ public class HoodieTableMetadataUtil {
 
   public static Option<String> getColumnStatsValueAsString(Object statsValue) {
     if (statsValue == null) {
-      LOG.info("Invalid column stats value: null");
+      log.info("Invalid column stats value: null");
       return Option.empty();
     }
     Class<?> statsValueClass = statsValue.getClass();
@@ -486,7 +485,7 @@ public class HoodieTableMetadataUtil {
                         String pathWithPartition = stat.getPath();
                         if (pathWithPartition == null) {
                           // Empty partition
-                          LOG.warn("Unable to find path in write stat to 
update metadata table {}", stat);
+                          log.warn("Unable to find path in write stat to 
update metadata table {}", stat);
                           return map;
                         }
 
@@ -514,7 +513,7 @@ public class HoodieTableMetadataUtil {
 
     records.addAll(updatedPartitionFilesRecords);
 
-    LOG.info("Updating at {} from Commit/{}. #partitions_updated={}, 
#files_added={}", instantTime, commitMetadata.getOperationType(),
+    log.info("Updating at {} from Commit/{}. #partitions_updated={}, 
#files_added={}", instantTime, commitMetadata.getOperationType(),
         records.size(), newFileCount.value());
 
     return records;
@@ -578,7 +577,7 @@ public class HoodieTableMetadataUtil {
       String pathWithPartition = hoodieWriteStat.getPath();
       if (pathWithPartition == null) {
         // Empty partition
-        LOG.error("Failed to find path in write stat to update metadata table 
{}", hoodieWriteStat);
+        log.error("Failed to find path in write stat to update metadata table 
{}", hoodieWriteStat);
         return Collections.emptyListIterator();
       }
 
@@ -593,7 +592,7 @@ public class HoodieTableMetadataUtil {
         try {
           final BloomFilter fileBloomFilter = fileReader.readBloomFilter();
           if (fileBloomFilter == null) {
-            LOG.error("Failed to read bloom filter for {}", writeFilePath);
+            log.error("Failed to read bloom filter for {}", writeFilePath);
             return Collections.emptyListIterator();
           }
           ByteBuffer bloomByteBuffer = 
ByteBuffer.wrap(getUTF8Bytes(fileBloomFilter.serializeToString()));
@@ -601,11 +600,11 @@ public class HoodieTableMetadataUtil {
               partition, fileName, instantTime, bloomFilterType, 
bloomByteBuffer, false);
           return Collections.singletonList(record).iterator();
         } catch (Exception e) {
-          LOG.error("Failed to read bloom filter for {}", writeFilePath);
+          log.error("Failed to read bloom filter for {}", writeFilePath);
           return Collections.emptyListIterator();
         }
       } catch (IOException e) {
-        LOG.error("Failed to get bloom filter for file: {}, write stat: {}", 
writeFilePath, hoodieWriteStat);
+        log.error("Failed to get bloom filter for file: {}, write stat: {}", 
writeFilePath, hoodieWriteStat);
       }
       return Collections.emptyListIterator();
     });
@@ -750,7 +749,7 @@ public class HoodieTableMetadataUtil {
       // if there are partitions to be deleted, add them to delete list
       
records.add(HoodieMetadataPayload.createPartitionListRecord(deletedPartitions, 
true));
     }
-    LOG.info("Updating at {} from Clean. #partitions_updated={}, 
#files_deleted={}, #partitions_deleted={}",
+    log.info("Updating at {} from Clean. #partitions_updated={}, 
#files_deleted={}, #partitions_deleted={}",
             instantTime, records.size(), fileDeleteCount[0], 
deletedPartitions.size());
     return records;
   }
@@ -784,7 +783,7 @@ public class HoodieTableMetadataUtil {
       
records.add(HoodieMetadataPayload.createPartitionListRecord(deletedPartitions, 
true));
     }
 
-    LOG.info("Re-adding missing records at {} during Restore. 
#partitions_updated={}, #files_added={}, #files_deleted={}, 
#partitions_deleted={}",
+    log.info("Re-adding missing records at {} during Restore. 
#partitions_updated={}, #files_added={}, #files_deleted={}, 
#partitions_deleted={}",
             instantTime, records.size(), filesAddedCount[0], 
fileDeleteCount[0], deletedPartitions.size());
     return 
Collections.singletonMap(MetadataPartitionType.FILES.getPartitionPath(), 
engineContext.parallelize(records, 1));
   }
@@ -852,7 +851,7 @@ public class HoodieTableMetadataUtil {
 
     if (columnsToIndex.isEmpty()) {
       // In case there are no columns to index, bail
-      LOG.info("No columns to index for column stats index.");
+      log.info("No columns to index for column stats index.");
       return engineContext.emptyHoodieData();
     }
 
@@ -955,7 +954,7 @@ public class HoodieTableMetadataUtil {
               allRecords.addAll(deletedRecords);
               return allRecords.iterator();
             }
-            LOG.warn("No base file or log file write stats found for fileId: 
{}", fileId);
+            log.warn("No base file or log file write stats found for fileId: 
{}", fileId);
             return Collections.emptyIterator();
           });
 
@@ -1288,7 +1287,7 @@ public class HoodieTableMetadataUtil {
       records.add(record);
     });
 
-    LOG.info("Found at {} from {}. #partitions_updated={}, #files_deleted={}, 
#files_appended={}",
+    log.info("Found at {} from {}. #partitions_updated={}, #files_deleted={}, 
#files_appended={}",
             instantTime, operation, records.size(), fileChangeCount[0], 
fileChangeCount[1]);
 
     return records;
@@ -1333,7 +1332,7 @@ public class HoodieTableMetadataUtil {
       final String filename = partitionFileFlagTuple.f1;
       final boolean isDeleted = partitionFileFlagTuple.f2;
       if (!FSUtils.isBaseFile(new StoragePath(filename))) {
-        LOG.info("Ignoring file {} as it is not a base file", filename);
+        log.info("Ignoring file {} as it is not a base file", filename);
         return Stream.<HoodieRecord>empty().iterator();
       }
 
@@ -1346,7 +1345,7 @@ public class HoodieTableMetadataUtil {
 
         // If reading the bloom filter failed then do not add a record for 
this file
         if (bloomFilterBuffer == null) {
-          LOG.error("Failed to read bloom filter from {}", addedFilePath);
+          log.error("Failed to read bloom filter from {}", addedFilePath);
           return Stream.<HoodieRecord>empty().iterator();
         }
       }
@@ -1371,7 +1370,7 @@ public class HoodieTableMetadataUtil {
     if ((partitionToAppendedFiles.isEmpty() && 
partitionToDeletedFiles.isEmpty())) {
       return engineContext.emptyHoodieData();
     }
-    LOG.info("Indexing {} columns for column stats index", 
columnsToIndex.size());
+    log.info("Indexing {} columns for column stats index", 
columnsToIndex.size());
 
     // Create the tuple (partition, filename, isDeleted) to handle both 
deletes and appends
     final List<Tuple3<String, String, Boolean>> partitionFileFlagTupleList = 
fetchPartitionFileInfoTriplets(partitionToDeletedFiles, 
partitionToAppendedFiles);
@@ -1463,7 +1462,7 @@ public class HoodieTableMetadataUtil {
    */
   public static List<FileSlice> getPartitionLatestMergedFileSlices(
       HoodieTableMetaClient metaClient, HoodieTableFileSystemView fsView, 
String partition) {
-    LOG.info("Loading latest merged file slices for metadata table partition 
{}", partition);
+    log.info("Loading latest merged file slices for metadata table partition 
{}", partition);
     return getPartitionFileSlices(metaClient, Option.of(fsView), partition, 
true);
   }
 
@@ -1478,7 +1477,7 @@ public class HoodieTableMetadataUtil {
    */
   public static List<FileSlice> 
getPartitionLatestFileSlices(HoodieTableMetaClient metaClient,
                                                              
Option<HoodieTableFileSystemView> fsView, String partition) {
-    LOG.info("Loading latest file slices for metadata table partition {}", 
partition);
+    log.info("Loading latest file slices for metadata table partition {}", 
partition);
     return getPartitionFileSlices(metaClient, fsView, partition, false);
   }
 
@@ -1781,15 +1780,15 @@ public class HoodieTableMetadataUtil {
             .readColumnStatsFromMetadata(datasetMetaClient.getStorage(), 
fullFilePath, columnsToIndex, indexVersion);
       } else if (FSUtils.isLogFile(fileName)) {
         Option<HoodieSchema> writerSchemaOpt = 
tryResolveSchemaForTable(datasetMetaClient);
-        LOG.info("Reading log file: {}, to build column range metadata.", 
partitionPathFileName);
+        log.info("Reading log file: {}, to build column range metadata.", 
partitionPathFileName);
         return getLogFileColumnRangeMetadata(fullFilePath.toString(), 
partitionPath, datasetMetaClient, columnsToIndex, writerSchemaOpt, 
maxBufferSize);
       }
-      LOG.warn("Column range index not supported for: {}", 
partitionPathFileName);
+      log.warn("Column range index not supported for: {}", 
partitionPathFileName);
       return Collections.emptyList();
     } catch (Exception e) {
       // NOTE: In case reading column range metadata from individual file 
failed,
       //       we simply fall back, in lieu of failing the whole task
-      LOG.error("Failed to fetch column range metadata for: {}", 
partitionPathFileName);
+      log.error("Failed to fetch column range metadata for: {}", 
partitionPathFileName);
       return Collections.emptyList();
     }
   }
@@ -2169,7 +2168,7 @@ public class HoodieTableMetadataUtil {
                   factory.createNewInstant(HoodieInstant.State.REQUESTED, 
HoodieTimeline.ROLLBACK_ACTION,
                       instant.requestedTime()));
           commitsToRollback = 
Collections.singletonList(rollbackPlan.getInstantToRollback().getCommitTime());
-          LOG.info("Fetching rollback info from requested instant as the 
completed file is empty {}", instant);
+          log.info("Fetching rollback info from requested instant as the 
completed file is empty {}", instant);
         }
         return commitsToRollback;
       }
@@ -2214,18 +2213,18 @@ public class HoodieTableMetadataUtil {
 
     if (backup) {
       final StoragePath metadataBackupPath = new 
StoragePath(metadataTablePath.getParent(), ".metadata_" + 
HoodieInstantTimeGenerator.getCurrentInstantTimeStr());
-      LOG.info("Backing up metadata directory to {} before deletion", 
metadataBackupPath);
+      log.info("Backing up metadata directory to {} before deletion", 
metadataBackupPath);
       try {
         if (storage.rename(metadataTablePath, metadataBackupPath)) {
           return metadataBackupPath.toString();
         }
       } catch (Exception e) {
         // If rename fails, we will ignore the backup and still delete the MDT
-        LOG.error("Failed to backup metadata table using rename", e);
+        log.error("Failed to backup metadata table using rename", e);
       }
     }
 
-    LOG.info("Deleting metadata table from {}", metadataTablePath);
+    log.info("Deleting metadata table from {}", metadataTablePath);
     try {
       storage.deleteDirectory(metadataTablePath);
     } catch (Exception e) {
@@ -2262,7 +2261,7 @@ public class HoodieTableMetadataUtil {
       }
     } catch (FileNotFoundException e) {
       // Ignoring exception as metadata table already does not exist
-      LOG.debug("Metadata table partition {} not found at path {}", 
partitionPath, metadataTablePartitionPath);
+      log.debug("Metadata table partition {} not found at path {}", 
partitionPath, metadataTablePartitionPath);
       return null;
     } catch (Exception e) {
       throw new HoodieMetadataException(String.format("Failed to check 
existence of MDT partition %s at path %s: ", partitionPath, 
metadataTablePartitionPath), e);
@@ -2271,17 +2270,17 @@ public class HoodieTableMetadataUtil {
     if (backup) {
       final StoragePath metadataPartitionBackupPath = new 
StoragePath(metadataTablePartitionPath.getParent().getParent(),
           String.format(".metadata_%s_%s", partitionPath, 
HoodieInstantTimeGenerator.getCurrentInstantTimeStr()));
-      LOG.info("Backing up MDT partition {} to {} before deletion", 
partitionPath, metadataPartitionBackupPath);
+      log.info("Backing up MDT partition {} to {} before deletion", 
partitionPath, metadataPartitionBackupPath);
       try {
         if (storage.rename(metadataTablePartitionPath, 
metadataPartitionBackupPath)) {
           return metadataPartitionBackupPath.toString();
         }
       } catch (Exception e) {
         // If rename fails, we will try to delete the table instead
-        LOG.error(String.format("Failed to backup MDT partition %s using 
rename", partitionPath), e);
+        log.error("Failed to backup MDT partition {} using rename", 
partitionPath, e);
       }
     } else {
-      LOG.info("Deleting metadata table partition from {}", 
metadataTablePartitionPath);
+      log.info("Deleting metadata table partition from {}", 
metadataTablePartitionPath);
       try {
         storage.deleteDirectory(metadataTablePartitionPath);
       } catch (Exception e) {
@@ -2404,7 +2403,7 @@ public class HoodieTableMetadataUtil {
       }
     }
 
-    LOG.info("Estimated file group count for MDT partition {} is {} "
+    log.info("Estimated file group count for MDT partition {} is {} "
             + "[recordCount={}, avgRecordSize={}, minFileGroupCount={}, 
maxFileGroupCount={}, growthFactor={}, "
             + "maxFileGroupSizeBytes={}]", partitionType.name(), 
fileGroupCount, recordCount, averageRecordSize, minFileGroupCount,
         maxFileGroupCount, growthFactor, maxFileGroupSizeBytes);
@@ -2694,10 +2693,10 @@ public class HoodieTableMetadataUtil {
     final Map<String, HoodieSchema> columnsToIndexSchemaMap = 
getColumnsToIndex(dataTableMetaClient.getTableConfig(), metadataConfig, 
lazyWriterSchemaOpt,
         
dataTableMetaClient.getActiveTimeline().getWriteTimeline().filterCompletedInstants().empty(),
 recordTypeOpt, partitionStatsIndexVersion);
     if (columnsToIndexSchemaMap.isEmpty()) {
-      LOG.warn("No columns to index for partition stats index");
+      log.warn("No columns to index for partition stats index");
       return engineContext.emptyHoodieData();
     }
-    LOG.debug("Indexing following columns for partition stats index: {}", 
columnsToIndexSchemaMap);
+    log.debug("Indexing following columns for partition stats index: {}", 
columnsToIndexSchemaMap);
 
     // Group by partition path and collect file names (BaseFile and LogFiles)
     List<Pair<String, Set<String>>> partitionToFileNames = 
partitionInfoList.stream()
@@ -2949,7 +2948,7 @@ public class HoodieTableMetadataUtil {
               //          currently)
               if (newFileInfo.getIsDeleted()) {
                 if (oldFileInfo.getIsDeleted()) {
-                  LOG.warn("A file is repeatedly deleted in the files 
partition of the metadata table: {}", key);
+                  log.warn("A file is repeatedly deleted in the files 
partition of the metadata table: {}", key);
                   return newFileInfo;
                 }
                 return null;
@@ -3097,11 +3096,13 @@ public class HoodieTableMetadataUtil {
    * required for initializing the metadata table. Saving limited properties 
reduces the total memory footprint when
    * a very large number of files are present in the dataset being initialized.
    */
+  @Getter(AccessLevel.PACKAGE)
   public static class DirectoryInfo implements Serializable {
+
     // Relative path of the directory (relative to the base directory)
     private final String relativePath;
     // Map of filenames within this partition to their respective sizes
-    private final HashMap<String, Long> filenameToSizeMap;
+    private final Map<String, Long> filenameToSizeMap;
     // List of directories within this partition
     private final List<StoragePath> subDirectories = new ArrayList<>();
     // Is this a hoodie partition
@@ -3141,27 +3142,6 @@ public class HoodieTableMetadataUtil {
         }
       }
     }
-
-    String getRelativePath() {
-      return relativePath;
-    }
-
-    int getTotalFiles() {
-      return filenameToSizeMap.size();
-    }
-
-    boolean isHoodiePartition() {
-      return isHoodiePartition;
-    }
-
-    List<StoragePath> getSubDirectories() {
-      return subDirectories;
-    }
-
-    // Returns a map of filenames mapped to their lengths
-    Map<String, Long> getFileNameToSizeMap() {
-      return filenameToSizeMap;
-    }
   }
 
   private static TypedProperties 
getFileGroupReaderPropertiesFromStorageConf(StorageConfiguration<?> 
storageConf) {
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java 
b/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java
index 0c7d291f97d1..148439b006f6 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java
@@ -33,6 +33,7 @@ import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.index.expression.HoodieExpressionIndex;
 import org.apache.hudi.stats.ValueMetadata;
 
+import lombok.Getter;
 import org.apache.avro.generic.GenericRecord;
 
 import java.nio.ByteBuffer;
@@ -86,6 +87,7 @@ import static 
org.apache.hudi.metadata.HoodieTableMetadataUtil.mergeColumnStatsR
 /**
  * Partition types for metadata table.
  */
+@Getter
 public enum MetadataPartitionType {
   FILES(HoodieTableMetadataUtil.PARTITION_NAME_FILES, "files-", 2) {
     @Override
@@ -382,18 +384,6 @@ public enum MetadataPartitionType {
     return partitionPath;
   }
 
-  public String getPartitionPath() {
-    return partitionPath;
-  }
-
-  public String getFileIdPrefix() {
-    return fileIdPrefix;
-  }
-
-  public int getRecordType() {
-    return recordType;
-  }
-
   /**
    * Construct metadata payload from the given record.
    */
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/RecordIndexRawKey.java 
b/hudi-common/src/main/java/org/apache/hudi/metadata/RecordIndexRawKey.java
index 23e9d127a96c..2788b6bfed07 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metadata/RecordIndexRawKey.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metadata/RecordIndexRawKey.java
@@ -19,47 +19,21 @@
 
 package org.apache.hudi.metadata;
 
-import java.util.Objects;
+import lombok.NonNull;
+import lombok.Value;
 
 /**
  * Represents a record index key that requires no encoding (identity encoding).
  */
+@Value
 public class RecordIndexRawKey implements RawKey {
-  private final String recordKey;
 
-  public RecordIndexRawKey(String recordKey) {
-    this.recordKey = Objects.requireNonNull(recordKey);
-  }
+  @NonNull
+  String recordKey;
 
   @Override
   public String encode() {
     // Identity encoding - return the key as-is
     return recordKey;
   }
-
-  public String getRecordKey() {
-    return recordKey;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    RecordIndexRawKey that = (RecordIndexRawKey) o;
-    return Objects.equals(recordKey, that.recordKey);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(recordKey);
-  }
-
-  @Override
-  public String toString() {
-    return "RecordIndexRawKey{" + "recordKey='" + recordKey + '\'' + '}';
-  }
 }
\ No newline at end of file
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metadata/SecondaryIndexPrefixRawKey.java
 
b/hudi-common/src/main/java/org/apache/hudi/metadata/SecondaryIndexPrefixRawKey.java
index fc442e07e0fe..22a7980fc70f 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metadata/SecondaryIndexPrefixRawKey.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metadata/SecondaryIndexPrefixRawKey.java
@@ -19,46 +19,18 @@
 
 package org.apache.hudi.metadata;
 
-import java.util.Objects;
+import lombok.Value;
 
 /**
  * Represents a secondary index key, whose raw content is the column value of 
the data table.
  */
+@Value
 public class SecondaryIndexPrefixRawKey implements RawKey {
-  private final String secondaryKey;
 
-  public SecondaryIndexPrefixRawKey(String secondaryKey) {
-    this.secondaryKey = secondaryKey;
-  }
+  String secondaryKey;
 
   @Override
   public String encode() {
     return 
SecondaryIndexKeyUtils.getEscapedSecondaryKeyPrefixFromSecondaryKey(secondaryKey);
   }
-
-  public String getSecondaryKey() {
-    return secondaryKey;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    SecondaryIndexPrefixRawKey that = (SecondaryIndexPrefixRawKey) o;
-    return Objects.equals(secondaryKey, that.secondaryKey);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(secondaryKey);
-  }
-
-  @Override
-  public String toString() {
-    return "SecondaryIndexKey{" + "secondaryKey='" + secondaryKey + '\'' + '}';
-  }
 }
\ No newline at end of file
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/ConsoleMetricsReporter.java 
b/hudi-common/src/main/java/org/apache/hudi/metrics/ConsoleMetricsReporter.java
index 246c613d0fbf..bb70189bc417 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metrics/ConsoleMetricsReporter.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metrics/ConsoleMetricsReporter.java
@@ -21,17 +21,15 @@ package org.apache.hudi.metrics;
 import com.codahale.metrics.ConsoleReporter;
 import com.codahale.metrics.MetricFilter;
 import com.codahale.metrics.MetricRegistry;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.extern.slf4j.Slf4j;
 
 import java.util.concurrent.TimeUnit;
 
 /**
  * Hudi Console metrics reporter. Reports the metrics by printing them to the 
stdout on the console.
  */
+@Slf4j
 public class ConsoleMetricsReporter extends MetricsReporter {
-
-  private static final Logger LOG = 
LoggerFactory.getLogger(ConsoleMetricsReporter.class);
   private final ConsoleReporter consoleReporter;
 
   public ConsoleMetricsReporter(MetricRegistry registry) {
@@ -46,7 +44,7 @@ public class ConsoleMetricsReporter extends MetricsReporter {
     if (consoleReporter != null) {
       consoleReporter.start(30, TimeUnit.SECONDS);
     } else {
-      LOG.error("Cannot start as the consoleReporter is null.");
+      log.error("Cannot start as the consoleReporter is null.");
     }
   }
 
@@ -55,7 +53,7 @@ public class ConsoleMetricsReporter extends MetricsReporter {
     if (consoleReporter != null) {
       consoleReporter.report();
     } else {
-      LOG.error("Cannot report metrics as the consoleReporter is null.");
+      log.error("Cannot report metrics as the consoleReporter is null.");
     }
   }
 
diff --git a/hudi-common/src/main/java/org/apache/hudi/metrics/HoodieGauge.java 
b/hudi-common/src/main/java/org/apache/hudi/metrics/HoodieGauge.java
index d69ab4dacce2..adb4df9981b9 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metrics/HoodieGauge.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metrics/HoodieGauge.java
@@ -19,34 +19,17 @@
 package org.apache.hudi.metrics;
 
 import com.codahale.metrics.Gauge;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+import lombok.Setter;
 
 /**
  * Similar to {@link Gauge}, but metric value can be updated by {@link 
#setValue(T)}.
  */
+@AllArgsConstructor
+@Getter
+@Setter
 public class HoodieGauge<T> implements Gauge<T> {
-  private volatile T value;
-
-  /**
-   * Create an instance with a default value.
-   */
-  public HoodieGauge(T value) {
-    this.value = value;
-  }
 
-  /**
-   * Set the metric to a new value.
-   */
-  public void setValue(T value) {
-    this.value = value;
-  }
-
-  /**
-   * Returns the metric's current value.
-   *
-   * @return the metric's current value
-   */
-  @Override
-  public T getValue() {
-    return value;
-  }
+  private volatile T value;
 }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/JmxMetricsReporter.java 
b/hudi-common/src/main/java/org/apache/hudi/metrics/JmxMetricsReporter.java
index b56a41888ecb..623a055100a7 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metrics/JmxMetricsReporter.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metrics/JmxMetricsReporter.java
@@ -22,8 +22,7 @@ import org.apache.hudi.config.metrics.HoodieMetricsConfig;
 import org.apache.hudi.exception.HoodieException;
 
 import com.codahale.metrics.MetricRegistry;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.extern.slf4j.Slf4j;
 
 import javax.management.MBeanServer;
 
@@ -35,10 +34,9 @@ import java.util.stream.IntStream;
 /**
  * Implementation of Jmx reporter, which used to report jmx metric.
  */
+@Slf4j
 public class JmxMetricsReporter extends MetricsReporter {
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(JmxMetricsReporter.class);
-
   private final MetricRegistry registry;
   private JmxReporterServer jmxReporterServer;
 
@@ -62,10 +60,10 @@ public class JmxMetricsReporter extends MetricsReporter {
             "Could not start JMX server on any configured port. Ports: " + 
portsConfig
                 + ". Maybe require port range for multiple hoodie tables");
       }
-      LOG.info("Configured JMXReporter with {port:" + portsConfig + "}");
+      log.info("Configured JMXReporter with {port:" + portsConfig + "}");
     } catch (Exception e) {
       String msg = "Jmx initialize failed: ";
-      LOG.error(msg, e);
+      log.error(msg, e);
       throw new HoodieException(msg, e);
     }
   }
@@ -78,13 +76,13 @@ public class JmxMetricsReporter extends MetricsReporter {
     for (int port : ports) {
       try {
         jmxReporterServer = createJmxReport(host, port);
-        LOG.info("Started JMX server on port " + port + ".");
+        log.info("Started JMX server on port " + port + ".");
         break;
       } catch (Exception e) {
         if (e.getCause() instanceof ExportException) {
-          LOG.info("Skip for initializing jmx port " + port + " because of 
already in use");
+          log.info("Skip for initializing jmx port " + port + " because of 
already in use");
         } else {
-          LOG.info("Failed to initialize jmx port " + port + ". " + 
e.getMessage());
+          log.info("Failed to initialize jmx port " + port + ". " + 
e.getMessage());
         }
       }
     }
@@ -95,7 +93,7 @@ public class JmxMetricsReporter extends MetricsReporter {
     if (isServerCreated()) {
       jmxReporterServer.start();
     } else {
-      LOG.error("Cannot start as the jmxReporter is null.");
+      log.error("Cannot start as the jmxReporter is null.");
     }
   }
 
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/JmxReporterServer.java 
b/hudi-common/src/main/java/org/apache/hudi/metrics/JmxReporterServer.java
index 8bad9fefa5c1..95e28e6f1653 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metrics/JmxReporterServer.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metrics/JmxReporterServer.java
@@ -24,6 +24,7 @@ import org.apache.hudi.exception.HoodieException;
 
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.jmx.JmxReporter;
+import lombok.Getter;
 
 import javax.management.MBeanServer;
 import javax.management.remote.JMXConnectorServer;
@@ -91,6 +92,7 @@ public class JmxReporterServer {
 
   private JMXConnectorServer connector;
   private Registry rmiRegistry;
+  @Getter
   private JmxReporter reporter;
 
   protected JmxReporterServer(MetricRegistry registry, String host, int port,
@@ -108,10 +110,6 @@ public class JmxReporterServer {
     }
   }
 
-  public JmxReporter getReporter() {
-    return reporter;
-  }
-
   public void start() {
     ValidationUtils.checkArgument(reporter != null && connector != null,
         "reporter or connector cannot be null!");
diff --git a/hudi-common/src/main/java/org/apache/hudi/metrics/Metrics.java 
b/hudi-common/src/main/java/org/apache/hudi/metrics/Metrics.java
index 1d3c09d97d56..4ea8b4b1ed46 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metrics/Metrics.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metrics/Metrics.java
@@ -27,8 +27,8 @@ import org.apache.hudi.storage.HoodieStorage;
 import org.apache.hudi.storage.StoragePath;
 
 import com.codahale.metrics.MetricRegistry;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -39,12 +39,12 @@ import java.util.concurrent.ConcurrentHashMap;
 /**
  * This is the main class of the metrics system.
  */
+@Slf4j
 public class Metrics {
 
-  private static final Logger LOG = LoggerFactory.getLogger(Metrics.class);
-
   private static final Map<String, Metrics> METRICS_INSTANCE_PER_BASEPATH = 
new ConcurrentHashMap<>();
 
+  @Getter
   private final MetricRegistry registry;
   private final List<MetricsReporter> reporters;
   private final String commonMetricPrefix;
@@ -63,7 +63,7 @@ public class Metrics {
     if 
(StringUtils.nonEmpty(metricConfig.getMetricReporterFileBasedConfigs())) {
       reporters.addAll(addAdditionalMetricsExporters(metricConfig));
     }
-    if (reporters.size() == 0) {
+    if (reporters.isEmpty()) {
       throw new RuntimeException("Cannot initialize Reporters.");
     }
     reporters.forEach(MetricsReporter::start);
@@ -106,14 +106,13 @@ public class Metrics {
         if (reporter.isPresent()) {
           reporterList.add(reporter.get());
         } else {
-          LOG.error(String.format("Could not create reporter using properties 
path %s base path %s",
-              propPath, metricConfig.getBasePath()));
+          log.error("Could not create reporter using properties path {} base 
path {}", propPath, metricConfig.getBasePath());
         }
       }
     } catch (IOException e) {
-      LOG.error("Failed to add MetricsExporters", e);
+      log.error("Failed to add MetricsExporters", e);
     }
-    LOG.info("total additional metrics reporters added =" + 
reporterList.size());
+    log.info("total additional metrics reporters added = {}", 
reporterList.size());
     return reporterList;
   }
 
@@ -125,16 +124,16 @@ public class Metrics {
     if (!fromShutdownHook) {
       Runtime.getRuntime().removeShutdownHook(shutdownThread);
     } else {
-      LOG.debug("Shutting down the metrics reporter from shutdown hook.");
+      log.debug("Shutting down the metrics reporter from shutdown hook.");
     }
     if (initialized) {
       try {
         registerHoodieCommonMetrics();
         reporters.forEach(MetricsReporter::report);
-        LOG.info("Stopping the metrics reporter...");
+        log.info("Stopping the metrics reporter...");
         reporters.forEach(MetricsReporter::stop);
       } catch (Exception e) {
-        LOG.warn("Error while closing reporter", e);
+        log.warn("Error while closing reporter", e);
       } finally {
         METRICS_INSTANCE_PER_BASEPATH.remove(basePath);
         initialized = false;
@@ -144,13 +143,13 @@ public class Metrics {
 
   public synchronized void flush() {
     try {
-      LOG.info("Reporting and flushing all metrics");
+      log.info("Reporting and flushing all metrics");
       registerHoodieCommonMetrics();
       reporters.forEach(MetricsReporter::report);
       registry.getNames().forEach(this.registry::remove);
       registerHoodieCommonMetrics();
     } catch (Exception e) {
-      LOG.error("Error while reporting and flushing metrics", e);
+      log.error("Error while reporting and flushing metrics", e);
     }
   }
 
@@ -167,7 +166,7 @@ public class Metrics {
     } catch (Exception e) {
       // Here we catch all exception, so the major upsert pipeline will not be 
affected if the
       // metrics system has some issues.
-      LOG.error("Failed to send metrics: ", e);
+      log.error("Failed to send metrics: ", e);
     }
     return Option.ofNullable(gauge);
   }
@@ -176,10 +175,6 @@ public class Metrics {
     return registerGauge(metricName, 0);
   }
 
-  public MetricRegistry getRegistry() {
-    return registry;
-  }
-
   public static boolean isInitialized(String basePath) {
     if (METRICS_INSTANCE_PER_BASEPATH.containsKey(basePath)) {
       return METRICS_INSTANCE_PER_BASEPATH.get(basePath).initialized;
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java
 
b/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java
index 161536e19cc5..80643c067034 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java
@@ -24,8 +24,7 @@ import com.codahale.metrics.MetricFilter;
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.graphite.Graphite;
 import com.codahale.metrics.graphite.GraphiteReporter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.extern.slf4j.Slf4j;
 
 import java.net.InetSocketAddress;
 import java.util.concurrent.TimeUnit;
@@ -33,9 +32,8 @@ import java.util.concurrent.TimeUnit;
 /**
  * Implementation of Graphite reporter, which connects to the Graphite server, 
and send metrics to that server.
  */
+@Slf4j
 public class MetricsGraphiteReporter extends MetricsReporter {
-
-  private static final Logger LOG = 
LoggerFactory.getLogger(MetricsGraphiteReporter.class);
   private final MetricRegistry registry;
   private final GraphiteReporter graphiteReporter;
   private final HoodieMetricsConfig metricsConfig;
@@ -64,7 +62,7 @@ public class MetricsGraphiteReporter extends MetricsReporter {
     if (graphiteReporter != null) {
       graphiteReporter.start(periodSeconds, TimeUnit.SECONDS);
     } else {
-      LOG.error("Cannot start as the graphiteReporter is null.");
+      log.error("Cannot start as the graphiteReporter is null.");
     }
   }
 
@@ -73,7 +71,7 @@ public class MetricsGraphiteReporter extends MetricsReporter {
     if (graphiteReporter != null) {
       graphiteReporter.report();
     } else {
-      LOG.error("Cannot report metrics as the graphiteReporter is null.");
+      log.error("Cannot report metrics as the graphiteReporter is null.");
     }
   }
 
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsReporterFactory.java 
b/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsReporterFactory.java
index 2add2fee6449..b93968e23d73 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsReporterFactory.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsReporterFactory.java
@@ -30,18 +30,16 @@ import 
org.apache.hudi.metrics.prometheus.PrometheusReporter;
 import org.apache.hudi.metrics.prometheus.PushGatewayMetricsReporter;
 
 import com.codahale.metrics.MetricRegistry;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.extern.slf4j.Slf4j;
 
 import java.util.Properties;
 
 /**
  * Factory class for creating MetricsReporter.
  */
+@Slf4j
 public class MetricsReporterFactory {
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(MetricsReporterFactory.class);
-
   public static Option<MetricsReporter> createReporter(HoodieMetricsConfig 
metricsConfig, MetricRegistry registry) {
     String reporterClassName = metricsConfig.getMetricReporterClassName();
 
@@ -58,7 +56,7 @@ public class MetricsReporterFactory {
     MetricsReporterType type = metricsConfig.getMetricsReporterType();
     MetricsReporter reporter = null;
     if (type == null) {
-      LOG.warn("Metric creation failed. {} is not configured",
+      log.warn("Metric creation failed. {} is not configured",
           HoodieMetricsConfig.METRICS_REPORTER_TYPE_VALUE.key());
       return Option.empty();
     }
@@ -96,7 +94,7 @@ public class MetricsReporterFactory {
         reporter = new Slf4jMetricsReporter(registry);
         break;
       default:
-        LOG.error("Reporter type[" + type + "] is not supported.");
+        log.error("Reporter type[" + type + "] is not supported.");
         break;
     }
     return Option.ofNullable(reporter);
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/Slf4jMetricsReporter.java 
b/hudi-common/src/main/java/org/apache/hudi/metrics/Slf4jMetricsReporter.java
index 1a75f1ba3587..b7bde6b2a139 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metrics/Slf4jMetricsReporter.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metrics/Slf4jMetricsReporter.java
@@ -21,23 +21,21 @@ package org.apache.hudi.metrics;
 import com.codahale.metrics.MetricFilter;
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.Slf4jReporter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.extern.slf4j.Slf4j;
 
 import java.util.concurrent.TimeUnit;
 
 /**
  * Hudi Slf4j metrics reporter. Reports the metrics by printing them to the 
log.
  */
+@Slf4j
 public class Slf4jMetricsReporter extends MetricsReporter {
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(Slf4jMetricsReporter.class);
-
   private final Slf4jReporter reporter;
 
   public Slf4jMetricsReporter(MetricRegistry registry) {
     this.reporter = Slf4jReporter.forRegistry(registry)
-        .outputTo(LOG)
+        .outputTo(log)
         .convertRatesTo(TimeUnit.SECONDS)
         .convertDurationsTo(TimeUnit.MILLISECONDS)
         .filter(MetricFilter.ALL)
@@ -49,7 +47,7 @@ public class Slf4jMetricsReporter extends MetricsReporter {
     if (reporter != null) {
       reporter.start(30, TimeUnit.SECONDS);
     } else {
-      LOG.error("Cannot start as the reporter is null.");
+      log.error("Cannot start as the reporter is null.");
     }
   }
 
@@ -58,7 +56,7 @@ public class Slf4jMetricsReporter extends MetricsReporter {
     if (reporter != null) {
       reporter.report();
     } else {
-      LOG.error("Cannot report metrics as the reporter is null.");
+      log.error("Cannot report metrics as the reporter is null.");
     }
   }
 
@@ -67,7 +65,7 @@ public class Slf4jMetricsReporter extends MetricsReporter {
     if (reporter != null) {
       reporter.stop();
     } else {
-      LOG.error("Cannot stop as the reporter is null.");
+      log.error("Cannot stop as the reporter is null.");
     }
   }
 }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java
 
b/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java
index 6922393f39aa..07a6934b3ad3 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java
@@ -21,26 +21,18 @@ package org.apache.hudi.metrics.custom;
 import org.apache.hudi.metrics.MetricsReporter;
 
 import com.codahale.metrics.MetricRegistry;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
 
 import java.util.Properties;
 
 /**
  * Extensible metrics reporter for custom implementation.
  */
+@AllArgsConstructor
+@Getter
 public abstract class CustomizableMetricsReporter extends MetricsReporter {
+
   private final Properties props;
   private final MetricRegistry registry;
-
-  public CustomizableMetricsReporter(Properties props, MetricRegistry 
registry) {
-    this.props = props;
-    this.registry = registry;
-  }
-
-  public Properties getProps() {
-    return props;
-  }
-
-  public MetricRegistry getRegistry() {
-    return registry;
-  }
 }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogHttpClient.java
 
b/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogHttpClient.java
index 5483f6f214a6..aaa201b1e657 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogHttpClient.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogHttpClient.java
@@ -22,6 +22,9 @@ import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.StringUtils;
 import org.apache.hudi.common.util.ValidationUtils;
 
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
 import org.apache.http.HttpHeaders;
 import org.apache.http.HttpStatus;
 import org.apache.http.client.config.RequestConfig;
@@ -33,8 +36,6 @@ import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClientBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -44,10 +45,9 @@ import java.io.IOException;
  * <p>
  * Responsible for API endpoint routing, validating API key, and sending 
requests with metrics payload.
  */
+@Slf4j
 public class DatadogHttpClient implements Closeable {
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(DatadogHttpClient.class);
-
   private static final String DEFAULT_HOST = "app.us.datadoghq";
  private static final String SERIES_URL_FORMAT = "https://%s.%s/api/v1/series";
  private static final String VALIDATE_URL_FORMAT = "https://%s.%s/api/v1/validate";
@@ -103,12 +103,12 @@ public class DatadogHttpClient implements Closeable {
     try (CloseableHttpResponse response = client.execute(request)) {
       int statusCode = response.getStatusLine().getStatusCode();
       if (statusCode >= 300) {
-        LOG.error("Failed to send to Datadog. Response was {}", response);
+        log.error("Failed to send to Datadog. Response was {}", response);
       } else {
-        LOG.debug("Sent metrics data (size: {}) to {}", payload.length(), 
seriesUrl);
+        log.debug("Sent metrics data (size: {}) to {}", payload.length(), 
seriesUrl);
       }
     } catch (IOException e) {
-      LOG.warn("Failed to send to Datadog.", e);
+      log.warn("Failed to send to Datadog.", e);
     }
   }
 
@@ -117,17 +117,11 @@ public class DatadogHttpClient implements Closeable {
     client.close();
   }
 
+  @AllArgsConstructor
+  @Getter
   public enum ApiSite {
     US("com"), EU("eu");
 
     private final String domain;
-
-    ApiSite(String domain) {
-      this.domain = domain;
-    }
-
-    public String getDomain() {
-      return domain;
-    }
   }
 }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java
 
b/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java
index 1f3e2a4ca4a8..57fc1948bf00 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java
@@ -36,8 +36,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.node.ArrayNode;
 import com.fasterxml.jackson.databind.node.ObjectNode;
 import com.fasterxml.jackson.databind.node.TextNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.extern.slf4j.Slf4j;
 
 import java.io.IOException;
 import java.util.List;
@@ -53,10 +52,9 @@ import java.util.stream.Collectors;
  * <p>
  * Internally use {@link DatadogHttpClient} to interact with Datadog APIs.
  */
+@Slf4j
 public class DatadogReporter extends ScheduledReporter {
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(DatadogReporter.class);
-
   private final DatadogHttpClient client;
   private final String prefix;
   private final Option<String> host;
@@ -117,7 +115,7 @@ public class DatadogReporter extends ScheduledReporter {
       try {
         client.close();
       } catch (IOException e) {
-        LOG.warn("Error disconnecting from Datadog.", e);
+        log.warn("Error disconnecting from Datadog.", e);
       }
     }
   }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/m3/M3MetricsReporter.java 
b/hudi-common/src/main/java/org/apache/hudi/metrics/m3/M3MetricsReporter.java
index cbdb52af91dc..544299208638 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metrics/m3/M3MetricsReporter.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metrics/m3/M3MetricsReporter.java
@@ -27,8 +27,7 @@ import com.uber.m3.tally.Scope;
 import com.uber.m3.tally.m3.M3Reporter;
 import com.uber.m3.util.Duration;
 import com.uber.m3.util.ImmutableMap;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.extern.slf4j.Slf4j;
 
 import java.net.InetSocketAddress;
 import java.util.Arrays;
@@ -39,9 +38,9 @@ import java.util.concurrent.TimeUnit;
 /**
  * Implementation of M3 Metrics reporter, which can report metrics to a 
https://m3db.io/ service
  */
+@Slf4j
 public class M3MetricsReporter extends MetricsReporter {
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(M3MetricsReporter.class);
   private final HoodieMetricsConfig metricsConfig;
   private final MetricRegistry registry;
   private final ImmutableMap<String, String> tags;
@@ -55,7 +54,7 @@ public class M3MetricsReporter extends MetricsReporter {
     tagBuilder.put("service", metricsConfig.getM3Service());
     tagBuilder.put("env", metricsConfig.getM3Env());
     this.tags = tagBuilder.build();
-    LOG.info(String.format("Building M3 Reporter with M3 tags mapping: %s", 
tags));
+    log.info("Building M3 Reporter with M3 tags mapping: {}", tags);
   }
 
   private static Map parseOptionalTags(String tagValueString) {
@@ -107,7 +106,7 @@ public class M3MetricsReporter extends MetricsReporter {
         scopeReporter.report();
         scopeReporter.stop();
       } catch (Exception e) {
-        LOG.error(String.format("Error reporting metrics to M3: %s", e));
+        log.error("Error reporting metrics to M3:", e);
       }
     }
   }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/prometheus/PrometheusReporter.java
 
b/hudi-common/src/main/java/org/apache/hudi/metrics/prometheus/PrometheusReporter.java
index 3bf912416135..2c6dfc67ae94 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metrics/prometheus/PrometheusReporter.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metrics/prometheus/PrometheusReporter.java
@@ -30,8 +30,8 @@ import io.prometheus.client.dropwizard.DropwizardExports;
 import io.prometheus.client.dropwizard.samplebuilder.DefaultSampleBuilder;
 import io.prometheus.client.dropwizard.samplebuilder.SampleBuilder;
 import io.prometheus.client.exporter.HTTPServer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
 
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
@@ -47,13 +47,15 @@ import java.util.regex.Pattern;
  * Implementation of Prometheus reporter, which connects to the Http server, 
and get metrics
  * from that server.
  */
+@Slf4j
 public class PrometheusReporter extends MetricsReporter {
-  private static final Pattern LABEL_PATTERN = Pattern.compile("\\s*,\\s*");
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(PrometheusReporter.class);
+  private static final Pattern LABEL_PATTERN = Pattern.compile("\\s*,\\s*");
   private static final Map<Integer, PrometheusServerState> 
PORT_TO_SERVER_STATE = new ConcurrentHashMap<>();
 
+  @Getter
   private static class PrometheusServerState {
+
     private final HTTPServer httpServer;
     private final CollectorRegistry collectorRegistry;
     private final AtomicInteger referenceCount;
@@ -65,22 +67,6 @@ public class PrometheusReporter extends MetricsReporter {
       this.referenceCount = new AtomicInteger(0);
       this.exports = ConcurrentHashMap.newKeySet();
     }
-
-    public HTTPServer getHttpServer() {
-      return httpServer;
-    }
-
-    public CollectorRegistry getCollectorRegistry() {
-      return collectorRegistry;
-    }
-
-    public AtomicInteger getReferenceCount() {
-      return referenceCount;
-    }
-
-    public Set<DropwizardExports> getExports() {
-      return exports;
-    }
   }
 
   private final DropwizardExports metricExports;
@@ -105,7 +91,7 @@ public class PrometheusReporter extends MetricsReporter {
     PrometheusServerState serverState = getAndRegisterServerState(serverPort, 
metricExports);
     this.collectorRegistry = serverState.getCollectorRegistry();
     
-    LOG.debug("Registered PrometheusReporter for port {}, reference count: 
{}", 
+    log.debug("Registered PrometheusReporter for port {}, reference count: 
{}", 
              serverPort, serverState.getReferenceCount().get());
   }
 
@@ -121,12 +107,12 @@ public class PrometheusReporter extends MetricsReporter {
           try {
             server.close();
           } catch (Exception e) {
-            LOG.debug("Error closing Prometheus HTTP server during shutdown: 
{}", e.getMessage());
+            log.debug("Error closing Prometheus HTTP server during shutdown: 
{}", e.getMessage());
           }
         }));
       } catch (Exception e) {
         String msg = "Could not start PrometheusReporter HTTP server on port " 
+ serverPort;
-        LOG.error(msg, e);
+        log.error(msg, e);
         throw new HoodieException(msg, e);
       }
     }
@@ -151,10 +137,10 @@ public class PrometheusReporter extends MetricsReporter {
     if (!stopped.getAndSet(true)) {
       try {
         synchronized (PrometheusReporter.class) {
-          LOG.debug("PrometheusReporter.stop() called for port {}", 
serverPort);
+          log.debug("PrometheusReporter.stop() called for port {}", 
serverPort);
           PrometheusServerState serverState = 
PORT_TO_SERVER_STATE.get(serverPort);
           if (serverState == null) {
-            LOG.warn("No server state found for port {} during stop()", 
serverPort);
+            log.warn("No server state found for port {} during stop()", 
serverPort);
             return;
           }
           
@@ -165,12 +151,12 @@ public class PrometheusReporter extends MetricsReporter {
           if (newReferenceCount <= 0) {
             cleanupServer(serverPort);
           } else {
-            LOG.debug("Prometheus server on port {} still has {} references, 
keeping server alive", 
+            log.debug("Prometheus server on port {} still has {} references, 
keeping server alive", 
                      serverPort, newReferenceCount);
           }
         }
       } catch (Exception e) {
-        LOG.error("Error in PrometheusReporter.stop() for port {}", 
serverPort, e);
+        log.error("Error in PrometheusReporter.stop() for port {}", 
serverPort, e);
       }
     }
   }
@@ -181,7 +167,7 @@ public class PrometheusReporter extends MetricsReporter {
         collectorRegistry.unregister(metricExports);
         unregistered = true;
       } catch (Exception e) {
-        LOG.debug("Error unregistering metric exports for port {}: {}", 
serverPort, e.getMessage());
+        log.debug("Error unregistering metric exports for port {}: {}", 
serverPort, e.getMessage());
       }
     }
   }
@@ -192,19 +178,19 @@ public class PrometheusReporter extends MetricsReporter {
 
   private int decrementReferenceCount(PrometheusServerState serverState) {
     int newCount = serverState.getReferenceCount().decrementAndGet();
-    LOG.debug("Unregistered PrometheusReporter for port {}, reference count: 
{}", 
+    log.debug("Unregistered PrometheusReporter for port {}, reference count: 
{}", 
              serverPort, newCount);
     return newCount;
   }
 
   private static synchronized void cleanupServer(int serverPort) {
-    LOG.info("No more references to Prometheus server on port {}, stopping 
server", serverPort);
+    log.info("No more references to Prometheus server on port {}, stopping 
server", serverPort);
     PrometheusServerState serverState = 
PORT_TO_SERVER_STATE.remove(serverPort);
     if (serverState != null) {
       try {
         serverState.getHttpServer().close();
       } catch (Exception e) {
-        LOG.debug("Error closing Prometheus HTTP server on port {}: {}", 
serverPort, e.getMessage());
+        log.debug("Error closing Prometheus HTTP server on port {}: {}", 
serverPort, e.getMessage());
       }
     }
   }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/metrics/prometheus/PushGatewayReporter.java
 
b/hudi-common/src/main/java/org/apache/hudi/metrics/prometheus/PushGatewayReporter.java
index 7c672a2e7992..b3ee82035ae1 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/metrics/prometheus/PushGatewayReporter.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/metrics/prometheus/PushGatewayReporter.java
@@ -32,8 +32,7 @@ import com.codahale.metrics.Timer;
 import io.prometheus.client.CollectorRegistry;
 import io.prometheus.client.dropwizard.DropwizardExports;
 import io.prometheus.client.exporter.PushGateway;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.extern.slf4j.Slf4j;
 
 import java.io.IOException;
 import java.net.MalformedURLException;
@@ -46,9 +45,9 @@ import java.util.SortedMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 
+@Slf4j
 public class PushGatewayReporter extends ScheduledReporter {
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(PushGatewayReporter.class);
   // Ensures that we maintain a single PushGw client (single connection pool) 
per Push Gw Server instance.
   private static final Map<String, PushGateway> PUSH_GATEWAY_PER_HOSTNAME = 
new ConcurrentHashMap<>();
 
@@ -114,7 +113,7 @@ public class PushGatewayReporter extends ScheduledReporter {
       handleLabeledMetrics();
       pushGatewayClient.pushAdd(collectorRegistry, jobName, labels);
     } catch (IOException e) {
-      LOG.warn("Can't push monitoring information to pushGateway", e);
+      log.warn("Can't push monitoring information to pushGateway", e);
     }
   }
 
@@ -136,7 +135,7 @@ public class PushGatewayReporter extends ScheduledReporter {
         }
       }
     } catch (IOException e) {
-      LOG.warn("Failed to delete metrics from pushGateway with jobName {{}}", 
jobName, e);
+      log.warn("Failed to delete metrics from pushGateway with jobName {{}}", 
jobName, e);
     }
   }
 
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/stats/HoodieColumnRangeMetadata.java
 
b/hudi-common/src/main/java/org/apache/hudi/stats/HoodieColumnRangeMetadata.java
index 0ddc3184771d..ab47c0e441a1 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/stats/HoodieColumnRangeMetadata.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/stats/HoodieColumnRangeMetadata.java
@@ -23,10 +23,13 @@ import org.apache.hudi.avro.model.HoodieMetadataColumnStats;
 import org.apache.hudi.common.util.ValidationUtils;
 import org.apache.hudi.metadata.HoodieIndexVersion;
 
+import lombok.EqualsAndHashCode;
+import lombok.ToString;
+import lombok.Value;
+
 import javax.annotation.Nullable;
 
 import java.io.Serializable;
-import java.util.Objects;
 
 import static org.apache.hudi.stats.ValueMetadata.getEmptyValueMetadata;
 
@@ -38,123 +41,31 @@ import static 
org.apache.hudi.stats.ValueMetadata.getEmptyValueMetadata;
  *        associated with
  */
 @SuppressWarnings("rawtype")
+@Value
 public class HoodieColumnRangeMetadata<T extends Comparable> implements 
Serializable {
-  private final String filePath;
-  private final String columnName;
-  @Nullable
-  private final T minValue;
-  @Nullable
-  private final T maxValue;
-  private final long nullCount;
-  private final long valueCount;
-  private final long totalSize;
-  private final long totalUncompressedSize;
-  private final ValueMetadata valueMetadata;
-
-  private HoodieColumnRangeMetadata(String filePath,
-                                    String columnName,
-                                    @Nullable T minValue,
-                                    @Nullable T maxValue,
-                                    long nullCount,
-                                    long valueCount,
-                                    long totalSize,
-                                    long totalUncompressedSize,
-                                    ValueMetadata valueMetadata) {
-    this.filePath = filePath;
-    this.columnName = columnName;
-    this.minValue = minValue;
-    this.maxValue = maxValue;
-    this.nullCount = nullCount;
-    this.valueCount = valueCount;
-    this.totalSize = totalSize;
-    this.totalUncompressedSize = totalUncompressedSize;
-    this.valueMetadata = valueMetadata;
-  }
-
-  public String getFilePath() {
-    return this.filePath;
-  }
-
-  public String getColumnName() {
-    return this.columnName;
-  }
 
+  String filePath;
+  String columnName;
   @Nullable
-  public T getMinValue() {
-    return this.minValue;
-  }
+  T minValue;
+  @Nullable
+  T maxValue;
+  long nullCount;
+  long valueCount;
+  long totalSize;
+  long totalUncompressedSize;
+  @EqualsAndHashCode.Exclude
+  @ToString.Exclude
+  ValueMetadata valueMetadata;
 
   public Object getMinValueWrapped() {
     return getValueMetadata().wrapValue(getMinValue());
   }
 
-  @Nullable
-  public T getMaxValue() {
-    return this.maxValue;
-  }
-
   public Object getMaxValueWrapped() {
     return getValueMetadata().wrapValue(getMaxValue());
   }
 
-  public long getNullCount() {
-    return nullCount;
-  }
-
-  public long getValueCount() {
-    return valueCount;
-  }
-
-  public long getTotalSize() {
-    return totalSize;
-  }
-
-  public long getTotalUncompressedSize() {
-    return totalUncompressedSize;
-  }
-
-  public ValueMetadata getValueMetadata() {
-    return valueMetadata;
-  }
-
-  @Override
-  public boolean equals(final Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    final HoodieColumnRangeMetadata<?> that = (HoodieColumnRangeMetadata<?>) o;
-    return Objects.equals(getFilePath(), that.getFilePath())
-        && Objects.equals(getColumnName(), that.getColumnName())
-        && Objects.equals(getMinValue(), that.getMinValue())
-        && Objects.equals(getMaxValue(), that.getMaxValue())
-        && Objects.equals(getNullCount(), that.getNullCount())
-        && Objects.equals(getValueCount(), that.getValueCount())
-        && Objects.equals(getTotalSize(), that.getTotalSize())
-        && Objects.equals(getTotalUncompressedSize(), 
that.getTotalUncompressedSize());
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(getColumnName(), getMinValue(), getMaxValue(), 
getNullCount());
-  }
-
-  @Override
-  public String toString() {
-    return "HoodieColumnRangeMetadata{"
-        + "filePath ='" + filePath + '\''
-        + ", columnName='" + columnName + '\''
-        + ", minValue=" + minValue
-        + ", maxValue=" + maxValue
-        + ", nullCount=" + nullCount
-        + ", valueCount=" + valueCount
-        + ", totalSize=" + totalSize
-        + ", totalUncompressedSize=" + totalUncompressedSize
-        + '}';
-  }
-
   public static <T extends Comparable<T>> HoodieColumnRangeMetadata<T> 
create(String filePath,
                                                                               
String columnName,
                                                                               
@Nullable T minValue,
diff --git a/hudi-common/src/main/java/org/apache/hudi/stats/ValueMetadata.java 
b/hudi-common/src/main/java/org/apache/hudi/stats/ValueMetadata.java
index 8246254eb22f..f0ff743cca70 100644
--- a/hudi-common/src/main/java/org/apache/hudi/stats/ValueMetadata.java
+++ b/hudi-common/src/main/java/org/apache/hudi/stats/ValueMetadata.java
@@ -26,6 +26,9 @@ import org.apache.hudi.common.schema.HoodieSchemaUtils;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.metadata.HoodieIndexVersion;
 
+import lombok.AccessLevel;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.parquet.schema.PrimitiveType;
 
@@ -40,20 +43,14 @@ import static 
org.apache.hudi.metadata.HoodieMetadataPayload.COLUMN_STATS_FIELD_
  * Used for wrapping and unwrapping col stat values
  * as well as for type promotion
  */
+@AllArgsConstructor(access = AccessLevel.PROTECTED)
+@Getter
 public class ValueMetadata implements Serializable {
 
   private static final ParquetAdapter PARQUET_ADAPTER = 
ParquetAdapter.getAdapter();
 
   private final ValueType valueType;
 
-  protected ValueMetadata(ValueType valueType) {
-    this.valueType = valueType;
-  }
-
-  public ValueType getValueType() {
-    return valueType;
-  }
-
   public HoodieValueTypeInfo getValueTypeInfo() {
     return HoodieValueTypeInfo.newBuilder()
         .setTypeOrdinal(valueType.ordinal())
@@ -157,7 +154,9 @@ public class ValueMetadata implements Serializable {
       return new DecimalMetadata(precision, scale);
     }
 
+    @Getter
     private final int precision;
+    @Getter
     private final int scale;
 
     private DecimalMetadata(int precision, int scale) {
@@ -166,16 +165,6 @@ public class ValueMetadata implements Serializable {
       this.scale = scale;
     }
 
-    @Override
-    public int getPrecision() {
-      return precision;
-    }
-
-    @Override
-    public int getScale() {
-      return scale;
-    }
-
     @Override
     String getAdditionalInfo() {
       return DecimalValueMetadata.encodeData(this);
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/timeline/TimelineServiceClient.java 
b/hudi-common/src/main/java/org/apache/hudi/timeline/TimelineServiceClient.java
index 444f1da554bb..660e5db2ef85 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/timeline/TimelineServiceClient.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/timeline/TimelineServiceClient.java
@@ -20,11 +20,9 @@ package org.apache.hudi.timeline;
 
 import org.apache.hudi.common.table.view.FileSystemViewStorageConfig;
 
+import lombok.extern.slf4j.Slf4j;
 import org.apache.http.client.utils.URIBuilder;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
 
@@ -33,9 +31,9 @@ import java.util.concurrent.TimeUnit;
  * to the Timeline Server from the executors.
  * This class uses the Fluent HTTP client part of the HTTPComponents.
  */
+@Slf4j
 public class TimelineServiceClient extends TimelineServiceClientBase {
 
-  private static final Logger LOG = 
LoggerFactory.getLogger(TimelineServiceClient.class);
   private static final String DEFAULT_SCHEME = "http";
 
   protected final String timelineServerHost;
@@ -59,7 +57,7 @@ public class TimelineServiceClient extends 
TimelineServiceClientBase {
     }
 
     String url = builder.toString();
-    LOG.debug("Sending request : ({})", url);
+    log.debug("Sending request : ({})", url);
     org.apache.http.client.fluent.Response response = get(request.getMethod(), 
url, timeoutMs);
     return new Response(response.returnContent().asStream());
   }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/timeline/TimelineServiceClientBase.java
 
b/hudi-common/src/main/java/org/apache/hudi/timeline/TimelineServiceClientBase.java
index b0a50d256a20..0177e5854a19 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/timeline/TimelineServiceClientBase.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/timeline/TimelineServiceClientBase.java
@@ -25,6 +25,8 @@ import org.apache.hudi.common.util.RetryHelper;
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.module.afterburner.AfterburnerModule;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -57,34 +59,20 @@ public abstract class TimelineServiceClientBase implements 
Serializable {
     return  (retryHelper != null) ? retryHelper.start(() -> 
executeRequest(request)) : executeRequest(request);
   }
 
+  @AllArgsConstructor
+  @Getter
   public static class Request {
+
     private final TimelineServiceClient.RequestMethod method;
     private final String path;
     private final Option<Map<String, String>> queryParameters;
 
-    private Request(TimelineServiceClient.RequestMethod method, String path, 
Option<Map<String, String>> queryParameters) {
-      this.method = method;
-      this.path = path;
-      this.queryParameters = queryParameters;
-    }
-
-    public RequestMethod getMethod() {
-      return method;
-    }
-
-    public String getPath() {
-      return path;
-    }
-
-    public Option<Map<String, String>> getQueryParameters() {
-      return queryParameters;
-    }
-
     public static TimelineServiceClient.Request.Builder 
newBuilder(TimelineServiceClient.RequestMethod method, String path) {
       return new TimelineServiceClient.Request.Builder(method, path);
     }
 
     public static class Builder {
+
       private final TimelineServiceClient.RequestMethod method;
       private final String path;
       private Option<Map<String, String>> queryParameters;

Reply via email to