This is an automated email from the ASF dual-hosted git repository.

sivabalan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 042a0c3b8ec0 [MINOR] Reducing few noisy logs (#13895)
042a0c3b8ec0 is described below

commit 042a0c3b8ec0377bd7d87730dc802287a34f629f
Author: Sivabalan Narayanan <[email protected]>
AuthorDate: Wed Sep 17 17:47:05 2025 -0700

    [MINOR] Reducing few noisy logs (#13895)
---
 .../src/main/scala/org/apache/spark/HoodieSparkKryoRegistrar.scala | 2 +-
 .../table/read/buffer/PositionBasedFileGroupRecordBuffer.java      | 2 +-
 .../org/apache/hudi/io/storage/HoodieNativeAvroHFileReader.java    | 7 -------
 .../src/main/scala/org/apache/hudi/HoodieFileIndex.scala           | 2 +-
 .../src/main/scala/org/apache/hudi/SparkHoodieTableFileIndex.scala | 2 +-
 5 files changed, 4 insertions(+), 11 deletions(-)

diff --git a/hudi-client/hudi-spark-client/src/main/scala/org/apache/spark/HoodieSparkKryoRegistrar.scala b/hudi-client/hudi-spark-client/src/main/scala/org/apache/spark/HoodieSparkKryoRegistrar.scala
index fcfe78b0bf0c..d42208d10bda 100644
--- a/hudi-client/hudi-spark-client/src/main/scala/org/apache/spark/HoodieSparkKryoRegistrar.scala
+++ b/hudi-client/hudi-spark-client/src/main/scala/org/apache/spark/HoodieSparkKryoRegistrar.scala
@@ -83,7 +83,7 @@ class HoodieSparkKryoRegistrar extends HoodieCommonKryoRegistrar with KryoRegist
         kryo.addDefaultSerializer(classOf[Message], new ProtobufSerializer())
       }
     } catch {
-      case _: ClassNotFoundException | _: NoClassDefFoundError => log.warn("Protobuf classes not found on the classpath, skipping Protobuf serializer registration.")
+      case _: ClassNotFoundException | _: NoClassDefFoundError => log.debug("Protobuf classes not found on the classpath, skipping Protobuf serializer registration.")
     }
   }
 
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/read/buffer/PositionBasedFileGroupRecordBuffer.java b/hudi-common/src/main/java/org/apache/hudi/common/table/read/buffer/PositionBasedFileGroupRecordBuffer.java
index 415a5731e8af..8487cd310a40 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/read/buffer/PositionBasedFileGroupRecordBuffer.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/read/buffer/PositionBasedFileGroupRecordBuffer.java
@@ -96,7 +96,7 @@ public class PositionBasedFileGroupRecordBuffer<T> extends KeyBasedFileGroupReco
     // Extract positions from data block.
     List<Long> recordPositions = extractRecordPositions(dataBlock, baseFileInstantTime);
     if (recordPositions == null) {
-      LOG.warn("Falling back to key based merge for Read");
+      LOG.debug("Falling back to key based merge for Read");
       fallbackToKeyBasedBuffer();
       super.processDataBlock(dataBlock, keySpecOpt);
       return;
diff --git a/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieNativeAvroHFileReader.java b/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieNativeAvroHFileReader.java
index 48aae71fd0c9..8f6e12487db3 100644
--- a/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieNativeAvroHFileReader.java
+++ b/hudi-common/src/main/java/org/apache/hudi/io/storage/HoodieNativeAvroHFileReader.java
@@ -244,10 +244,6 @@ public class HoodieNativeAvroHFileReader extends HoodieAvroHFileReaderImplBase {
       List<Expression> children = ((Predicates.In) keyFilterOpt.get()).getRightChildren();
       keys = children.stream().map(e -> (String) e.eval(null)).collect(Collectors.toList());
     }
-
-    if (keys.isEmpty() && keyFilterOpt.isPresent()) {
-      LOG.warn("Cannot extract valid keys from predicate: {}", keyFilterOpt.get());
-    }
     return keys;
   }
 
@@ -260,9 +256,6 @@ public class HoodieNativeAvroHFileReader extends HoodieAvroHFileReaderImplBase {
       keyPrefixes = children.stream()
           .map(e -> (String) e.eval(null)).collect(Collectors.toList());
     }
-    if (keyPrefixes.isEmpty() && keyFilterOpt.isPresent()) {
-      LOG.warn("Cannot extract valid key prefixes from predicate: {}", keyFilterOpt.get());
-    }
     return keyPrefixes;
   }
 
diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieFileIndex.scala b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieFileIndex.scala
index 46a75b9ac958..26e2c3f68e30 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieFileIndex.scala
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieFileIndex.scala
@@ -473,7 +473,7 @@ case class HoodieFileIndex(spark: SparkSession,
   private def isIndexAvailable: Boolean = indicesSupport.exists(idx => idx.isIndexAvailable)
 
   private def validateConfig(): Unit = {
-    if (isDataSkippingEnabled && (!isMetadataTableEnabled || !isIndexAvailable)) {
+    if (isDataSkippingEnabled && (!isMetadataTableEnabled || !isIndexAvailable) && !metaClient.isMetadataTable) {
       logWarning("Data skipping requires Metadata Table and at least one of 
the indices to be enabled! "
         + s"(isMetadataTableEnabled = $isMetadataTableEnabled, 
isColumnStatsIndexEnabled = $isColumnStatsIndexEnabled"
         + s", isRecordIndexApplicable = $isRecordIndexEnabled, 
isExpressionIndexEnabled = $isExpressionIndexEnabled, " +
diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/SparkHoodieTableFileIndex.scala b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/SparkHoodieTableFileIndex.scala
index 1cb212a82fb0..b42c2d17bea4 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/SparkHoodieTableFileIndex.scala
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/SparkHoodieTableFileIndex.scala
@@ -153,7 +153,7 @@ class SparkHoodieTableFileIndex(spark: SparkSession,
     } else {
       // If the partition columns have not stored in hoodie.properties(the table that was
       // created earlier), we trait it as a non-partitioned table.
-      logWarning("No partition columns available from hoodie.properties." +
+      logDebug("No partition columns available from hoodie.properties." +
         " Partition pruning will not work")
       new StructType()
     }
