This is an automated email from the ASF dual-hosted git repository.

vinoth pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 446ee91d7353 docs: Change some noisy WARN logs to INFO/DEBUG (#13986)
446ee91d7353 is described below

commit 446ee91d73538dd9d386c205aee6c01d7bdc37a2
Author: vinoth chandar <[email protected]>
AuthorDate: Wed Sep 24 13:31:12 2025 -0700

    docs: Change some noisy WARN logs to INFO/DEBUG (#13986)
    
    - Rewrote some messages to be correct/clear
    - Added a line to README around Spark 4 profile
---
 README.md                                                            | 1 +
 .../apache/hudi/table/action/index/ScheduleIndexActionExecutor.java  | 5 +++--
 .../main/java/org/apache/hudi/common/table/TableSchemaResolver.java  | 2 +-
 .../java/org/apache/hudi/common/table/log/block/HoodieLogBlock.java  | 2 +-
 .../common/table/read/buffer/PositionBasedFileGroupRecordBuffer.java | 4 ++--
 .../org/apache/hudi/common/config/DFSPropertiesConfiguration.java    | 4 ++--
 hudi-io/src/main/java/org/apache/hudi/storage/HoodieStorage.java     | 3 +--
 7 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index c62987c8888b..16f24238f25b 100644
--- a/README.md
+++ b/README.md
@@ -134,6 +134,7 @@ Refer to the table below for building with different Spark 
and Scala versions.
 | `-Dspark3.4`              | hudi-spark3.4-bundle_2.12                    | 
For Spark 3.4.x and Scala 2.12                   |
 | `-Dspark3.5 -Dscala-2.12` | hudi-spark3.5-bundle_2.12                    | 
For Spark 3.5.x and Scala 2.12 (same as default) |
 | `-Dspark3.5 -Dscala-2.13` | hudi-spark3.5-bundle_2.13                    | 
For Spark 3.5.x and Scala 2.13                   |
+| `-Dspark4.0`              | hudi-spark4.0-bundle_2.13                    | 
For Spark 4.0 and Scala 2.13 (Needs Java 17)     |
 | `-Dspark3`                | hudi-spark3-bundle_2.12 (legacy bundle name) | 
For Spark 3.5.x and Scala 2.12                   |
 
 Please note that only Spark-related bundles, i.e., `hudi-spark-bundle`, 
`hudi-utilities-bundle`,
diff --git 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/ScheduleIndexActionExecutor.java
 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/ScheduleIndexActionExecutor.java
index 48ec417f3b37..16427dfac8bd 100644
--- 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/ScheduleIndexActionExecutor.java
+++ 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/index/ScheduleIndexActionExecutor.java
@@ -94,9 +94,10 @@ public class ScheduleIndexActionExecutor<T, I, K, O> extends 
BaseActionExecutor<
     requestedPartitions.removeAll(indexesInflightOrCompleted);
 
     if (!requestedPartitions.isEmpty()) {
-      LOG.warn("Following partitions already exist or inflight: {}. Going to 
schedule indexing of only these partitions: {}", indexesInflightOrCompleted, 
requestedPartitions);
+      LOG.info("Some index partitions already exist: {}. Scheduling indexing 
of only these remaining partitions: {}",
+          indexesInflightOrCompleted, requestedPartitions);
     } else {
-      LOG.error("All requested index types are inflight or completed: {}", 
partitionIndexTypes);
+      LOG.info("All requested index partitions exist (either inflight or 
built): {}", partitionIndexTypes);
       return Option.empty();
     }
     List<MetadataPartitionType> finalPartitionsToIndex = 
partitionIndexTypes.stream()
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/TableSchemaResolver.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/table/TableSchemaResolver.java
index a1385572091d..2e5cb79db203 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/TableSchemaResolver.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/table/TableSchemaResolver.java
@@ -265,7 +265,7 @@ public class TableSchemaResolver {
               .map(writeStat -> new StoragePath(metaClient.getBasePath(), 
writeStat.getPath()));
           return Option.of(fetchSchemaFromFiles(filePaths));
         } else {
-          LOG.warn("Could not find any data file written for commit, so could 
not get schema for table {}", metaClient.getBasePath());
+          LOG.debug("Could not find any data file written for commit, so could 
not get schema for table {}", metaClient.getBasePath());
           return Option.empty();
         }
       default:
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieLogBlock.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieLogBlock.java
index 948ed30fede5..554b83b3455d 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieLogBlock.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieLogBlock.java
@@ -176,7 +176,7 @@ public abstract class HoodieLogBlock {
   }
 
   protected void removeBaseFileInstantTimeOfPositions() {
-    LOG.warn("There are records without valid positions. "
+    LOG.info("There are records without valid positions. "
         + "Skip writing record positions to the block header.");
     
logBlockHeader.remove(HeaderMetadataType.BASE_FILE_INSTANT_TIME_OF_RECORD_POSITIONS);
   }
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/read/buffer/PositionBasedFileGroupRecordBuffer.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/table/read/buffer/PositionBasedFileGroupRecordBuffer.java
index 2339d5993647..66919615486a 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/read/buffer/PositionBasedFileGroupRecordBuffer.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/table/read/buffer/PositionBasedFileGroupRecordBuffer.java
@@ -96,7 +96,7 @@ public class PositionBasedFileGroupRecordBuffer<T> extends 
KeyBasedFileGroupReco
     // Extract positions from data block.
     List<Long> recordPositions = extractRecordPositions(dataBlock, 
baseFileInstantTime);
     if (recordPositions == null) {
-      LOG.debug("Falling back to key based merge for Read");
+      LOG.debug("Falling back to key based merge for data block");
       fallbackToKeyBasedBuffer();
       super.processDataBlock(dataBlock, keySpecOpt);
       return;
@@ -181,7 +181,7 @@ public class PositionBasedFileGroupRecordBuffer<T> extends 
KeyBasedFileGroupReco
 
     List<Long> recordPositions = extractRecordPositions(deleteBlock, 
baseFileInstantTime);
     if (recordPositions == null) {
-      LOG.warn("Falling back to key based merge for Read");
+      LOG.debug("Falling back to key based merge for delete block");
       fallbackToKeyBasedBuffer();
       super.processDeleteBlock(deleteBlock);
       return;
diff --git 
a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java
 
b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java
index 3f74727a4fa4..365dd65f73a6 100644
--- 
a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java
+++ 
b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java
@@ -154,7 +154,7 @@ public class DFSPropertiesConfiguration extends 
PropertiesConfig {
 
     try {
       if (filePath.equals(DEFAULT_PATH) && !storage.exists(filePath)) {
-        LOG.warn("Properties file " + filePath + " not found. Ignoring to load 
props file");
+        LOG.debug("Properties file {} not found. Ignoring to load props file", 
filePath);
         return;
       }
     } catch (IOException ioe) {
@@ -231,7 +231,7 @@ public class DFSPropertiesConfiguration extends 
PropertiesConfig {
   private static Option<StoragePath> getConfPathFromEnv() {
     String confDir = System.getenv(CONF_FILE_DIR_ENV_NAME);
     if (confDir == null) {
-      LOG.warn("Cannot find " + CONF_FILE_DIR_ENV_NAME + ", please set it as 
the dir of " + DEFAULT_PROPERTIES_FILE);
+      LOG.debug("Environment variable " + CONF_FILE_DIR_ENV_NAME + " not set. 
If desired, set it to the folder containing: " + DEFAULT_PROPERTIES_FILE);
       return Option.empty();
     }
     if (StringUtils.isNullOrEmpty(URI.create(confDir).getScheme())) {
diff --git a/hudi-io/src/main/java/org/apache/hudi/storage/HoodieStorage.java 
b/hudi-io/src/main/java/org/apache/hudi/storage/HoodieStorage.java
index adcbf6a630f3..004d88b66183 100644
--- a/hudi-io/src/main/java/org/apache/hudi/storage/HoodieStorage.java
+++ b/hudi-io/src/main/java/org/apache/hudi/storage/HoodieStorage.java
@@ -374,8 +374,7 @@ public abstract class HoodieStorage implements Closeable {
         if (!renameSuccess && null != tmpPath) {
           try {
             deleteFile(tmpPath);
-            LOG.warn("Fail to rename " + tmpPath + " to " + path
-                + ", target file exists: " + exists(path));
+            LOG.debug("Failed to rename {} to {}, target file exists: {}", 
tmpPath, path, exists(path));
           } catch (IOException e) {
             throw new HoodieIOException("Failed to delete tmp file " + 
tmpPath, e);
           }

Reply via email to