This is an automated email from the ASF dual-hosted git repository.

danny0405 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 75ab91c58e5 [HUDI-5530] Fix WARNING during compile (#7647)
75ab91c58e5 is described below

commit 75ab91c58e538e206ab64e57befee50fa61be933
Author: jakevin <[email protected]>
AuthorDate: Tue Sep 17 09:19:08 2024 +0800

    [HUDI-5530] Fix WARNING during compile (#7647)
---
 .../procedures/ExportInstantsProcedure.scala       | 80 +++++++++++-----------
 .../org/apache/hudi/ScalaAssertionSupport.scala    |  2 +-
 .../org/apache/hudi/TestHoodieSparkSqlWriter.scala |  4 +-
 .../execution/benchmark/AvroSerDerBenchmark.scala  |  2 +-
 .../procedure/TestHdfsParquetImportProcedure.scala | 16 ++---
 5 files changed, 50 insertions(+), 54 deletions(-)

diff --git 
a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ExportInstantsProcedure.scala
 
b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ExportInstantsProcedure.scala
index 5d1be415923..f44b478f3f8 100644
--- 
a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ExportInstantsProcedure.scala
+++ 
b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ExportInstantsProcedure.scala
@@ -127,49 +127,47 @@ class ExportInstantsProcedure extends BaseProcedure with 
ProcedureBuilder with L
         reader.hasNext && copyCount < limit
       }) {
         val blk = reader.next.asInstanceOf[HoodieAvroDataBlock]
-        try {
-          val recordItr = blk.getRecordIterator(HoodieRecordType.AVRO)
-          try while ( {
-            recordItr.hasNext
-          }) {
-            val ir = recordItr.next
-            // Archived instants are saved as avro encoded 
HoodieArchivedMetaEntry records. We need to get the
-            // metadata record from the entry and convert it to json.
-            val archiveEntryRecord = 
SpecificData.get.deepCopy(HoodieArchivedMetaEntry.SCHEMA$, 
ir).asInstanceOf[HoodieArchivedMetaEntry]
-            val action = archiveEntryRecord.get("actionType").toString
-            if (!actionSet.contains(action)) break() //todo: continue is not 
supported
-            val metadata: GenericRecord = action match {
-              case HoodieTimeline.CLEAN_ACTION =>
-                archiveEntryRecord.getHoodieCleanMetadata
-
-              case HoodieTimeline.COMMIT_ACTION =>
-                archiveEntryRecord.getHoodieCommitMetadata
-
-              case HoodieTimeline.DELTA_COMMIT_ACTION =>
-                archiveEntryRecord.getHoodieCommitMetadata
-
-              case HoodieTimeline.ROLLBACK_ACTION =>
-                archiveEntryRecord.getHoodieRollbackMetadata
-
-              case HoodieTimeline.SAVEPOINT_ACTION =>
-                archiveEntryRecord.getHoodieSavePointMetadata
-
-              case HoodieTimeline.COMPACTION_ACTION =>
-                archiveEntryRecord.getHoodieCompactionMetadata
-
-              case _ => logInfo("Unknown type of action " + action)
-                null
-            }
-            val instantTime = archiveEntryRecord.get("commitTime").toString
-            val outPath = localFolder + StoragePath.SEPARATOR + instantTime + 
"." + action
-            if (metadata != null) writeToFile(storage, outPath, 
HoodieAvroUtils.avroToJson(metadata, true))
-            if ( {
-              copyCount += 1;
-              copyCount
-            } == limit) break //todo: break is not supported
+        val recordItr = blk.getRecordIterator(HoodieRecordType.AVRO)
+        try while ( {
+          recordItr.hasNext
+        }) {
+          val ir = recordItr.next
+          // Archived instants are saved as avro encoded 
HoodieArchivedMetaEntry records. We need to get the
+          // metadata record from the entry and convert it to json.
+          val archiveEntryRecord = 
SpecificData.get.deepCopy(HoodieArchivedMetaEntry.SCHEMA$, 
ir).asInstanceOf[HoodieArchivedMetaEntry]
+          val action = archiveEntryRecord.get("actionType").toString
+          if (!actionSet.contains(action)) break() //todo: continue is not 
supported
+          val metadata: GenericRecord = action match {
+            case HoodieTimeline.CLEAN_ACTION =>
+              archiveEntryRecord.getHoodieCleanMetadata
+
+            case HoodieTimeline.COMMIT_ACTION =>
+              archiveEntryRecord.getHoodieCommitMetadata
+
+            case HoodieTimeline.DELTA_COMMIT_ACTION =>
+              archiveEntryRecord.getHoodieCommitMetadata
+
+            case HoodieTimeline.ROLLBACK_ACTION =>
+              archiveEntryRecord.getHoodieRollbackMetadata
+
+            case HoodieTimeline.SAVEPOINT_ACTION =>
+              archiveEntryRecord.getHoodieSavePointMetadata
+
+            case HoodieTimeline.COMPACTION_ACTION =>
+              archiveEntryRecord.getHoodieCompactionMetadata
+
+            case _ => logInfo("Unknown type of action " + action)
+              null
           }
-          finally if (recordItr != null) recordItr.close()
+          val instantTime = archiveEntryRecord.get("commitTime").toString
+          val outPath = localFolder + StoragePath.SEPARATOR + instantTime + 
"." + action
+          if (metadata != null) writeToFile(storage, outPath, 
HoodieAvroUtils.avroToJson(metadata, true))
+          if ( {
+            copyCount += 1;
+            copyCount
+          } == limit) break //todo: break is not supported
         }
+        finally if (recordItr != null) recordItr.close()
       }
       reader.close()
     }
diff --git 
a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/ScalaAssertionSupport.scala
 
b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/ScalaAssertionSupport.scala
index a6a80ee34a3..e13ec474819 100644
--- 
a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/ScalaAssertionSupport.scala
+++ 
b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/ScalaAssertionSupport.scala
@@ -30,7 +30,7 @@ trait ScalaAssertionSupport {
         // scalastyle:off return
         return t.asInstanceOf[T]
       // scalastyle:on return
-      case ot @ _ =>
+      case ot : Throwable =>
         fail(s"Expected exception of class $expectedExceptionClass, but 
${ot.getClass} has been thrown: $ot\n${ot.getStackTrace.mkString("\n")}")
     }
 
diff --git 
a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/TestHoodieSparkSqlWriter.scala
 
b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/TestHoodieSparkSqlWriter.scala
index a4c3376d108..6128392c061 100644
--- 
a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/TestHoodieSparkSqlWriter.scala
+++ 
b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/TestHoodieSparkSqlWriter.scala
@@ -1171,7 +1171,7 @@ def testBulkInsertForDropPartitionColumn(): Unit = {
         .option(HoodieWriteConfig.KEYGENERATOR_CLASS_NAME.key, 
classOf[SimpleKeyGenerator].getName)
         .mode(SaveMode.Append).save(tablePath1)
     } catch {
-      case _ => fail("Switching from no keygen to explicit SimpleKeyGenerator 
should not fail");
+      case _: Throwable => fail("Switching from no keygen to explicit 
SimpleKeyGenerator should not fail");
     }
   }
 
@@ -1204,7 +1204,7 @@ def testBulkInsertForDropPartitionColumn(): Unit = {
         .option(HoodieWriteConfig.TBL_NAME.key, tableName1)
         .mode(SaveMode.Append).save(tablePath1)
     } catch {
-      case _ => fail("Switching from  explicit SimpleKeyGenerator to default 
keygen should not fail");
+      case _: Throwable => fail("Switching from  explicit SimpleKeyGenerator 
to default keygen should not fail");
     }
   }
 
diff --git 
a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/execution/benchmark/AvroSerDerBenchmark.scala
 
b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/execution/benchmark/AvroSerDerBenchmark.scala
index 842276c0d40..fd11778a500 100644
--- 
a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/execution/benchmark/AvroSerDerBenchmark.scala
+++ 
b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/execution/benchmark/AvroSerDerBenchmark.scala
@@ -83,7 +83,7 @@ object AvroSerDerBenchmark extends HoodieBenchmarkBase {
       testRdd.mapPartitions { iter =>
         val schema = 
AvroConversionUtils.convertStructTypeToAvroSchema(sparkSchema, "record", "my")
         val avroToRowConverter = 
AvroConversionUtils.createAvroToInternalRowConverter(schema, sparkSchema)
-        iter.map(record => 
avroToRowConverter.apply(record.asInstanceOf[GenericRecord]).get)
+        iter.map(record => avroToRowConverter.apply(record).get)
       }.foreach(f => f)
     }
     benchmark.run()
diff --git 
a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/procedure/TestHdfsParquetImportProcedure.scala
 
b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/procedure/TestHdfsParquetImportProcedure.scala
index 31a1a89fc1e..e85d660f0f5 100644
--- 
a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/procedure/TestHdfsParquetImportProcedure.scala
+++ 
b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/procedure/TestHdfsParquetImportProcedure.scala
@@ -121,17 +121,15 @@ class TestHdfsParquetImportProcedure extends 
HoodieSparkProcedureTestBase {
       records.add(new 
HoodieTestDataGenerator().generateGenericRecord(recordNum.toString,
         "0", "rider-" + recordNum, "driver-" + recordNum, startTime + 
TimeUnit.HOURS.toSeconds(recordNum)))
     }
+    val writer: ParquetWriter[GenericRecord] = 
AvroParquetWriter.builder[GenericRecord](srcFile)
+      .withSchema(HoodieTestDataGenerator.AVRO_SCHEMA)
+      .withConf(HoodieTestUtils.getDefaultStorageConf.unwrap()).build
     try {
-      val writer: ParquetWriter[GenericRecord] = 
AvroParquetWriter.builder[GenericRecord](srcFile)
-        .withSchema(HoodieTestDataGenerator.AVRO_SCHEMA)
-        .withConf(HoodieTestUtils.getDefaultStorageConf.unwrap()).build
-      try {
-        for (record <- records.asScala) {
-          writer.write(record)
-        }
-      } finally {
-        if (writer != null) writer.close()
+      for (record <- records.asScala) {
+        writer.write(record)
       }
+    } finally {
+      if (writer != null) writer.close()
     }
     records
   }

Reply via email to