This is an automated email from the ASF dual-hosted git repository.
wombatukun pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/master by this push:
new e67d0aa71e2 Improve a test (#12605)
e67d0aa71e2 is described below
commit e67d0aa71e2253a5b5cf95028cdf95482ffeca6a
Author: Lin Liu <[email protected]>
AuthorDate: Wed Jan 8 21:09:42 2025 -0800
Improve a test (#12605)
---
.../TestHoodieMultipleBaseFileFormat.scala | 24 +++++++++++++++++-----
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestHoodieMultipleBaseFileFormat.scala b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestHoodieMultipleBaseFileFormat.scala
index 64b74cb52ad..c336a21f54a 100644
--- a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestHoodieMultipleBaseFileFormat.scala
+++ b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestHoodieMultipleBaseFileFormat.scala
@@ -19,18 +19,20 @@
package org.apache.hudi.functional
-import org.apache.hudi.{DataSourceWriteOptions, DefaultSparkRecordMerger, SparkDatasetMixin}
-import org.apache.hudi.common.config.HoodieStorageConfig
+import org.apache.hudi.common.config.{HoodieCommonConfig, HoodieMetadataConfig, HoodieStorageConfig}
+import org.apache.hudi.common.engine.{HoodieEngineContext, HoodieLocalEngineContext}
import org.apache.hudi.common.model.{HoodieFileFormat, HoodieTableType}
-import org.apache.hudi.common.table.HoodieTableConfig
+import org.apache.hudi.common.table.view.{FileSystemViewManager, FileSystemViewStorageConfig, SyncableFileSystemView}
+import org.apache.hudi.common.table.{HoodieTableConfig, HoodieTableMetaClient}
import org.apache.hudi.common.testutils.HoodieTestDataGenerator.{DEFAULT_FIRST_PARTITION_PATH, DEFAULT_SECOND_PARTITION_PATH}
import org.apache.hudi.common.testutils.RawTripTestPayload.recordsToStrings
import org.apache.hudi.config.HoodieWriteConfig
+import org.apache.hudi.metadata.HoodieTableMetadata
import org.apache.hudi.testutils.HoodieSparkClientTestBase
-
+import org.apache.hudi.{DataSourceWriteOptions, DefaultSparkRecordMerger, SparkDatasetMixin}
import org.apache.spark.sql.{Dataset, Row, SaveMode, SparkSession}
+import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue}
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
-import org.junit.jupiter.api.Assertions.assertEquals
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
@@ -104,6 +106,18 @@ class TestHoodieMultipleBaseFileFormat extends HoodieSparkClientTestBase with Sp
.mode(SaveMode.Append)
.save(basePath)
+ // Check if the ORC files exist.
+ val metaClient = HoodieTableMetaClient.builder().setBasePath(basePath).setStorage(storage).build
+ val engineContext: HoodieEngineContext = new HoodieLocalEngineContext(storageConf)
+ val metadataConfig: HoodieMetadataConfig = HoodieMetadataConfig.newBuilder.build
+ val viewManager: FileSystemViewManager = FileSystemViewManager.createViewManager(
+ engineContext, FileSystemViewStorageConfig.newBuilder.build,
+ HoodieCommonConfig.newBuilder.build,
+ (mc: HoodieTableMetaClient) => HoodieTableMetadata.create(engineContext, mc.getStorage, metadataConfig, basePath))
+ val fsView: SyncableFileSystemView = viewManager.getFileSystemView(metaClient)
+ val orcFiles = fsView.getAllBaseFiles(DEFAULT_SECOND_PARTITION_PATH).filter(bf => bf.getFileName.endsWith("orc"))
+ assertTrue(orcFiles.count() > 0)
+
// Snapshot Read the table
val hudiDf = spark.read.format("hudi").load(basePath)
assertEquals(20, hudiDf.count())