ad1happy2go commented on issue #10107:
URL: https://github.com/apache/hudi/issues/10107#issuecomment-1813869857

   @haoxie-aws I tried to reproduce this with the OSS version but was not able to reproduce it. Can you try with the latest version? Below is the code I used.
   
   Writer 
   ```
   # Project helper: builds a Spark 3.2 session with Hudi 0.11.0 on the classpath.
   spark = get_spark_session(spark_version="3.2", hudi_version="0.11.0")
   
   def generateDataFrame(num_rows=100001):
       """Build a Spark DataFrame of ``num_rows`` synthetic records.

       Each record has a random UUID record key, a sequential index, and the
       current wall-clock time — all as strings, matching the schema below.

       Args:
           num_rows: Number of rows to generate. Defaults to 100001, which
               preserves the original behaviour.

       Returns:
           A Spark DataFrame with string columns (uuid, index, timestamp).
       """
       # All three columns are plain strings; the timestamp is a stringified
       # datetime used as the Hudi precombine field by the writer.
       schema = StructType([
           StructField("uuid", StringType(), True),
           StructField("index", StringType(), True),
           StructField("timestamp", StringType(), True)
       ])

       # One Row per record; uuid4 yields a unique record key per row.
       data = [Row(str(uuid.uuid4()), str(i), str(datetime.now()))
               for i in range(num_rows)]

       # Distribute the rows before constructing the DataFrame.
       rdd = spark.sparkContext.parallelize(data)

       return spark.createDataFrame(rdd, schema)
   
   def loop():
       """Upsert one freshly generated batch into the Hudi table at PATH."""
       # Hudi writer configuration.
       # NOTE(review): "hoodie.table.type" — the datasource write option is
       # usually "hoodie.datasource.write.table.type"; COPY_ON_WRITE is the
       # default either way, so behaviour is unchanged. Confirm if MOR needed.
       hudi_options = {
           "hoodie.table.name": TABLE_NAME,
           "hoodie.table.type": "COPY_ON_WRITE",
           "hoodie.datasource.write.recordkey.field": "uuid",
           "hoodie.datasource.write.precombine.field": "timestamp",
           "hoodie.datasource.write.operation": "upsert",
           "hoodie.parquet.max.file.size": "20971520",  # 20 MB
           "hoodie.parquet.small.file.limit": "0",  # 0 disables small-file handling
           "hoodie.keep.max.commits": "12",
           "hoodie.keep.min.commits": "11",
           "hoodie.bulkinsert.sort.mode": "NONE",
           "hoodie.clustering.inline": "true",
           "hoodie.clustering.inline.max.commits": "2",
           "hoodie.clustering.plan.strategy.small.file.limit": "20971520",  # 20 MB
           # Fixed: the original key lacked the "hoodie." prefix, so this
           # clustering target-file-size option was silently ignored.
           "hoodie.clustering.plan.strategy.target.file.max.bytes": "31457280",  # 30 MB
           "hoodie.metadata.enable": "true",
       }

       # Append-mode write: each call upserts a new synthetic batch.
       generateDataFrame().write.options(**hudi_options).format("org.apache.hudi") \
           .option("hoodie.datasource.write.hive_style_partitioning", "true") \
           .mode("append") \
           .save(PATH)
   
   
   if __name__ == "__main__":
       # Drive 1001 back-to-back writer iterations.
       total_runs = 1001
       for _ in range(total_runs):
           loop()
   ```
   
   READER 
   ```
   # Project helper: builds a Spark 3.2 session with Hudi 0.11.0 on the classpath.
   spark = get_spark_session(spark_version="3.2", hudi_version="0.11.0")
   
   def loop():
       """Read the Hudi table once, then print its row count and a sample."""
       # Load the table once and reuse it; the original issued two separate
       # reads per iteration (one for count(), one for show()).
       df = spark.read.format("hudi").load(PATH)
       print(df.count())
       df.show()
   
   if __name__ == "__main__":
       # Repeatedly poll the table while the writer runs concurrently.
       iterations = 1001
       for _ in range(iterations):
           loop()
   ```


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to