aresa7796 opened a new issue #3460:
URL: https://github.com/apache/hudi/issues/3460


   Steps to reproduce the behavior:
   ```
   object HudiExample {
     def main(args: Array[String]): Unit = {
       val config = ConfigFactory.load()
   
       val conf = new SparkConf()
               .setMaster("local[*]")
         .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
         .set("spark.kryoserializer.buffer.max","512m")
         .setAppName("hudi-batch")
   
       val spark = 
SparkSession.builder().enableHiveSupport().config(conf).getOrCreate()
       spark.sparkContext.setLogLevel("error")
       val data = 
Seq("""{"timestamp":1628752653,"_track_id":1,"$column1":"123","$column2":"234"}""","""{"timestamp":1628752654,"_track_id":2,"$column1":"2","$column2":"2","type":
 1}""")
       import spark.implicits._
       val ds = spark.createDataset(data)
       val df = spark.read.json(ds)
   
       df.write
         .format("org.apache.hudi")
         .options(getQuickstartWriteConfigs)
         
.option(DataSourceWriteOptions.TABLE_TYPE_OPT_KEY,DataSourceWriteOptions.MOR_TABLE_TYPE_OPT_VAL)
         
.option(DataSourceWriteOptions.STORAGE_TYPE_OPT_KEY,DataSourceWriteOptions.MOR_STORAGE_TYPE_OPT_VAL)
         .option(DataSourceWriteOptions.PRECOMBINE_FIELD_OPT_KEY, "timestamp")
         .option(DataSourceWriteOptions.RECORDKEY_FIELD_OPT_KEY, "_track_id")
         .option("hoodie.table.name", "hudi_example")
         .mode(SaveMode.Append)
         .save("./hudi_example")
     }
   }
   
   ```
   
**Expected behavior**

The DataFrame should be written to the Hudi table successfully even though some JSON field names (e.g. `$column1`, `$column2`) begin with `$` — or, if such names cannot be supported, Hudi should fail with a clear error explaining that the column names are invalid for Avro and how to work around it (e.g. by renaming the columns before writing).
   
   **Environment Description**
   
   * Hudi version : 0.8.0 
   
   * Spark version : 3.1.2
   
   * Running on Docker? (yes/no) : no
   
   
**Additional context**

The failure happens while Hudi converts the Spark `StructType` to an Avro schema (`AvroConversionUtils.convertStructTypeToAvroSchema` → `SchemaConverters.toAvroType`). Avro's `Schema.validateName` only accepts field names matching `[A-Za-z_][A-Za-z0-9_]*`, so any column starting with `$` is rejected with `SchemaParseException: Illegal initial character`. A workaround is to rename the offending columns (e.g. `df.withColumnRenamed("$column1", "column1")`) before writing, but it would be helpful if Hudi either sanitized such names or documented this limitation.
   
   **Stacktrace**
   
   ```
   Exception in thread "main" org.apache.avro.SchemaParseException: Illegal 
initial character: $column1
        at org.apache.avro.Schema.validateName(Schema.java:1147)
        at org.apache.avro.Schema.access$200(Schema.java:81)
        at org.apache.avro.Schema$Field.<init>(Schema.java:403)
        at 
org.apache.avro.SchemaBuilder$FieldBuilder.completeField(SchemaBuilder.java:2124)
        at 
org.apache.avro.SchemaBuilder$FieldBuilder.completeField(SchemaBuilder.java:2120)
        at 
org.apache.avro.SchemaBuilder$FieldBuilder.access$5200(SchemaBuilder.java:2034)
        at 
org.apache.avro.SchemaBuilder$GenericDefault.noDefault(SchemaBuilder.java:2417)
        at 
org.apache.hudi.spark.org.apache.spark.sql.avro.SchemaConverters$.$anonfun$toAvroType$1(SchemaConverters.scala:177)
        at scala.collection.Iterator.foreach(Iterator.scala:943)
        at scala.collection.Iterator.foreach$(Iterator.scala:943)
        at scala.collection.AbstractIterator.foreach(Iterator.scala:1431)
        at scala.collection.IterableLike.foreach(IterableLike.scala:74)
        at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
        at org.apache.spark.sql.types.StructType.foreach(StructType.scala:102)
        at 
org.apache.hudi.spark.org.apache.spark.sql.avro.SchemaConverters$.toAvroType(SchemaConverters.scala:174)
        at 
org.apache.hudi.AvroConversionUtils$.convertStructTypeToAvroSchema(AvroConversionUtils.scala:52)
        at 
org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:144)
        at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:145)
        at 
org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
        at 
org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
        at 
org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
        at 
org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:90)
        at 
org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:180)
        at 
org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:218)
        at 
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
        at 
org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:215)
        at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:176)
        at 
org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:132)
        at 
org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:131)
        at 
org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:989)
        at 
org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
        at 
org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
        at 
org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)
        at 
org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
        at 
org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:989)
        at 
org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:438)
        at 
org.apache.spark.sql.DataFrameWriter.saveInternal(DataFrameWriter.scala:415)
        at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
   
   ```
   
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to