AngersZhuuuu commented on a change in pull request #33441:
URL: https://github.com/apache/spark/pull/33441#discussion_r677094233
##########
File path: external/avro/src/main/scala/org/apache/spark/sql/avro/AvroFileFormat.scala
##########
@@ -153,6 +154,27 @@ private[sql] class AvroFileFormat extends FileFormat
   }
   override def supportDataType(dataType: DataType): Boolean =
     AvroUtils.supportsDataType(dataType)
+
+  override def supportFieldName(name: String): Unit = {
+    val length = name.length
+    if (length == 0) {
+      throw QueryCompilationErrors.columnNameContainsInvalidCharactersError(name)
+    } else {
+      val first = name.charAt(0)
+      if (!Character.isLetter(first) && first != '_') {
Review comment:
> do you have some reference doc to prove this is indeed a limitation of avro? or can you run some local tests like `df.write.format("avro").save(path)`?

Running `df.write.format(source).save(path)` locally produces the errors below.

Avro error message:
```
[info]   org.apache.avro.SchemaParseException: Illegal initial character: (IF((ID = 1), 1, 0))
[info]   at org.apache.avro.Schema.validateName(Schema.java:1562)
[info]   at org.apache.avro.Schema.access$400(Schema.java:91)
[info]   at org.apache.avro.Schema$Field.<init>(Schema.java:546)
[info]   at org.apache.avro.SchemaBuilder$FieldBuilder.completeField(SchemaBuilder.java:2240)
[info]   at org.apache.avro.SchemaBuilder$FieldBuilder.completeField(SchemaBuilder.java:2236)
[info]   at org.apache.avro.SchemaBuilder$FieldBuilder.access$5100(SchemaBuilder.java:2150)
[info]   at org.apache.avro.SchemaBuilder$GenericDefault.noDefault(SchemaBuilder.java:2539)
[info]   at org.apache.spark.sql.avro.SchemaConverters$.$anonfun$toAvroType$1(SchemaConverters.scala:194)
[info]   at scala.collection.Iterator.foreach(Iterator.scala:943)
[info]   at scala.collection.Iterator.foreach$(Iterator.scala:943)
[info]   at scala.collection.AbstractIterator.foreach(Iterator.scala:1431)
[info]   at scala.collection.IterableLike.foreach(IterableLike.scala:74)
[info]   at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
[info]   at org.apache.spark.sql.types.StructType.foreach(StructType.scala:102)
[info]   at org.apache.spark.sql.avro.SchemaConverters$.toAvroType(SchemaConverters.scala:191)
[info]   at org.apache.spark.sql.avro.AvroUtils$.$anonfun$prepareWrite$1(AvroUtils.scala:98)
[info]   at scala.Option.getOrElse(Option.scala:189)
[info]   at org.apache.spark.sql.avro.AvroUtils$.prepareWrite(AvroUtils.scala:97)
[info]   at org.apache.spark.sql.avro.AvroFileFormat.prepareWrite(AvroFileFormat.scala:74)
[info]   at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:142)
[info]   at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:186)
[info]   at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:113)
[info]   at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:111)
[info]   at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:125)
[info]   at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:97)
[info]   at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
[info]   at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
[info]   at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
[info]   at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)
```
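For a local repro, a minimal sketch along these lines hits the same exception (the output path and the exact auto-generated alias here are illustrative, not taken from the PR):

```
// Sketch: an unaliased expression yields an auto-generated column name like
// `(IF((id = 1), 1, 0))`, which Avro's Schema.validateName rejects at write time.
val df = spark.range(3).selectExpr("IF(id = 1, 1, 0)")
df.printSchema()  // the column name contains parentheses, spaces, and '='
df.write.format("avro").save("/tmp/avro-bad-field-name")  // throws SchemaParseException
```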
Parquet error message, for comparison:
```
[info] - SPARK-33865: Hive DDL with avro should check col name *** FAILED *** (4 seconds, 554 milliseconds)
[info]   org.apache.spark.sql.AnalysisException: Column name "(IF((ID = 1), 1, 0))" contains invalid character(s). Please use alias to rename it.
[info]   at org.apache.spark.sql.errors.QueryCompilationErrors$.columnNameContainsInvalidCharactersError(QueryCompilationErrors.scala:2105)
[info]   at org.apache.spark.sql.execution.datasources.parquet.ParquetSchemaConverter$.checkFieldName(ParquetSchemaConverter.scala:590)
[info]   at org.apache.spark.sql.execution.datasources.parquet.ParquetWriteSupport$.$anonfun$setSchema$2(ParquetWriteSupport.scala:485)
[info]   at org.apache.spark.sql.execution.datasources.parquet.ParquetWriteSupport$.$anonfun$setSchema$2$adapted(ParquetWriteSupport.scala:485)
[info]   at scala.collection.immutable.List.foreach(List.scala:431)
[info]   at org.apache.spark.sql.execution.datasources.parquet.ParquetWriteSupport$.setSchema(ParquetWriteSupport.scala:485)
[info]   at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat.prepareWrite(ParquetFileFormat.scala:110)
[info]   at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:142)
[info]   at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:186)
[info]   at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:113)
[info]   at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:111)
[info]   at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:125)
[info]   at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:97)
[info]   at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
[info]   at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
[info]   at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
[info]   at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)
[info]   at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
[info]   at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:97)
[info]   at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:93)
[info]   at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:481)
[info]   at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:82)
[info]   at org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:481)
```
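For context on what Avro allows: `org.apache.avro.Schema.validateName` accepts names whose first character is a letter or `_` and whose remaining characters are letters, digits, or `_`. A standalone sketch of an equivalent check (illustrative only; `checkAvroFieldName` is a hypothetical name, and the real override would throw `QueryCompilationErrors.columnNameContainsInvalidCharactersError` as in the Parquet trace above):

```
// Hypothetical helper mirroring org.apache.avro.Schema.validateName's rules.
def checkAvroFieldName(name: String): Unit = {
  def fail(): Nothing = throw new IllegalArgumentException(
    s"""Column name "$name" contains invalid character(s). Please use alias to rename it.""")
  if (name.isEmpty) fail()                                   // empty names are rejected
  val first = name.charAt(0)
  if (!Character.isLetter(first) && first != '_') fail()     // first char: letter or '_'
  if (!name.drop(1).forall(c => Character.isLetterOrDigit(c) || c == '_')) {
    fail()                                                   // rest: letters, digits, '_'
  }
}
```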
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]