[jira] [Commented] (SPARK-18536) Failed to save to hive table when case class with empty field

2017-10-10 Thread Hyukjin Kwon (JIRA)

[ https://issues.apache.org/jira/browse/SPARK-18536?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16198613#comment-16198613 ]

Hyukjin Kwon commented on SPARK-18536:
--------------------------------------

The code:

{code}
import scala.collection.mutable.Queue

import org.apache.spark.sql.SaveMode
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext

// A case class with no fields produces an empty struct in the schema.
case class EmptyC()
case class EmptyCTable(dimensions: EmptyC, timebin: java.lang.Long)

val seq = Seq(EmptyCTable(EmptyC(), 100L))
val rdd = sc.makeRDD[EmptyCTable](seq)
val ssc = new StreamingContext(sc, Seconds(1))

val queue = Queue(rdd)
val s = ssc.queueStream(queue, false)
s.foreachRDD((rdd, time) => {
  if (!rdd.isEmpty) {
    rdd.toDF.write.mode(SaveMode.Overwrite).saveAsTable("empty_table")
  }
})

ssc.start()
ssc.awaitTermination()
{code}


now throws:


{code}
org.apache.spark.sql.AnalysisException: cannot resolve 'named_struct()' due to data type mismatch: input to function named_struct requires at least one argument;;
'SerializeFromObject [if (isnull(assertnotnull(assertnotnull(input[0, $line22.$read$$iw$$iw$EmptyCTable, true])).dimensions)) null else named_struct() AS dimensions#3, assertnotnull(assertnotnull(input[0, $line22.$read$$iw$$iw$EmptyCTable, true])).timebin.longValue AS timebin#4L]
+- ExternalRDD [obj#2]

  at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
  at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1$$anonfun$apply$2.applyOrElse(CheckAnalysis.scala:95)
  at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1$$anonfun$apply$2.applyOrElse(CheckAnalysis.scala:87)
  at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:289)
  at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:289)
  at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
  at org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:288)
  at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
  at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
  at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:306)
{code}
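
For what it's worth, the streaming setup is not required to reproduce this; in a Spark shell (where the implicits behind {{toDF}} are already in scope), a minimal sketch using the case classes above fails the same way at analysis time:

{code}
// Dataset creation runs the analyzer eagerly, so this one-liner fails with
// the named_struct() AnalysisException shown above.
Seq(EmptyCTable(EmptyC(), 100L)).toDF()
{code}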

> Failed to save to hive table when case class with empty field
> --------------------------------------------------------------
>
> Key: SPARK-18536
> URL: https://issues.apache.org/jira/browse/SPARK-18536
> Project: Spark
>  Issue Type: Bug
>  Components: SQL
>Affects Versions: 2.0.1
>Reporter: pin_zhang
>
> {code}
> import scala.collection.mutable.Queue
> import org.apache.spark.SparkConf
> import org.apache.spark.SparkContext
> import org.apache.spark.sql.SaveMode
> import org.apache.spark.sql.SparkSession
> import org.apache.spark.streaming.Seconds
> import org.apache.spark.streaming.StreamingContext
> {code}
> 1. Test code
> {code}
> case class EmptyC()
> case class EmptyCTable(dimensions: EmptyC, timebin: java.lang.Long)
>
> object EmptyTest {
>   def main(args: Array[String]): Unit = {
>     val conf = new SparkConf().setAppName("scala").setMaster("local[2]")
>     val ctx = new SparkContext(conf)
>     val spark = SparkSession.builder().enableHiveSupport().config(conf).getOrCreate()
>     val seq = Seq(EmptyCTable(EmptyC(), 100L))
>     val rdd = ctx.makeRDD[EmptyCTable](seq)
>     val ssc = new StreamingContext(ctx, Seconds(1))
>     val queue = Queue(rdd)
>     val s = ssc.queueStream(queue, false)
>     s.foreachRDD((rdd, time) => {
>       if (!rdd.isEmpty) {
>         import spark.sqlContext.implicits._
>         rdd.toDF.write.mode(SaveMode.Overwrite).saveAsTable("empty_table")
>       }
>     })
>     ssc.start()
>     ssc.awaitTermination()
>   }
> }
> {code}
> 2. Exception
> {noformat}
> Caused by: java.lang.IllegalStateException: Cannot build an empty group
>   at org.apache.parquet.Preconditions.checkState(Preconditions.java:91)
>   at org.apache.parquet.schema.Types$GroupBuilder.build(Types.java:554)
>   at org.apache.parquet.schema.Types$GroupBuilder.build(Types.java:426)
>   at org.apache.parquet.schema.Types$Builder.named(Types.java:228)
>   at org.apache.spark.sql.execution.datasources.parquet.ParquetSchemaConverter.convertField(ParquetSchemaConverter.scala:527)
>   at org.apache.spark.sql.execution.datasources.parquet.ParquetSchemaConverter.convertField(ParquetSchemaConverter.scala:321)
>   at org.apache.spark.sql.execution.datasources.parquet.ParquetSchemaConverter$$anonfun$convert$1.apply(ParquetSchemaConverter.scala:313)
>   at org.apache.spark.sql.execution.datasources.parquet.ParquetSchemaConverter$$anonfun$convert$1.apply(ParquetSchemaConverter.scala:313)
>   at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
>   at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
>   at scala.collection.Iterator$class.foreach(Iterator.scala:893)
>   at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
>   at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
>   at org.apache.spark.sql.types.StructType.foreach(StructType.scala:95)
>   at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
>   at org.apache.spark.sql.types.StructType.map(StructType.scala:95)
>   at org.apache.spark.sql.execution.datasources.parquet.ParquetSchemaConverter.convert(ParquetSchemaConverter.scala:313)
>   at org.apache.spark.sql.execution.datasources.parquet.ParquetWriteSupport.init(ParquetWriteSupport.scala:85)
>   at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:288)
>   at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:262)
>   at org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.<init>(ParquetFileFormat.scala:562)
>   at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anon$1.newInstance(ParquetFileFormat.scala:139)
>   at org.apache.spark.sql.execution.datasources.BaseWriterContainer.newOutputWriter(WriterContainer.scala:131)
>   at org.apache.spark.sql.execution.datasources.DefaultWriterContainer.writeRows(WriterContainer.scala:247)
>   at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand$$anonfun$run$1$$anonfun$apply$mcV$sp$1.apply(InsertIntoHadoopFsRelationCommand.scala:143)
>   at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand$$anonfun$run$1$$anonfun$apply$mcV$sp$1.apply(InsertIntoHadoopFsRelationCommand.scala:143)
>   at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
>   at org.apache.spark.scheduler.Task.run(Task.scala:86)
> {noformat}
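
The Parquet-side failure in the original report is easy to see in isolation. As a minimal sketch against the parquet-mr schema builder (the same API the stack trace above goes through), constructing a group type with no fields is rejected outright:

{code}
import org.apache.parquet.schema.Types

// Parquet requires every group type to contain at least one field; finalizing
// an empty GroupBuilder fails, which is exactly what ParquetSchemaConverter
// hits when it tries to convert an empty StructType.
Types.optionalGroup().named("dimensions")
// => java.lang.IllegalStateException: Cannot build an empty group
{code}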

[jira] [Commented] (SPARK-18536) Failed to save to hive table when case class with empty field

2016-11-30 Thread Reynold Xin (JIRA)

[ https://issues.apache.org/jira/browse/SPARK-18536?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15709551#comment-15709551 ]

Reynold Xin commented on SPARK-18536:
-------------------------------------

We need to add a PreWriteCheck for Parquet.
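
For illustration, here is a minimal sketch of the kind of validation such a check could perform. This is a hypothetical helper, not the actual PreWriteCheck rule, and it raises a plain IllegalArgumentException to stay self-contained (the real rule would report an AnalysisException from the analyzer):

{code}
import org.apache.spark.sql.types.{ArrayType, DataType, MapType, StructType}

// Hypothetical pre-write validation: walk the schema and reject any empty
// struct type before the Parquet writer ever sees it.
def checkNoEmptyStructs(dt: DataType, path: String = "root"): Unit = dt match {
  case s: StructType if s.fields.isEmpty =>
    throw new IllegalArgumentException(
      s"Parquet does not support empty struct types (at $path)")
  case s: StructType =>
    s.fields.foreach(f => checkNoEmptyStructs(f.dataType, s"$path.${f.name}"))
  case a: ArrayType => checkNoEmptyStructs(a.elementType, s"$path.element")
  case m: MapType =>
    checkNoEmptyStructs(m.keyType, s"$path.key")
    checkNoEmptyStructs(m.valueType, s"$path.value")
  case _ => // leaf types are fine
}
{code}

Applied to the reproduction above, {{checkNoEmptyStructs(df.schema)}} would fail fast on the {{dimensions}} column instead of blowing up deep inside the Parquet writer.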



[jira] [Commented] (SPARK-18536) Failed to save to hive table when case class with empty field

2016-11-30 Thread Thomas Sebastian (JIRA)

[ https://issues.apache.org/jira/browse/SPARK-18536?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15708442#comment-15708442 ]

Thomas Sebastian commented on SPARK-18536:
------------------------------------------

[~pin_zhang] and [~rxin]
In this case, rather than letting the saveAsTable operation proceed and fail inside Parquet, we should check the schema for empty struct fields up front and throw a clearer exception early on.
Please advise.
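
Until such a check lands, a possible user-side workaround for the reported 2.0.1 setup (where the DataFrame can still be built and the failure only surfaces at write time) is to drop the empty struct column before writing, inside the foreachRDD from the report. This is a sketch, not an endorsed fix:

{code}
// The empty struct column carries no data, so dropping it sidesteps the
// Parquet "Cannot build an empty group" failure at write time.
rdd.toDF().drop("dimensions")
  .write.mode(SaveMode.Overwrite).saveAsTable("empty_table")
{code}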
