Tandoy opened a new issue #2668:
URL: https://github.com/apache/hudi/issues/2668


   **Steps to reproduce the behavior:**
   
1. `spark-shell \
        --packages org.apache.spark:spark-avro_2.11:2.4.0 \
        --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
        --jars /opt/apps/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle_2.11-0.7.0.jar`
   
   
   2. Run the following Spark code in the shell to insert data:
   
          import org.apache.hudi.QuickstartUtils._
          import scala.collection.JavaConversions._
          import org.apache.spark.sql.SaveMode._
          import org.apache.hudi.DataSourceReadOptions._
          import org.apache.hudi.DataSourceWriteOptions._
          import org.apache.hudi.config.HoodieWriteConfig._

          val tableName = "hudi_trips_cow"
          val basePath = "file:///tmp/hudi_trips_cow"

          // Generate 10 sample trip records and load them into a DataFrame
          val dataGen = new DataGenerator
          val inserts = convertToStringList(dataGen.generateInserts(10))
          val df = spark.read.json(spark.sparkContext.parallelize(inserts, 2))

          // Write the DataFrame out as a (default) copy-on-write Hudi table
          df.write.format("org.apache.hudi").
            options(getQuickstartWriteConfigs).
            option(PRECOMBINE_FIELD_OPT_KEY, "ts").
            option(RECORDKEY_FIELD_OPT_KEY, "uuid").
            option(PARTITIONPATH_FIELD_OPT_KEY, "partitionpath").
            option(TABLE_NAME, tableName).
            mode(Overwrite).
            save(basePath)
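
   If the write succeeds, the table can be read back to confirm the data landed. A minimal quickstart-style sketch (the three-level path glob is an assumption matching the default `DataGenerator` region/country/city partitioning):

          // Sketch: read the table back and show a few key columns
          val tripsDF = spark.read.format("org.apache.hudi").load(basePath + "/*/*/*/*")
          tripsDF.select("uuid", "ts", "partitionpath").show(false)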
   
   **Expected behavior**
   Generate some new trips, load them into a DataFrame, and write the DataFrame into the Hudi table, as in the quickstart. Instead, the write fails with the stacktrace below.
   
   **Environment Description**
   
   - Hudi version : 0.7.0
   - Spark version : 2.4.0.cloudera2
   - Hadoop version : 2.6.0-cdh5.13.3
   - Hive version : 1.1.0-cdh5.13.3
   - Storage (HDFS/S3/GCS..) : HDFS
   - Running on Docker? (yes/no) : no
   
   **Stacktrace**
   
   21/03/12 15:49:08 WARN hudi.HoodieSparkSqlWriter$: hoodie table at file:/tmp/hudi_trips_cow already exists. Deleting existing data & overwriting with new data.
   21/03/12 15:49:08 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
   org.apache.hudi.exception.HoodieUpsertException: Failed to upsert for commit time 20210312154907
     at org.apache.hudi.table.action.commit.AbstractWriteHelper.write(AbstractWriteHelper.java:62)
     at org.apache.hudi.table.action.commit.SparkUpsertCommitActionExecutor.execute(SparkUpsertCommitActionExecutor.java:46)
     at org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:92)
     at org.apache.hudi.table.HoodieSparkCopyOnWriteTable.upsert(HoodieSparkCopyOnWriteTable.java:82)
     at org.apache.hudi.client.SparkRDDWriteClient.upsert(SparkRDDWriteClient.java:146)
     at org.apache.hudi.DataSourceUtils.doWriteOperation(DataSourceUtils.java:214)
     at org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:181)
     at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:134)
     at org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:45)
     at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
     at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
     at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
     at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
     at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
     at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
     at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
     at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
     at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
     at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:80)
     at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:80)
     at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:676)
     at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:676)
     at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
     at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
     at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
     at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:676)
     at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:285)
     at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:271)
     at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:229)
     ... 68 elided
   Caused by: java.lang.NoClassDefFoundError: org/apache/parquet/hadoop/metadata/CompressionCodecName
     at java.lang.Class.getDeclaredMethods0(Native Method)
     at java.lang.Class.privateGetDeclaredMethods(Class.java:2701)
     at java.lang.Class.getDeclaredMethod(Class.java:2128)
     at java.io.ObjectStreamClass.getPrivateMethod(ObjectStreamClass.java:1629)
     at java.io.ObjectStreamClass.access$1700(ObjectStreamClass.java:79)
     at java.io.ObjectStreamClass$3.run(ObjectStreamClass.java:520)
     at java.io.ObjectStreamClass$3.run(ObjectStreamClass.java:494)
     at java.security.AccessController.doPrivileged(Native Method)
     at java.io.ObjectStreamClass.<init>(ObjectStreamClass.java:494)
     at java.io.ObjectStreamClass.lookup(ObjectStreamClass.java:391)
     at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1134)
     at java.io.ObjectOutputStream.defaultWriteFields(ObjectOutputStream.java:1548)
     at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1509)
     at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1432)
     at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1178)
     at java.io.ObjectOutputStream.writeArray(ObjectOutputStream.java:1378)
     at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1174)
     at java.io.ObjectOutputStream.defaultWriteFields(ObjectOutputStream.java:1548)
     at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1509)
     at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1432)
     at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1178)
     at java.io.ObjectOutputStream.writeArray(ObjectOutputStream.java:1378)
     at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1174)
     at java.io.ObjectOutputStream.defaultWriteFields(ObjectOutputStream.java:1548)
     at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1509)
     at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1432)
     at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1178)
     at java.io.ObjectOutputStream.defaultWriteFields(ObjectOutputStream.java:1548)
     at java.io.ObjectOutputStream.writeSerialData(ObjectOutputStream.java:1509)
     at java.io.ObjectOutputStream.writeOrdinaryObject(ObjectOutputStream.java:1432)
     at java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1178)
     at java.io.ObjectOutputStream.writeObject(ObjectOutputStream.java:348)
     at org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:43)
     at org.apache.spark.serializer.JavaSerializerInstance.serialize(JavaSerializer.scala:100)
     at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:400)
     at org.apache.spark.util.ClosureCleaner$.org$apache$spark$util$ClosureCleaner$$clean(ClosureCleaner.scala:393)
     at org.apache.spark.util.ClosureCleaner$.clean(ClosureCleaner.scala:162)
     at org.apache.spark.SparkContext.clean(SparkContext.scala:2326)
     at org.apache.spark.rdd.RDD$$anonfun$flatMap$1.apply(RDD.scala:380)
     at org.apache.spark.rdd.RDD$$anonfun$flatMap$1.apply(RDD.scala:379)
     at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
     at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
     at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
     at org.apache.spark.rdd.RDD.flatMap(RDD.scala:379)
     at org.apache.spark.api.java.JavaRDDLike$class.flatMap(JavaRDDLike.scala:126)
     at org.apache.spark.api.java.AbstractJavaRDDLike.flatMap(JavaRDDLike.scala:45)
     at org.apache.hudi.client.common.HoodieSparkEngineContext.flatMap(HoodieSparkEngineContext.java:78)
     at org.apache.hudi.index.HoodieIndexUtils.getLatestBaseFilesForAllPartitions(HoodieIndexUtils.java:52)
     at org.apache.hudi.index.bloom.SparkHoodieBloomIndex.loadInvolvedFiles(SparkHoodieBloomIndex.java:169)
     at org.apache.hudi.index.bloom.SparkHoodieBloomIndex.lookupIndex(SparkHoodieBloomIndex.java:119)
     at org.apache.hudi.index.bloom.SparkHoodieBloomIndex.tagLocation(SparkHoodieBloomIndex.java:84)
     at org.apache.hudi.index.bloom.SparkHoodieBloomIndex.tagLocation(SparkHoodieBloomIndex.java:60)
     at org.apache.hudi.table.action.commit.AbstractWriteHelper.tag(AbstractWriteHelper.java:69)
     at org.apache.hudi.table.action.commit.AbstractWriteHelper.write(AbstractWriteHelper.java:51)
     ... 96 more
   Caused by: java.lang.ClassNotFoundException: org.apache.parquet.hadoop.metadata.CompressionCodecName
     at java.net.URLClassLoader.findClass(URLClassLoader.java:382)
     at java.lang.ClassLoader.loadClass(ClassLoader.java:418)
     at java.lang.ClassLoader.loadClass(ClassLoader.java:351)
     ... 150 more
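
   The immediate cause is the final `ClassNotFoundException`: `org.apache.parquet.hadoop.metadata.CompressionCodecName` is not on the classpath. The Parquet shipped with CDH 5 predates the project's move to the `org.apache.parquet` namespace, so the classes Hudi 0.7.0 compiles against may not be provided by the cluster. A possible workaround, sketched below under the assumption that Parquet 1.10.x jars are available locally (the version and `/path/to` locations are placeholders to adapt), is to prepend matching Parquet jars via `spark.driver.extraClassPath` / `spark.executor.extraClassPath`, which Spark prepends ahead of the cluster's bundled jars:

       # Sketch only: the parquet version and /path/to locations are assumptions
       spark-shell \
         --packages org.apache.spark:spark-avro_2.11:2.4.0 \
         --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
         --conf 'spark.driver.extraClassPath=/path/to/parquet-hadoop-1.10.1.jar:/path/to/parquet-column-1.10.1.jar:/path/to/parquet-common-1.10.1.jar' \
         --conf 'spark.executor.extraClassPath=/path/to/parquet-hadoop-1.10.1.jar:/path/to/parquet-column-1.10.1.jar:/path/to/parquet-common-1.10.1.jar' \
         --jars /opt/apps/hudi/packaging/hudi-spark-bundle/target/hudi-spark-bundle_2.11-0.7.0.jar

   If the Spark 2.4 build in use already bundles `org.apache.parquet` (vanilla Apache Spark 2.4 does), the same error would instead point at a classpath ordering problem rather than a missing jar.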
       

