Hi all,

My environment is CDH 6.3. I compiled Hudi against the Hadoop 3 Maven dependency:
<hadoop.version>3.0.0</hadoop.version>
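
For reference, the build I ran was along these lines (a sketch; overriding the hadoop.version property on the command line, which is equivalent to editing it in the pom):

mvn clean package -DskipTests -Dhadoop.version=3.0.0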
Then I ran the quickstart:

bin/spark-shell --jars /home/t3cx/apps/hudi/hudi-spark-bundle-0.5.1-SNAPSHOT.jar --conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer'

import org.apache.hudi.QuickstartUtils._
import scala.collection.JavaConversions._
import org.apache.spark.sql.SaveMode._
import org.apache.hudi.DataSourceReadOptions._
import org.apache.hudi.DataSourceWriteOptions._
import org.apache.hudi.config.HoodieWriteConfig._

val tableName = "hudi_cow_table"
val basePath = "file: /// tmp / hudi_cow_table"
val dataGen = new DataGenerator

val inserts = convertToStringList(dataGen.generateInserts(10))
val df = spark.read.json(spark.sparkContext.parallelize(inserts, 2))
df.write.format("org.apache.hudi").
    options(getQuickstartWriteConfigs).
    option(PRECOMBINE_FIELD_OPT_KEY, "ts").
    option(RECORDKEY_FIELD_OPT_KEY, "uuid").
    option(PARTITIONPATH_FIELD_OPT_KEY, "partitionpath").
    option(TABLE_NAME, tableName).
    mode(Overwrite).
    save(basePath)
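
For completeness, this is the read-back step from the quickstart that I would use to verify the write (a sketch, assuming the same spark-shell session and the basePath defined above):

// Read the table back as a snapshot view (the glob covers the three partitionpath levels plus the data files)
val roViewDF = spark.
    read.
    format("org.apache.hudi").
    load(basePath + "/*/*/*/*")
// Register a temp view and query a few columns to confirm rows were written
roViewDF.createOrReplaceTempView("hudi_ro_table")
spark.sql("select uuid, partitionpath, ts from hudi_ro_table").show()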

However, after the write completes there is no data in the /tmp/hudi_cow_table directory.

Please help.


