kunal642 commented on a change in pull request #3866:
URL: https://github.com/apache/carbondata/pull/3866#discussion_r464844335



##########
File path: docs/hive-guide.md
##########
@@ -52,16 +52,11 @@ $HADOOP_HOME/bin/hadoop fs -put sample.csv <hdfs store path>/sample.csv
 ```
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.CarbonSession._
-val rootPath = "hdfs:///user/hadoop/carbon"
-val storeLocation = s"$rootPath/store"
-val warehouse = s"$rootPath/warehouse"
-val metaStoreDB = s"$rootPath/metastore_db"
-
-val carbon = SparkSession.builder().enableHiveSupport().config("spark.sql.warehouse.dir", warehouse).config(org.apache.carbondata.core.constants.CarbonCommonConstants.STORE_LOCATION, storeLocation).getOrCreateCarbonSession(storeLocation, metaStoreDB)
-
-carbon.sql("create table hive_carbon(id int, name string, scale decimal, 
country string, salary double) STORED AS carbondata")
-carbon.sql("LOAD DATA INPATH '<hdfs store path>/sample.csv' INTO TABLE 
hive_carbon")
-scala>carbon.sql("SELECT * FROM hive_carbon").show()
+val newSpark = SparkSession.builder().config(sc.getConf).enableHiveSupport.config("spark.sql.extensions","org.apache.spark.sql.CarbonExtensions").getOrCreate()
+newSpark.sql("drop table if exists hive_carbon").show
+newSpark.sql("create table hive_carbon(id int, name string, scale decimal, 
country string, salary double) STORED AS carbondata").show
+newSpark.sql("<hdfs store path>/sample.csv INTO TABLE hive_carbon").show

Review comment:
       Please fix the load query; several keywords, including `LOAD DATA`, are missing.
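
       For reference, a corrected statement would follow the `LOAD DATA INPATH ... INTO TABLE` form used in the removed line above; a minimal sketch, keeping the `<hdfs store path>` placeholder as-is:

       ```
       // load the sample CSV from HDFS into the CarbonData table created above
       newSpark.sql("LOAD DATA INPATH '<hdfs store path>/sample.csv' INTO TABLE hive_carbon").show
       ```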



