[
https://issues.apache.org/jira/browse/CARBONDATA-1726?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
]
Chetan Bhat updated CARBONDATA-1726:
------------------------------------
Description:
Steps:
// prepare csv file for batch loading
cd /srv/spark2.2Bigdata/install/hadoop/datanode/bin
// generate streamSample.csv
100000001,batch_1,city_1,0.1,school_1:school_11$20
100000002,batch_2,city_2,0.2,school_2:school_22$30
100000003,batch_3,city_3,0.3,school_3:school_33$40
100000004,batch_4,city_4,0.4,school_4:school_44$50
100000005,batch_5,city_5,0.5,school_5:school_55$60
// put to hdfs /tmp/streamSample.csv
./hadoop fs -put streamSample.csv /tmp
// spark-beeline
cd /srv/spark2.2Bigdata/install/spark/sparkJdbc
bin/spark-submit --master yarn-client --executor-memory 10G --executor-cores 5 \
  --driver-memory 5G --num-executors 3 \
  --class org.apache.carbondata.spark.thriftserver.CarbonThriftServer \
  /srv/spark2.2Bigdata/install/spark/sparkJdbc/carbonlib/carbondata_2.11-1.3.0-SNAPSHOT-shade-hadoop2.7.2.jar \
  "hdfs://hacluster/user/sparkhive/warehouse"
bin/beeline -u jdbc:hive2://10.18.98.34:23040
CREATE TABLE stream_table(
id INT,
name STRING,
city STRING,
salary FLOAT
)
STORED BY 'carbondata'
TBLPROPERTIES('streaming'='true', 'sort_columns'='name');
LOAD DATA LOCAL INPATH 'hdfs://hacluster/chetan/streamSample.csv' INTO TABLE
stream_table OPTIONS('HEADER'='false');
// spark-shell
cd /srv/spark2.2Bigdata/install/spark/sparkJdbc
bin/spark-shell --master yarn-client --executor-memory 10G --executor-cores 5 \
  --driver-memory 5G --num-executors 3 \
  --jars /srv/spark2.2Bigdata/install/spark/sparkJdbc/carbonlib/carbondata_2.11-1.3.0-SNAPSHOT-shade-hadoop2.7.2.jar
import java.io.{File, PrintWriter}
import java.net.ServerSocket
import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.hive.CarbonRelation
import org.apache.spark.sql.streaming.{ProcessingTime, StreamingQuery}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
import org.apache.spark.sql.CarbonSession._
val carbonSession = SparkSession.
  builder().
  appName("StreamExample").
  config("spark.sql.warehouse.dir", "hdfs://hacluster/user/sparkhive/warehouse").
  config("javax.jdo.option.ConnectionURL", "jdbc:mysql://10.18.98.34:3306/sparksql?characterEncoding=UTF-8").
  config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver").
  config("javax.jdo.option.ConnectionPassword", "huawei").
  config("javax.jdo.option.ConnectionUserName", "sparksql").
  getOrCreateCarbonSession()
carbonSession.sparkContext.setLogLevel("ERROR")
carbonSession.sql("select * from stream_table").show
def writeSocket(serverSocket: ServerSocket): Thread = {
  val thread = new Thread() {
    override def run(): Unit = {
      // wait for a client connection request and accept it
      val clientSocket = serverSocket.accept()
      val socketWriter = new PrintWriter(clientSocket.getOutputStream())
      var index = 0
      for (_ <- 1 to 1000) {
        // write 101 records per iteration (the inner loop runs from 0 to 100 inclusive)
        for (_ <- 0 to 100) {
          index = index + 1
          socketWriter.println(index.toString + ",name_" + index +
            ",city_" + index + "," + (index * 10000.00).toString +
            ",school_" + index + ":school_" + index + index + "$" + index)
        }
        socketWriter.flush()
        Thread.sleep(2000)
      }
      socketWriter.close()
      System.out.println("Socket closed")
    }
  }
  thread.start()
  thread
}
def startStreaming(spark: SparkSession, tablePath: CarbonTablePath): Thread = {
  val thread = new Thread() {
    override def run(): Unit = {
      var qry: StreamingQuery = null
      try {
        val readSocketDF = spark.readStream
          .format("socket")
          .option("host", "10.18.98.34")
          .option("port", 7071)
          .load()
        // Write data from socket stream to carbondata file
        qry = readSocketDF.writeStream
          .format("carbondata")
          .trigger(ProcessingTime("5 seconds"))
          .option("checkpointLocation", tablePath.getStreamingCheckpointDir)
          .option("tablePath", tablePath.getPath)
          .start()
        qry.awaitTermination()
      } catch {
        case _: InterruptedException =>
          println("Done reading and writing streaming data")
      } finally {
        qry.stop()
      }
    }
  }
  thread.start()
  thread
}
val streamTableName = s"stream_table"
val carbonTable = CarbonEnv.getInstance(carbonSession).carbonMetastore.
  lookupRelation(Some("default"), streamTableName)(carbonSession).asInstanceOf[CarbonRelation].
  tableMeta.carbonTable
val tablePath = CarbonStorePath.getCarbonTablePath(carbonTable.getAbsoluteTableIdentifier)
val serverSocket = new ServerSocket(7071)
val socketThread = writeSocket(serverSocket)
val streamingThread = startStreaming(carbonSession, tablePath)
*Issue: A NullPointerException is thrown when streaming is started.*
The issue still occurs even when the executor and driver cores and memory are increased while launching the spark-shell.
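One plausible reading of the stack trace below (<console>:59 inside the thread's run method) is that readSocketDF.writeStream...start() throws before qry is assigned, so the qry.stop() in the finally block dereferences null and raises the reported NullPointerException itself, masking the original error from start(). A null-safe sketch of the same thread body, assuming the readSocketDF and tablePath definitions above are unchanged:

// Sketch only: guard the finally block so a failed start() does not
// get masked by a NullPointerException from qry.stop().
var qry: StreamingQuery = null
try {
  qry = readSocketDF.writeStream
    .format("carbondata")
    .trigger(ProcessingTime("5 seconds"))
    .option("checkpointLocation", tablePath.getStreamingCheckpointDir)
    .option("tablePath", tablePath.getPath)
    .start()
  qry.awaitTermination()
} catch {
  case _: InterruptedException =>
    println("Done reading and writing streaming data")
} finally {
  // guard: if start() threw, qry is still null here
  if (qry != null) {
    qry.stop()
  }
}

If the guard removes the NPE, the underlying start() failure should then surface with its real cause.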
scala> import java.io.{File, PrintWriter}
import java.io.{File, PrintWriter}
scala> import java.net.ServerSocket
import java.net.ServerSocket
scala>
scala> import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.{CarbonEnv, SparkSession}
scala> import org.apache.spark.sql.hive.CarbonRelation
import org.apache.spark.sql.hive.CarbonRelation
scala> import org.apache.spark.sql.streaming.{ProcessingTime, StreamingQuery}
import org.apache.spark.sql.streaming.{ProcessingTime, StreamingQuery}
scala>
scala> import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.constants.CarbonCommonConstants
scala> import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.CarbonProperties
scala> import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}
import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}
scala>
scala> CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
res0: org.apache.carbondata.core.util.CarbonProperties =
org.apache.carbondata.core.util.CarbonProperties@7212b28e
scala>
scala> import org.apache.spark.sql.CarbonSession._
import org.apache.spark.sql.CarbonSession._
scala>
scala> val carbonSession = SparkSession.
| builder().
| appName("StreamExample").
| config("spark.sql.warehouse.dir", "hdfs://hacluster/user/sparkhive/warehouse").
| config("javax.jdo.option.ConnectionURL", "jdbc:mysql://10.18.98.34:3306/sparksql?characterEncoding=UTF-8").
| config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver").
| config("javax.jdo.option.ConnectionPassword", "huawei").
| config("javax.jdo.option.ConnectionUserName", "sparksql").
| getOrCreateCarbonSession()
carbonSession: org.apache.spark.sql.SparkSession =
org.apache.spark.sql.CarbonSession@7593716d
scala> carbonSession.sparkContext.setLogLevel("ERROR")
scala>
scala> carbonSession.sql("select * from stream_table").show
+---------+-------+------+------+
| id| name| city|salary|
+---------+-------+------+------+
|100000001|batch_1|city_1| 0.1|
|100000002|batch_2|city_2| 0.2|
|100000003|batch_3|city_3| 0.3|
|100000004|batch_4|city_4| 0.4|
|100000005|batch_5|city_5| 0.5|
+---------+-------+------+------+
scala> def writeSocket(serverSocket: ServerSocket): Thread = {
| val thread = new Thread() {
| override def run(): Unit = {
| // wait for client to connection request and accept
| val clientSocket = serverSocket.accept()
| val socketWriter = new PrintWriter(clientSocket.getOutputStream())
| var index = 0
| for (_ <- 1 to 1000) {
| // write 5 records per iteration
| for (_ <- 0 to 100) {
| index = index + 1
| socketWriter.println(index.toString + ",name_" + index
| + ",city_" + index + "," + (index * 10000.00).toString +
| ",school_" + index + ":school_" + index + index + "$" + index)
| }
| socketWriter.flush()
| Thread.sleep(2000)
| }
| socketWriter.close()
| System.out.println("Socket closed")
| }
| }
| thread.start()
| thread
| }
writeSocket: (serverSocket: java.net.ServerSocket)Thread
scala>
| def startStreaming(spark: SparkSession, tablePath: CarbonTablePath): Thread = {
| val thread = new Thread() {
| override def run(): Unit = {
| var qry: StreamingQuery = null
| try {
| val readSocketDF = spark.readStream
| .format("socket")
| .option("host", "10.18.98.34")
| .option("port", 7071)
| .load()
|
| // Write data from socket stream to carbondata file
| qry = readSocketDF.writeStream
| .format("carbondata")
| .trigger(ProcessingTime("5 seconds"))
| .option("checkpointLocation",
tablePath.getStreamingCheckpointDir)
| .option("tablePath", tablePath.getPath)
| .start()
|
| qry.awaitTermination()
| } catch {
| case _: InterruptedException =>
| println("Done reading and writing streaming data")
| } finally {
| qry.stop()
| }
| }
| }
| thread.start()
| thread
| }
startStreaming: (spark: org.apache.spark.sql.SparkSession, tablePath: org.apache.carbondata.core.util.path.CarbonTablePath)Thread
scala>
scala> val streamTableName = s"stream_table"
streamTableName: String = stream_table
scala>
scala> val carbonTable = CarbonEnv.getInstance(carbonSession).carbonMetastore.
| lookupRelation(Some("default"), streamTableName)(carbonSession).asInstanceOf[CarbonRelation].
| tableMeta.carbonTable
carbonTable: org.apache.carbondata.core.metadata.schema.table.CarbonTable =
org.apache.carbondata.core.metadata.schema.table.CarbonTable@62cf8fda
scala>
scala> val tablePath = CarbonStorePath.getCarbonTablePath(carbonTable.getAbsoluteTableIdentifier)
tablePath: org.apache.carbondata.core.util.path.CarbonTablePath =
hdfs://hacluster/user/hive/warehouse/carbon.store/default/stream_table
scala>
scala> val serverSocket = new ServerSocket(7071)
serverSocket: java.net.ServerSocket =
ServerSocket[addr=0.0.0.0/0.0.0.0,localport=7071]
scala> val socketThread = writeSocket(serverSocket)
socketThread: Thread = Thread[Thread-103,5,main]
scala> val streamingThread = startStreaming(carbonSession, tablePath)
streamingThread: Thread = Thread[Thread-104,5,main]
*scala> Exception in thread "Thread-104" java.lang.NullPointerException
at $line29.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$anon$1.run(<console>:59)*
Expected: startStreaming should complete successfully without throwing an exception.
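Once the streaming query starts cleanly, a quick sanity check from the same spark-shell session (a sketch reusing carbonSession and stream_table as defined above) could be:

// Sketch: confirm that streamed rows are landing in the streaming table.
carbonSession.sql("select count(*) from stream_table").show
carbonSession.sql("select * from stream_table order by id desc limit 5").show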
> Carbon1.3.0-Streaming - Select query from spark-shell does not execute successfully for streaming table load
> ------------------------------------------------------------------------------------------------------------
>
> Key: CARBONDATA-1726
> URL: https://issues.apache.org/jira/browse/CARBONDATA-1726
> Project: CarbonData
> Issue Type: Bug
> Components: data-query
> Affects Versions: 1.3.0
> Environment: 3 node ant cluster SUSE 11 SP4
> Reporter: Chetan Bhat
> Priority: Blocker
> Labels: Functional
>
--
This message was sent by Atlassian JIRA
(v6.4.14#64029)