liujinhui1994 removed a comment on issue #3280:
URL: https://github.com/apache/hudi/issues/3280#issuecomment-880625093
```
Diagnostics: | User class threw exception:
org.apache.spark.sql.streaming.StreamingQueryException: Invalid value
org.apache.kafka.common.serialization.ByteArrayDeserializer for configuration
key.deserializer: Class
org.apache.kafka.common.serialization.ByteArrayDeserializer could not be
found.=== Streaming Query ===Identifier: [id =
6c7bb949-bcb6-494a-a576-e64b05643c31, runId =
f93f604d-dfe0-4f1d-a88b-5be64d2fb853]Current Committed Offsets:
{KafkaV2[Subscribe[t3_ts_android_device]]:
{"t3_ts_android_device":{"2":531555,"5":531556,"4":531556,"7":531555,"1":531556,"3":531554,"6":531556,"0":531554}}}Current
Available Offsets: {KafkaV2[Subscribe[t3_ts_android_device]]:
{"t3_ts_android_device":{"2":531555,"5":531556,"4":531556,"7":531555,"1":531556,"3":531554,"6":531556,"0":531554}}}Current
State: ACTIVEThread State: RUNNABLELogical
Plan:KafkaV2[Subscribe[t3_ts_android_device]]at
org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$runStream(StreamExecution.scala:297)at
org.apache.spark.sql.execution.streaming.StreamExecution$anon$1.run(StreamExecution.scala:193)Caused
by: org.apache.kafka.common.config.ConfigException: Invalid value
org.apache.kafka.common.serialization.ByteArrayDeserializer for configuration
key.deserializer: Class
org.apache.kafka.common.serialization.ByteArrayDeserializer could not be
found.at
org.apache.kafka.common.config.ConfigDef.parseType(ConfigDef.java:724)at
org.apache.kafka.common.config.ConfigDef.parseValue(ConfigDef.java:469)at
org.apache.kafka.common.config.ConfigDef.parse(ConfigDef.java:462)at
org.apache.kafka.common.config.AbstractConfig.<init>(AbstractConfig.java:62)at
org.apache.kafka.common.config.AbstractConfig.<init>(AbstractConfig.java:75)at
org.apache.kafka.clients.consumer.ConsumerConfig.<init>(ConsumerConfig.java:499)at
org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:615)at
org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:596)at
org.apache.spark.sql.kafka010.SubscribeStrategy.createConsumer(ConsumerStrategy.scala:62)at
org.apache.spark.sql.kafka010.KafkaOffsetReader.consumer(KafkaOffsetReader.scala:86)at
org.apache.spark.sql.kafka010.KafkaOffsetReader$anonfun$fetchLatestOffsets$1$anonfun$apply$11.apply(KafkaOffsetReader.scala:217)at
org.apache.spark.sql.kafka010.KafkaOffsetReader$anonfun$fetchLatestOffsets$1$anonfun$apply$11.apply(KafkaOffsetReader.scala:215)at
org.apache.spark.sql.kafka010.KafkaOffsetReader$anonfun$org$apache$spark$sql$kafka010$KafkaOffsetReader$withRetriesWithoutInterrupt$1.apply$mcV$sp(KafkaOffsetReader.scala:358)at
org.apache.spark.sql.kafka010.KafkaOffsetReader$anonfun$org$apache$spark$sql$kafka010$KafkaOffsetReader$withRetriesWithoutInterrupt$1.apply(KafkaOffsetReader.scala:357)at
org.apache.spark.sql.kafka010.KafkaOffsetReader$anonfun$org$apache$spark$sql$kafka010$KafkaOffsetReader$withRetriesWithoutInterrupt$1.apply(KafkaOffsetReader.scala:357)at
org.apache.spark.util.UninterruptibleThread.runUninterruptibly(UninterruptibleThread.scala:77)at
org.apache.spark.sql.kafka010.KafkaOffsetReader.org$apache$spark$sql$kafka010$KafkaOffsetReader$withRetriesWithoutInterrupt(KafkaOffsetReader.scala:356)at
org.apache.spark.sql.kafka010.KafkaOffsetReader$anonfun$fetchLatestOffsets$1.apply(KafkaOffsetReader.scala:215)at
org.apache.spark.sql.kafka010.KafkaOffsetReader$anonfun$fetchLatestOffsets$1.apply(KafkaOffsetReader.scala:215)at
org.apache.spark.sql.kafka010.KafkaOffsetReader.runUninterruptibly(KafkaOffsetReader.scala:325)at
org.apache.spark.sql.kafka010.KafkaOffsetReader.fetchLatestOffsets(KafkaOffsetReader.scala:214)at
org.apache.spark.sql.kafka010.KafkaMicroBatchReader$anonfun$setOffsetRange$4.apply(KafkaMicroBatchReader.scala:97)at
org.apache.spark.sql.kafka010.KafkaMicroBatchReader$anonfun$setOffsetRange$4.apply(KafkaMicroBatchReader.scala:95)at
scala.Option.getOrElse(Option.scala:121)at
org.apache.spark.sql.kafka010.KafkaMicroBatchReader.setOffsetRange(KafkaMicroBatchReader.scala:95)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$org$apache$spark$sql$execution$streaming$MicroBatchExecution$constructNextBatch$1$anonfun$6$anonfun$apply$3.apply$mcV$sp(MicroBatchExecution.scala:364)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$org$apache$spark$sql$execution$streaming$MicroBatchExecution$constructNextBatch$1$anonfun$6$anonfun$apply$3.apply(MicroBatchExecution.scala:364)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$org$apache$spark$sql$execution$streaming$MicroBatchExecution$constructNextBatch$1$anonfun$6$anonfun$apply$3.apply(MicroBatchExecution.scala:364)at
org.apache.spark.sql.execution.streaming.ProgressReporter$class.reportTimeTaken(ProgressReporter.scala:351)at
org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:58)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$org$apache$spark$sql$execution$streaming$MicroBatchExecution$constructNextBatch$1$anonfun$6.apply(MicroBatchExecution.scala:360)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$org$apache$spark$sql$execution$streaming$MicroBatchExecution$constructNextBatch$1$anonfun$6.apply(MicroBatchExecution.scala:352)at
scala.collection.TraversableLike$anonfun$map$1.apply(TraversableLike.scala:234)at
scala.collection.TraversableLike$anonfun$map$1.apply(TraversableLike.scala:234)at
scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)at
scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)at
scala.collection.TraversableLike$class.map(TraversableLike.scala:234)at
scala.collection.AbstractTraversable.map(Traversable.scala:104)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$org$apache$spark$sql$execution$streaming$MicroBatchExecution$constructNextBatch$1.apply$mcZ$sp(MicroBatchExecution.scala:352)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$org$apache$spark$sql$execution$streaming$MicroBatchExecution$constructNextBatch$1.apply(MicroBatchExecution.scala:348)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$org$apache$spark$sql$execution$streaming$MicroBatchExecution$constructNextBatch$1.apply(MicroBatchExecution.scala:348)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution.withProgressLocked(MicroBatchExecution.scala:568)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution.org$apache$spark$sql$execution$streaming$MicroBatchExecution$constructNextBatch(MicroBatchExecution.scala:348)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$runActivatedStream$1$anonfun$apply$mcZ$sp$1.apply$mcV$sp(MicroBatchExecution.scala:183)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$runActivatedStream$1$anonfun$apply$mcZ$sp$1.apply(MicroBatchExecution.scala:166)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$runActivatedStream$1$anonfun$apply$mcZ$sp$1.apply(MicroBatchExecution.scala:166)at
org.apache.spark.sql.execution.streaming.ProgressReporter$class.reportTimeTaken(ProgressReporter.scala:351)at
org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:58)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution$anonfun$runActivatedStream$1.apply$mcZ$sp(MicroBatchExecution.scala:166)at
org.apache.spark.sql.execution.streaming.ProcessingTimeExecutor.execute(TriggerExecutor.scala:56)at
org.apache.spark.sql.execution.streaming.MicroBatchExecution.runActivatedStream(MicroBatchExecution.scala:160)at
org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$runStream(StreamExecution.scala:281)...
1 more
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]