bookand opened a new issue, #3209:
URL: https://github.com/apache/incubator-seatunnel/issues/3209
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/incubator-seatunnel/issues?q=is%3Aissue+label%3A%22bug%22) and found no similar issues.

### What happened

When I use SeaTunnel 2.1.3 to sync data to HBase, the job throws an exception:

`java.lang.IllegalAccessError: class org.apache.hadoop.hdfs.web.HftpFileSystem cannot access its superinterface org.apache.hadoop.hdfs.web.TokenAspect$TokenManagementDelegator`

### SeaTunnel Version

2.1.3

### SeaTunnel Config

```conf
env {
  spark.app.name = "data_tube_spark_dw001"
  spark.executor.instances = 2
  spark.executor.cores = 1
  spark.executor.memory = "1g"
}

source {
  Hbase {
    hbase.zookeeper.quorum = "hdp004:2181,hdp005:2181,hdp006:2181"
    zookeeper.znode.parent = "/hbase-unsecure"
    catalog = "{\"table\":{\"namespace\":\"default\", \"name\":\"dw_datatube_source\"},\"rowkey\":\"id\",\"columns\":{\"id\":{\"cf\":\"rowkey\", \"col\":\"id\", \"type\":\"string\"},\"a\":{\"cf\":\"cf\", \"col\":\"a\", \"type\":\"string\"},\"b\":{\"cf\":\"cf\", \"col\":\"b\", \"type\":\"string\"},\"c\":{\"cf\":\"cf\", \"col\":\"c\", \"type\":\"string\"}}}"
    result_table_name = "my_dataset"
  }
}

transform {
  sql {
    sql = "select id,a,b,c from my_dataset"
  }
}

sink {
  hbase {
    source_table_name = "my_dataset"
    zookeeper.znode.parent = "/hbase-unsecure"
    hbase.zookeeper.quorum = "hdp004:2181,hdp005:2181,hdp006:2181"
    catalog = "{\"table\":{\"namespace\":\"default\", \"name\":\"dw_datatube_sink\"},\"rowkey\":\"id\",\"columns\":{\"id\":{\"cf\":\"rowkey\", \"col\":\"id\", \"type\":\"string\"},\"a\":{\"cf\":\"cf\", \"col\":\"a\", \"type\":\"string\"},\"b\":{\"cf\":\"cf\", \"col\":\"b\", \"type\":\"string\"},\"c\":{\"cf\":\"cf\", \"col\":\"c\", \"type\":\"string\"}}}"
    save_mode = "overwrite"
    staging_dir = "/tmp/hbase-staging/"
  }
}
```

### Running Command

```shell
/bin/start-seatunnel-spark.sh --master yarn --deploy-mode client --config ./seatunnel_hbase_hbase.conf
```

### Error Exception

```log
java.lang.IllegalAccessError: class org.apache.hadoop.hdfs.web.HftpFileSystem cannot access its superinterface org.apache.hadoop.hdfs.web.TokenAspect$TokenManagementDelegator
    at java.lang.ClassLoader.defineClass1(Native Method)
    at java.lang.ClassLoader.defineClass(ClassLoader.java:756)
    at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
    at java.net.URLClassLoader.defineClass(URLClassLoader.java:468)
    at java.net.URLClassLoader.access$100(URLClassLoader.java:74)
    at java.net.URLClassLoader$1.run(URLClassLoader.java:369)
    at java.net.URLClassLoader$1.run(URLClassLoader.java:363)
    at java.security.AccessController.doPrivileged(Native Method)
    at java.net.URLClassLoader.findClass(URLClassLoader.java:362)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:418)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:351)
    at java.lang.Class.forName0(Native Method)
    at java.lang.Class.forName(Class.java:348)
    at java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:370)
    at java.util.ServiceLoader$LazyIterator.next(ServiceLoader.java:404)
    at java.util.ServiceLoader$1.next(ServiceLoader.java:480)
    at org.apache.hadoop.fs.FileSystem.loadFileSystems(FileSystem.java:3217)
    at org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:3262)
    at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3301)
    at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:124)
    at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3352)
    at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3320)
    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:479)
    at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:227)
    at org.apache.hadoop.hbase.spark.HBaseContext$$anonfun$bulkLoadThinRows$3.apply(HBaseContext.scala:802)
    at org.apache.hadoop.hbase.spark.HBaseContext$$anonfun$bulkLoadThinRows$3.apply(HBaseContext.scala:799)
    at org.apache.hadoop.hbase.spark.HBaseContext.org$apache$hadoop$hbase$spark$HBaseContext$$hbaseForeachPartition(HBaseContext.scala:490)
    at org.apache.hadoop.hbase.spark.HBaseContext$$anonfun$foreachPartition$1.apply(HBaseContext.scala:106)
    at org.apache.hadoop.hbase.spark.HBaseContext$$anonfun$foreachPartition$1.apply(HBaseContext.scala:106)
    at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:935)
    at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:935)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2079)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2079)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:109)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
```

### Flink or Spark Version

hdp 3.1.5
spark 2.3.2
hbase 2.1.6
hadoop 3.1.1
zookeeper 3.4.6

### Java or Scala Version

java 1.8.0_271

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
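The error is raised while `ServiceLoader` scans `FileSystem` implementations in `FileSystem.loadFileSystems`; since `HftpFileSystem` was removed in Hadoop 3.x, the trace likely points to a leftover Hadoop 2.x `hadoop-hdfs` jar on the classpath alongside the cluster's Hadoop 3.1.1 jars, so the two classes resolve from different jars. Below is a minimal diagnostic sketch (the class name `ClasspathCheck` is hypothetical, not part of SeaTunnel) that prints which jar provides each class, assuming it runs with the same classpath as the Spark executors:

```java
import java.net.URL;

// Hypothetical helper: locate the jar that provides each class named in the
// IllegalAccessError. Looking up the .class resource instead of calling
// Class.forName avoids triggering the same error during the check itself.
public class ClasspathCheck {
    public static void main(String[] args) {
        String[] names = {
            "org.apache.hadoop.hdfs.web.HftpFileSystem",
            "org.apache.hadoop.hdfs.web.TokenAspect$TokenManagementDelegator"
        };
        ClassLoader cl = ClasspathCheck.class.getClassLoader();
        for (String name : names) {
            // Inner classes keep the '$' in the resource path.
            URL url = cl.getResource(name.replace('.', '/') + ".class");
            System.out.println(name + " -> " + (url != null ? url : "not found"));
        }
    }
}
```

If the two printed URLs point into jars from different Hadoop versions, removing or excluding the older `hadoop-hdfs` jar from the job's classpath should clear the `IllegalAccessError`.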
