[
https://issues.apache.org/jira/browse/PHOENIX-7065?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17787192#comment-17787192
]
Istvan Toth commented on PHOENIX-7065:
--------------------------------------
It's a protobuf conflict:
{noformat}
05:07:47.444 [FSImageSaver for /home/stoty/workspaces/apache-phoenix/phoenix-connectors/phoenix5-spark3-it/target/test-data/3b84e2b7-029c-8399-3a95-2671bd06fd99/cluster_46b47f84-890f-566b-f4f2-181e676d3ac0/dfs/name-0-2 of type IMAGE_AND_EDITS] ERROR org.apache.hadoop.hdfs.server.namenode.FSImage - Unable to save image for /home/stoty/workspaces/apache-phoenix/phoenix-connectors/phoenix5-spark3-it/target/test-data/3b84e2b7-029c-8399-3a95-2671bd06fd99/cluster_46b47f84-890f-566b-f4f2-181e676d3ac0/dfs/name-0-2
java.lang.VerifyError: Bad type on operand stack
Exception Details:
  Location:
    org/apache/hadoop/hdfs/server/namenode/FsImageProto$ErasureCodingSection$Builder.addPolicies(Lorg/apache/hadoop/hdfs/protocol/proto/HdfsProtos$ErasureCodingPolicyProto;)Lorg/apache/hadoop/hdfs/server/namenode/FsImageProto$ErasureCodingSection$Builder; @46: invokevirtual
  Reason:
    Type 'org/apache/hadoop/hdfs/protocol/proto/HdfsProtos$ErasureCodingPolicyProto' (current frame, stack[1]) is not assignable to 'com/google/protobuf/GeneratedMessage'
  Current Frame:
    bci: @46
    flags: { }
    locals: { 'org/apache/hadoop/hdfs/server/namenode/FsImageProto$ErasureCodingSection$Builder', 'org/apache/hadoop/hdfs/protocol/proto/HdfsProtos$ErasureCodingPolicyProto' }
    stack: { 'com/google/protobuf/RepeatedFieldBuilder', 'org/apache/hadoop/hdfs/protocol/proto/HdfsProtos$ErasureCodingPolicyProto' }
{noformat}
It is probably only a test issue; the shaded connector artifact shouldn't contain either HBase or Hadoop code.
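If it helps with debugging, a throwaway check along the lines of the sketch below can confirm which jars the conflicting classes are loaded from and whether the generated HDFS proto classes extend the protobuf 2.x base class ({{com.google.protobuf.GeneratedMessage}}) or the 3.x one ({{GeneratedMessageV3}}). The {{ProtobufConflictCheck}} class name is made up and it is not part of the Phoenix build; it assumes it is run with the same classpath the failing IT uses.
{noformat}
// Hypothetical diagnostic, not part of the Phoenix test suite.
// Run with the failing test's classpath to see where each class comes from
// and what the generated proto class actually extends.
public class ProtobufConflictCheck {

    public static void main(String[] args) {
        String[] names = {
            "com.google.protobuf.GeneratedMessage",      // protobuf 2.x base class
            "com.google.protobuf.GeneratedMessageV3",    // protobuf 3.x base class
            "org.apache.hadoop.hdfs.protocol.proto.HdfsProtos$ErasureCodingPolicyProto"
        };
        for (String name : names) {
            try {
                Class<?> clazz = Class.forName(name);
                java.security.CodeSource src = clazz.getProtectionDomain().getCodeSource();
                System.out.println(name + " loaded from "
                        + (src == null ? "<bootstrap/unknown>" : src.getLocation()));
                // Walk the superclass chain: with a consistent classpath the proto class
                // extends the same GeneratedMessage that FsImageProto was compiled against.
                for (Class<?> c = clazz.getSuperclass(); c != null; c = c.getSuperclass()) {
                    System.out.println("    extends " + c.getName());
                }
            } catch (Throwable t) {
                // Both ClassNotFoundException and VerifyError are informative here.
                System.out.println(name + " -> " + t);
            }
        }
    }
}
{noformat}
Something like {{mvn dependency:tree -Dincludes=com.google.protobuf}} on the phoenix5-spark3-it module should likewise show where the mismatched protobuf versions come from.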
> Spark3 connector tests fail with Spark 3.4.1
> --------------------------------------------
>
> Key: PHOENIX-7065
> URL: https://issues.apache.org/jira/browse/PHOENIX-7065
> Project: Phoenix
> Issue Type: New Feature
> Components: connectors, spark-connector
> Affects Versions: connectors-6.0.0
> Reporter: Istvan Toth
> Priority: Major
>
> Probably some kind of dependency version conflict with the minicluster.
> {noformat}
> [INFO] Running org.apache.phoenix.spark.SaltedTableIT
> [ERROR] Tests run: 1, Failures: 0, Errors: 1, Skipped: 0, Time elapsed: 0.008 s <<< FAILURE! - in org.apache.phoenix.spark.SaltedTableIT
> [ERROR] org.apache.phoenix.spark.SaltedTableIT Time elapsed: 0.002 s <<< ERROR!
> java.lang.RuntimeException: java.io.IOException: Failed to save in any storage directories while saving namespace.
>     at org.apache.phoenix.query.BaseTest.initMiniCluster(BaseTest.java:549)
>     at org.apache.phoenix.query.BaseTest.setUpTestCluster(BaseTest.java:449)
>     at org.apache.phoenix.query.BaseTest.checkClusterInitialized(BaseTest.java:435)
>     at org.apache.phoenix.query.BaseTest.setUpTestDriver(BaseTest.java:517)
>     at org.apache.phoenix.query.BaseTest.setUpTestDriver(BaseTest.java:512)
>     at org.apache.phoenix.end2end.ParallelStatsDisabledIT.doSetup(ParallelStatsDisabledIT.java:62)
>     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>     at java.lang.reflect.Method.invoke(Method.java:498)
>     at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
>     at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
>     at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
>     at org.junit.internal.runners.statements.RunBefores.invokeMethod(RunBefores.java:33)
>     at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24)
>     at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
>     at org.apache.phoenix.SystemExitRule$1.evaluate(SystemExitRule.java:40)
>     at org.junit.rules.ExternalResource$1.evaluate(ExternalResource.java:54)
>     at org.junit.rules.RunRules.evaluate(RunRules.java:20)
>     at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
>     at org.junit.runners.ParentRunner.run(ParentRunner.java:413)
>     at org.junit.runners.Suite.runChild(Suite.java:128)
>     at org.junit.runners.Suite.runChild(Suite.java:27)
>     at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
>     at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
>     at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
>     at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
>     at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
>     at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
>     at org.junit.runners.ParentRunner.run(ParentRunner.java:413)
>     at org.apache.maven.surefire.junitcore.JUnitCore.run(JUnitCore.java:49)
>     at org.apache.maven.surefire.junitcore.JUnitCoreWrapper.createRequestAndRun(JUnitCoreWrapper.java:120)
>     at org.apache.maven.surefire.junitcore.JUnitCoreWrapper.executeLazy(JUnitCoreWrapper.java:105)
>     at org.apache.maven.surefire.junitcore.JUnitCoreWrapper.execute(JUnitCoreWrapper.java:77)
>     at org.apache.maven.surefire.junitcore.JUnitCoreWrapper.execute(JUnitCoreWrapper.java:69)
>     at org.apache.maven.surefire.junitcore.JUnitCoreProvider.invoke(JUnitCoreProvider.java:146)
>     at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
>     at org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
>     at org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
>     at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
> Caused by: java.io.IOException: Failed to save in any storage directories while saving namespace.
>     at org.apache.hadoop.hdfs.server.namenode.FSImage.saveFSImageInAllDirs(FSImage.java:1192)
>     at org.apache.hadoop.hdfs.server.namenode.FSImage.saveFSImageInAllDirs(FSImage.java:1149)
>     at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:175)
>     at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1138)
>     at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:402)
>     at org.apache.hadoop.hdfs.DFSTestUtil.formatNameNode(DFSTestUtil.java:248)
>     at org.apache.hadoop.hdfs.MiniDFSCluster.configureNameService(MiniDFSCluster.java:1063)
>     at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNodesAndSetConf(MiniDFSCluster.java:949)
>     at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:881)
>     at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:797)
>     at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniDFSCluster(HBaseTestingUtility.java:673)
>     at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniDFSCluster(HBaseTestingUtility.java:645)
>     at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:996)
>     at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:876)
>     at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:858)
>     at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:840)
>     at org.apache.phoenix.query.BaseTest.initMiniCluster(BaseTest.java:544)
>     ... 39 more
> {noformat}