nshigit opened a new issue, #18320:
URL: https://github.com/apache/hudi/issues/18320

   ### Bug Description
   
   **What happened:**
   Executing the following CREATE TABLE statement in spark-sql fails:
    CREATE TABLE IF NOT EXISTS pos_order (
        order_no STRING NOT NULL,
        opdate STRING NOT NULL
    )
    USING HUDI
    PARTITIONED BY (opdate)
    LOCATION 's3a://mytest/datalake/polaris/default/pos_order/';
   
   None.get
   java.util.NoSuchElementException: None.get
           at scala.None$.get(Option.scala:529)
           at scala.None$.get(Option.scala:527)
           at org.apache.polaris.spark.utils.PolarisCatalogUtils.loadSparkTable(PolarisCatalogUtils.java:73)
           at org.apache.polaris.spark.PolarisSparkCatalog.loadTable(PolarisSparkCatalog.java:74)
           at org.apache.polaris.spark.SparkCatalog.loadTable(SparkCatalog.java:140)
           at org.apache.spark.sql.connector.catalog.CatalogV2Util$.getTable(CatalogV2Util.scala:363)
           at org.apache.spark.sql.connector.catalog.CatalogV2Util$.loadTable(CatalogV2Util.scala:337)
           at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.$anonfun$resolveRelation$5(Analyzer.scala:1319)
           at scala.Option.orElse(Option.scala:447)
           at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.$anonfun$resolveRelation$1(Analyzer.scala:1315)
           at scala.Option.orElse(Option.scala:447)
           at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.org$apache$spark$sql$catalyst$analysis$Analyzer$ResolveRelations$$resolveRelation(Analyzer.scala:1300)
           at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$14.applyOrElse(Analyzer.scala:1157)
           at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$14.applyOrElse(Analyzer.scala:1121)
           at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
           at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:76)
           at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
           at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
           at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
           at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
           at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:32)
           at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$2(AnalysisHelper.scala:135)
           at org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1216)
           at org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1215)
           at org.apache.spark.sql.catalyst.plans.logical.Project.mapChildren(basicLogicalOperators.scala:71)
           at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:135)
           at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
           at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
           at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
           at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:32)
           at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:1121)
           at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:1080)
           at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
           at scala.collection.LinearSeqOptimized.foldLeft(LinearSeqOptimized.scala:126)
           at scala.collection.LinearSeqOptimized.foldLeft$(LinearSeqOptimized.scala:122)
           at scala.collection.immutable.List.foldLeft(List.scala:91)
           at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
           at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
           at scala.collection.immutable.List.foreach(List.scala:431)
           at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:211)
           at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:240)
           at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:236)
           at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:187)
           at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:236)
           at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:202)
           at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:182)
           at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:89)
           at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:182)
           at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:223)
           at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
           at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:222)
           at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:77)
           at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:138)
           at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:219)
           at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:546)
           at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:219)
           at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
           at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:218)
           at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:77)
           at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:74)
           at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:66)
           at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:99)
           at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
           at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:97)
           at org.apache.spark.sql.SparkSession.$anonfun$sql$4(SparkSession.scala:691)
           at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
           at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:682)
           at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:713)
           at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:744)
           at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:651)
           at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:68)
           at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:501)
           at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.$anonfun$processLine$1(SparkSQLCLIDriver.scala:619)
           at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.$anonfun$processLine$1$adapted(SparkSQLCLIDriver.scala:613)
           at scala.collection.Iterator.foreach(Iterator.scala:943)
           at scala.collection.Iterator.foreach$(Iterator.scala:943)
           at scala.collection.AbstractIterator.foreach(Iterator.scala:1431)
           at scala.collection.IterableLike.foreach(IterableLike.scala:74)
           at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
           at scala.collection.AbstractIterable.foreach(Iterable.scala:56)
           at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processLine(SparkSQLCLIDriver.scala:613)
           at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:310)
           at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
           at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
           at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:75)
           at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:52)
           at java.base/java.lang.reflect.Method.invoke(Method.java:580)
           at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
           at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:1034)
           at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:199)
           at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:222)
           at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:91)
           at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1125)
           at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1134)
           at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
   
   After the exception, `show tables;` does list pos_order:

    show tables;
   pos_order
   Time taken: 0.113 seconds, Fetched 1 row(s)

   But `SELECT * FROM pos_order` fails with the same exception.
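
   One way to narrow this down (a suggestion, not part of the original report): Hudi tables can also be read by storage path with the standard Hudi Spark datasource, bypassing the catalog entirely. If the path-based read below succeeds while `SELECT * FROM pos_order` fails, the problem is isolated to catalog resolution in the Polaris Spark client rather than the table data itself.

       // Sketch: read the Hudi table directly by path, skipping Polaris
       // catalog resolution entirely (run in spark-shell).
       val df = spark.read
         .format("hudi")
         .load("s3a://mytest/datalake/polaris/default/pos_order/")
       df.show()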
   
   **What you expected:**

   The CREATE TABLE statement should succeed, and the table should then be queryable, instead of failing with None.get.
   **Steps to reproduce:**
   1. Environment:
   JDK: java 21.0.8 2025-07-15 LTS
   Java(TM) SE Runtime Environment (build 21.0.8+12-LTS-250)
   Java HotSpot(TM) 64-Bit Server VM (build 21.0.8+12-LTS-250, mixed mode, sharing)

   spark-3.5.8-bin-hadoop3/
   polaris-bin-1.3.0-incubating/
   
   2. Launch spark-sql:
   $SPARK_HOME/bin/spark-sql \
   --packages "org.apache.hudi:hudi-spark3.5-bundle_2.12:1.1.1,org.apache.polaris:polaris-spark-3.5_2.12:1.3.0-incubating,org.apache.hadoop:hadoop-aws:3.3.4,com.amazonaws:aws-java-sdk-bundle:1.12.262" \
   --conf spark.jars.ivy=$SPARK_HOME/ivy2 \
   --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \
   --conf spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension \
   --conf spark.sql.warehouse.dir=s3a://test/datalake \
   --conf spark.sql.catalog.spark_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog \
   --conf spark.sql.catalog.polaris_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog \
   --conf hoodie.spark.polaris.catalog.class=org.apache.spark.sql.hudi.catalog.HoodieCatalog \
   --conf spark.sql.defaultCatalog=AMA \
   --conf spark.sql.catalog.AMA=org.apache.polaris.spark.SparkCatalog \
   --conf spark.sql.catalog.AMA.type=rest \
   --conf spark.sql.catalog.AMA.uri=http://127.0.0.1:8181/api/catalog \
   --conf spark.sql.catalog.AMA.warehouse=AMA \
   --conf spark.sql.catalog.AMA.scope=PRINCIPAL_ROLE:ALL \
   --conf spark.sql.catalog.AMA.rest.auth.type=oauth2 \
   --conf spark.sql.catalog.AMA.credential=root:s3cr3t \
   --conf spark.sql.catalog.AMA.oauth2-server-uri=http://127.0.0.1:8181/api/catalog/v1/oauth/tokens \
   --conf spark.sql.catalog.AMA.s3a.endpoint=http://oss-cn-hangzhou.aliyuncs.com \
   --conf spark.sql.catalog.AMA.s3a.path-style-access=false \
   --conf spark.sql.catalog.AMA.s3a.access-key-id=xxxxxxx \
   --conf spark.sql.catalog.AMA.s3a.secret-access-key=xxxxxxx \
   --conf spark.sql.catalog.AMA.client.region=irrelevant \
   --conf spark.hadoop.fs.s3a.access.key=xxxxx \
   --conf spark.hadoop.fs.s3a.secret.key=xxxxx \
   --conf spark.hadoop.fs.s3a.endpoint=oss-cn-hangzhou.aliyuncs.com \
   --conf spark.hadoop.fs.s3a.endpoint.region=cn-hangzhou \
   --conf spark.hadoop.fs.s3a.impl=org.apache.hadoop.fs.s3a.S3AFileSystem \
   --conf spark.hadoop.fs.s3a.path.style.access=false \
   --conf spark.hadoop.fs.s3a.content.encoding.enabled=false \
   --conf spark.hadoop.fs.s3a.payload.signing.enabled=false
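
   Worth noting (an aside, not from the original report): the launch command registers both spark_catalog and polaris_catalog as HoodieCatalog, while the default catalog AMA is the Polaris SparkCatalog, so which code path a query takes depends on which catalog it actually resolves against. A quick sanity check with Spark's public catalog API (available since Spark 3.4) shows what the session loaded:

       // Run in spark-shell: print the current default catalog and every
       // catalog the session knows about.
       println(spark.catalog.currentCatalog()) // expected: AMA
       spark.catalog.listCatalogs().show(truncate = false)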
   
   
   
   ### Environment
   
   **Hudi version:** 1.1.1 (hudi-spark3.5-bundle_2.12)
   **Query engine:** Spark 3.5.8 (spark-sql CLI), with Apache Polaris 1.3.0-incubating as the REST catalog
   **Relevant configs:** see the spark-sql launch command under "Steps to reproduce"
   
   
   ### Logs and Stack Trace
   
   See the stack trace in the bug description above.

