[ 
https://issues.apache.org/jira/browse/SPARK-28990?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

fengchaoge updated SPARK-28990:
-------------------------------
    Description: 
h6. When SparkSQL runs CREATE TABLE AS SELECT against a source table that does not exist, it throws "org.apache.spark.sql.catalyst.analysis.UnresolvedException: Invalid call to toAttribute on unresolved object, tree: *". This message is not user-friendly: it gives no hint that the source table is missing, so Spark users may have no idea what went wrong.
h6. A simple SQL statement reproduces it:

^create table default.spark as select * from default.dual;^
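
The same failure can be reproduced from a standalone application. A minimal sketch, assuming a local SparkSession with Hive support available and no existing default.dual table (the object name is illustrative):

{code:scala}
import org.apache.spark.sql.SparkSession

// Hypothetical driver, for illustration only.
object Spark28990Repro {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[1]")
      .enableHiveSupport() // CTAS into a Hive table goes through HiveAnalysis
      .getOrCreate()

    // default.dual does not exist, so the SELECT side never resolves;
    // on 2.4.3 this surfaces as the UnresolvedException below instead of
    // a plain "Table or view not found" AnalysisException.
    spark.sql("create table default.spark as select * from default.dual")
  }
}
{code}

Running the statement in spark-sql shows the full failure: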

{noformat}
spark-sql (default)> create table default.spark as select * from default.dual;
2019-09-05 16:27:24,127 INFO (main) [Logging.scala:logInfo(54)] - Parsing command: create table default.spark as select * from default.dual
2019-09-05 16:27:24,772 ERROR (main) [Logging.scala:logError(91)] - Failed in [create table default.spark as select * from default.dual]
org.apache.spark.sql.catalyst.analysis.UnresolvedException: Invalid call to toAttribute on unresolved object, tree: *
    at org.apache.spark.sql.catalyst.analysis.Star.toAttribute(unresolved.scala:245)
    at org.apache.spark.sql.catalyst.plans.logical.Project$$anonfun$output$1.apply(basicLogicalOperators.scala:52)
    at org.apache.spark.sql.catalyst.plans.logical.Project$$anonfun$output$1.apply(basicLogicalOperators.scala:52)
    at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
    at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
    at scala.collection.immutable.List.foreach(List.scala:392)
    at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
    at scala.collection.immutable.List.map(List.scala:296)
    at org.apache.spark.sql.catalyst.plans.logical.Project.output(basicLogicalOperators.scala:52)
    at org.apache.spark.sql.hive.HiveAnalysis$$anonfun$apply$3.applyOrElse(HiveStrategies.scala:160)
    at org.apache.spark.sql.hive.HiveAnalysis$$anonfun$apply$3.applyOrElse(HiveStrategies.scala:148)
    at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsDown$1$$anonfun$2.apply(AnalysisHelper.scala:108)
    at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsDown$1$$anonfun$2.apply(AnalysisHelper.scala:108)
    at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
    at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsDown$1.apply(AnalysisHelper.scala:107)
    at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsDown$1.apply(AnalysisHelper.scala:106)
    at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:194)
    at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.resolveOperatorsDown(AnalysisHelper.scala:106)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsDown(LogicalPlan.scala:29)
    at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.resolveOperators(AnalysisHelper.scala:73)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:29)
    at org.apache.spark.sql.hive.HiveAnalysis$.apply(HiveStrategies.scala:148)
    at org.apache.spark.sql.hive.HiveAnalysis$.apply(HiveStrategies.scala:147)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:87)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:84)
    at scala.collection.IndexedSeqOptimized$class.foldl(IndexedSeqOptimized.scala:57)
    at scala.collection.IndexedSeqOptimized$class.foldLeft(IndexedSeqOptimized.scala:66)
    at scala.collection.mutable.ArrayBuffer.foldLeft(ArrayBuffer.scala:48)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:84)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:76)
    at scala.collection.immutable.List.foreach(List.scala:392)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:76)
    at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:127)
    at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:121)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$$anonfun$executeAndCheck$1.apply(Analyzer.scala:106)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$$anonfun$executeAndCheck$1.apply(Analyzer.scala:105)
    at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:201)
    at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:105)
    at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:57)
    at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:55)
    at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:47)
    at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:78)
    at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:642)
    at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:694)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:70)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:371)
    at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:376)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:274)
    at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
    at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:849)
    at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:167)
    at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:195)
    at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
    at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:924)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:933)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
{noformat}
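
The trace points at the root cause: HiveAnalysis (HiveStrategies.scala:160) rewrites the CTAS plan and calls Project.output while the SELECT side still contains an unresolved Star, and Star.toAttribute throws before the analyzer ever gets to report the missing table. A minimal sketch of a defensive guard, following the rule shape of Spark 2.4's HiveStrategies.scala (the resolved check and the object name are illustrative, not the committed fix):

{code:scala}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.CreateTable
import org.apache.spark.sql.hive.execution.CreateHiveTableAsSelectCommand

// Sketch only: mirrors the CTAS case of HiveAnalysis with an extra guard.
object GuardedHiveAnalysis extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
    // Only rewrite CTAS once the SELECT side is fully resolved. If
    // default.dual is missing, the plan stays unresolved, this case does
    // not match, and CheckAnalysis can report the missing table instead
    // of query.output tripping over the unresolved Star.
    case CreateTable(tableDesc, mode, Some(query))
        if DDLUtils.isHiveTable(tableDesc) && query.resolved =>
      CreateHiveTableAsSelectCommand(tableDesc, query, query.output.map(_.name), mode)
  }
}
{code}

With a guard like this, the statement would instead fail in CheckAnalysis with the usual "Table or view not found" AnalysisException, which is the friendly behavior this report asks for.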



> SparkSQL invalid call to toAttribute on unresolved object, tree: *
> ------------------------------------------------------------------
>
>                 Key: SPARK-28990
>                 URL: https://issues.apache.org/jira/browse/SPARK-28990
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 2.4.3
>         Environment: Any
>            Reporter: fengchaoge
>            Priority: Major
>             Fix For: 2.4.4
>
>


