[jira] [Assigned] (CALCITE-6400) MAP_ENTRIES does not allow null as a map key value #3787

2024-05-04 Thread Caican Cai (Jira)


 [ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Caican Cai reassigned CALCITE-6400:
---

Assignee: Caican Cai

> MAP_ENTRIES does not allow null as a map key value #3787
> 
>
> Key: CALCITE-6400
> URL: https://issues.apache.org/jira/browse/CALCITE-6400
> Project: Calcite
>  Issue Type: Bug
>  Components: core
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Assignee: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> map_entries does not allow null as a map key. The null discussed here is a
> null key inside the map, not an empty map.
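> As a hedged illustration (written in the style of a method inside Calcite's
> SqlOperatorTest; the fixture calls and the expected message are assumptions,
> not the actual patch), a runtime-failure test for this behavior might look
> like:
> {code:java}
> // Hypothetical test sketch: asserts that constructing a map with a null
> // key fails at runtime rather than returning a row.
> @Test void testMapEntriesNullKey() {
>   final SqlOperatorFixture f = fixture()
>       .withLibrary(SqlLibrary.SPARK)
>       .setFor(SqlLibraryOperators.MAP_ENTRIES);
>   f.checkFails("map_entries(map['foo', 1, null, 2.0])",
>       "Cannot use null as map key", true);
> }
> {code}
> For comparison, Spark rejects the same expression at runtime: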
> {code:java}
> scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
> df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
> array<struct<key:string,value:decimal(11,1)>>]
> scala> df.show()
> org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
> key.
>   at 
> org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
>   at 
> org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
>   at 
> org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
>   at 
> scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>   at scala.collection.immutable.List.foreach(List.scala:431)
>   at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>   at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>   at scala.collection.immutable.List.map(List.scala:305)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.trees.Tr

[jira] [Updated] (CALCITE-6400) MAP_ENTRIES does not allow null as a map key value #3787

2024-05-04 Thread Caican Cai (Jira)


 [ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Caican Cai updated CALCITE-6400:

Description: 
map_entries does not allow null as a map key. The null discussed here is a
null key inside the map, not an empty map.
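As a minimal sketch of the requested behavior (hypothetical, not Calcite's
actual implementation), map construction would reject a null key at build
time, mirroring Spark's [NULL_MAP_KEY] error:
{code:java}
// Hypothetical sketch: build a map from alternating key/value arguments
// and reject any null key, e.g. mapOf("foo", 1, null, 2.0) throws.
import java.util.LinkedHashMap;
import java.util.Map;

public final class MapFromEntries {
  public static Map<Object, Object> mapOf(Object... kvs) {
    final Map<Object, Object> map = new LinkedHashMap<>();
    for (int i = 0; i < kvs.length; i += 2) {
      if (kvs[i] == null) {
        throw new IllegalArgumentException("Cannot use null as map key");
      }
      map.put(kvs[i], kvs[i + 1]);
    }
    return map;
  }
}
{code}
The Spark session below shows Spark enforcing the same rule: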
{code:java}
scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
array<struct<key:string,value:decimal(11,1)>>]

scala> df.show()
org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
key.
  at 
org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
  at 
org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
  at 
org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
  at 
org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
  at 
org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
  at 
org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
  at 
org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
  at 
org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
  at 
org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
  at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
  at scala.collection.immutable.List.foreach(List.scala:431)
  at scala.collection.TraversableLike.map(TraversableLike.scala:286)
  at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
  at scala.collection.immutable.List.map(List.scala:305)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
  at 
org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
  at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
  at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
  at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
  at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
  at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.transformWithPruning(TreeNode.scala:478)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.apply(expressions.scala:93)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.apply(expressions.scala:46)
  at 
org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
  at scala.collection.LinearSeqOptimized.foldLeft(LinearSeqOptimized.scala:126)
  at scala.collection.LinearSeqOptimized.foldLeft$(LinearSeqOptimized.scala:122)
  at scala.collection.immutable.List.foldLeft(List.scala:91)
  at 
org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
  at 
org.apache.spark.sql.catalyst.rules.RuleEx

[jira] [Updated] (CALCITE-6400) MAP_ENTRIES does not allow null as a map key value #3787

2024-05-04 Thread Caican Cai (Jira)


 [ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Caican Cai updated CALCITE-6400:

Summary: MAP_ENTRIES does not allow null as a map key value #3787  (was: 
map_entries does not allow null as a map key value)

> MAP_ENTRIES does not allow null as a map key value #3787
> 
>
> Key: CALCITE-6400
> URL: https://issues.apache.org/jira/browse/CALCITE-6400
> Project: Calcite
>  Issue Type: Bug
>  Components: core
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> {code:java}
> scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
> df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
> array<struct<key:string,value:decimal(11,1)>>]
> scala> df.show()
> org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
> key.
>   at 
> org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
>   at 
> org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
>   at 
> org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
>   at 
> scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>   at scala.collection.immutable.List.foreach(List.scala:431)
>   at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>   at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>   at scala.collection.immutable.List.map(List.scala:305)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformWithPruning(TreeNode.scala:478)
>   at 
> org.apache.spark.sq

[jira] [Commented] (CALCITE-6400) map_entries does not allow null as a map key value

2024-05-04 Thread Caican Cai (Jira)


[ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17843520#comment-17843520
 ] 

Caican Cai commented on CALCITE-6400:
-

thank you

> map_entries does not allow null as a map key value
> --
>
> Key: CALCITE-6400
> URL: https://issues.apache.org/jira/browse/CALCITE-6400
> Project: Calcite
>  Issue Type: Bug
>  Components: core
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> {code:java}
> scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
> df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
> array<struct<key:string,value:decimal(11,1)>>]
> scala> df.show()
> org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
> key.
>   at 
> org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
>   at 
> org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
>   at 
> org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
>   at 
> scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>   at scala.collection.immutable.List.foreach(List.scala:431)
>   at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>   at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>   at scala.collection.immutable.List.map(List.scala:305)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformWithPruning(TreeNode.scala:478)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.apply(expressions.scala:93)
>   at 

[jira] [Updated] (CALCITE-6400) map_entries does not allow null as a map key value

2024-05-04 Thread Caican Cai (Jira)


 [ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Caican Cai updated CALCITE-6400:

Summary: map_entries does not allow null as a map key value  (was: 
MAP_ENTRIES does not allow the key to be empty)

> map_entries does not allow null as a map key value
> --
>
> Key: CALCITE-6400
> URL: https://issues.apache.org/jira/browse/CALCITE-6400
> Project: Calcite
>  Issue Type: Bug
>  Components: core
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> {code:java}
> scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
> df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
> array<struct<key:string,value:decimal(11,1)>>]
> scala> df.show()
> org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
> key.
>   at 
> org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
>   at 
> org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
>   at 
> org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
>   at 
> scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>   at scala.collection.immutable.List.foreach(List.scala:431)
>   at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>   at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>   at scala.collection.immutable.List.map(List.scala:305)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformWithPruning(TreeNode.scala:478)
>   at 
> org.apache.spark.sql.catalyst.optimizer.

[jira] [Updated] (CALCITE-6400) MAP_ENTRIES does not allow the key to be empty

2024-05-04 Thread Caican Cai (Jira)


 [ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Caican Cai updated CALCITE-6400:

Component/s: core
 (was: avatica)

> MAP_ENTRIES does not allow the key to be empty
> ---
>
> Key: CALCITE-6400
> URL: https://issues.apache.org/jira/browse/CALCITE-6400
> Project: Calcite
>  Issue Type: Bug
>  Components: core
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> {code:java}
> scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
> df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
> array<struct<key:string,value:decimal(11,1)>>]
> scala> df.show()
> org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
> key.
>   at 
> org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
>   at 
> org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
>   at 
> org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
>   at 
> scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>   at scala.collection.immutable.List.foreach(List.scala:431)
>   at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>   at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>   at scala.collection.immutable.List.map(List.scala:305)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformWithPruning(TreeNode.scala:478)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.apply(expressions.scala:93)
>   at 
> org.apache.spa

[jira] [Comment Edited] (CALCITE-6400) MAP_ENTRIES does not allow the key to be empty

2024-05-04 Thread Julian Hyde (Jira)


[ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17843477#comment-17843477
 ] 

Julian Hyde edited comment on CALCITE-6400 at 5/4/24 5:27 PM:
--

Can you improve the summary? It's not clear whether you think Calcite should
allow the entry to be empty or not. Also, the test you have provided has a
null value, not an empty collection.

Also, 'avatica' is the wrong component.


was (Author: julianhyde):
Can you improve the summary? It's not clear whether you think Calcite should
allow the entry to be empty or not. Also, the test you have provided has a
null value, not an empty collection.

Also, the component is not 'avatica'.

> MAP_ENTRIES does not allow the key to be empty
> ---
>
> Key: CALCITE-6400
> URL: https://issues.apache.org/jira/browse/CALCITE-6400
> Project: Calcite
>  Issue Type: Bug
>  Components: avatica
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> {code:java}
> scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
> df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
> array<struct<key:string,value:decimal(11,1)>>]
> scala> df.show()
> org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
> key.
>   at 
> org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
>   at 
> org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
>   at 
> org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
>   at 
> scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>   at scala.collection.immutable.List.foreach(List.scala:431)
>   at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>   at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>   at scala.collection.immutable.List.map(List.scala:305)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>   at 
> 

[jira] [Comment Edited] (CALCITE-6400) MAP_ENTRIES does not allow the key to be empty

2024-05-04 Thread Julian Hyde (Jira)


[ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17843477#comment-17843477
 ] 

Julian Hyde edited comment on CALCITE-6400 at 5/4/24 5:26 PM:
--

Can you improve the summary? It's not clear whether you think Calcite should
allow the entry to be empty or not. Also, the test you have provided has a
null value, not an empty collection.

Also, the component is not 'avatica'.


was (Author: julianhyde):
Can you improve the summary? It's not clear whether you think Calcite should
allow the entry to be empty or not. Also, the test you have provided has a
null value, not an empty collection.

> MAP_ENTRIES does not allow the key to be empty
> ---
>
> Key: CALCITE-6400
> URL: https://issues.apache.org/jira/browse/CALCITE-6400
> Project: Calcite
>  Issue Type: Bug
>  Components: avatica
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> {code:java}
> scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
> df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
> array<struct<key:string,value:decimal(11,1)>>]
> scala> df.show()
> org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
> key.
>   at 
> org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
>   at 
> org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
>   at 
> org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
>   at 
> scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>   at scala.collection.immutable.List.foreach(List.scala:431)
>   at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>   at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>   at scala.collection.immutable.List.map(List.scala:305)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>   at 
> org.apache.spark.sql.catalyst.plans.logic

[jira] [Commented] (CALCITE-6400) MAP_ENTRIES does not allow the key to be empty

2024-05-04 Thread Julian Hyde (Jira)


[ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17843477#comment-17843477
 ] 

Julian Hyde commented on CALCITE-6400:
--

Can you improve the summary? It's not clear whether you think Calcite should
allow the entry to be empty or not. Also, the test you have provided has a
null value, not an empty collection.

> MAP_ENTRIES does not allow the key to be empty
> ---
>
> Key: CALCITE-6400
> URL: https://issues.apache.org/jira/browse/CALCITE-6400
> Project: Calcite
>  Issue Type: Bug
>  Components: avatica
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> {code:java}
> scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
> df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
> array<struct<key:string,value:decimal(11,1)>>]
> scala> df.show()
> org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
> key.
>   at 
> org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
>   at 
> org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
>   at 
> org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
>   at 
> scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>   at scala.collection.immutable.List.foreach(List.scala:431)
>   at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>   at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>   at scala.collection.immutable.List.map(List.scala:305)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.

[jira] [Updated] (CALCITE-6400) MAP_ENTRIES does not allow the key to be empty

2024-05-04 Thread Caican Cai (Jira)


 [ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Caican Cai updated CALCITE-6400:

Summary: MAP_ENTRIES does not allow the key to be empty  (was: MAP_ENTRIES 
simply does not allow the key to be empty)

> MAP_ENTRIES does not allow the key to be empty
> ---
>
> Key: CALCITE-6400
> URL: https://issues.apache.org/jira/browse/CALCITE-6400
> Project: Calcite
>  Issue Type: Bug
>  Components: avatica
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> {code:java}
> scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
> df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
> array<struct<key:string,value:decimal(11,1)>>]
> scala> df.show()
> org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
> key.
>   at 
> org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
>   at 
> org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
>   at 
> org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
>   at 
> scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>   at scala.collection.immutable.List.foreach(List.scala:431)
>   at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>   at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>   at scala.collection.immutable.List.map(List.scala:305)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformWithPruning(TreeNode.scala:478)
>   at 
> org.apache.spark.sql.catalyst.optimizer.

[jira] [Updated] (CALCITE-6300) Function MAP_VALUES/MAP_KEYS gives exception when mapValueType and mapKeyType do not equal the map's widest mapKeyType or mapValueType

2024-05-04 Thread Caican Cai (Jira)


 [ 
https://issues.apache.org/jira/browse/CALCITE-6300?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Caican Cai updated CALCITE-6300:

Summary: Function MAP_VALUES/MAP_KEYS gives exception when mapValueType and
mapKeyType do not equal the map's widest mapKeyType or mapValueType  (was:
Function MAP_VALUES/MAP_KEYS/MAP_ENTRIES gives exception when mapValueType and
mapKeyType do not equal the map's widest mapKeyType or mapValueType)

> Function MAP_VALUES/MAP_KEYS gives exception when mapValueType and mapKeyType
> do not equal the map's widest mapKeyType or mapValueType
> 
>
> Key: CALCITE-6300
> URL: https://issues.apache.org/jira/browse/CALCITE-6300
> Project: Calcite
>  Issue Type: Bug
>  Components: core
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Assignee: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> If we run the expression below in Calcite, it causes an exception:
> {code:java}
> map_values(map('foo', cast (2 as tinyint), 'bar', 2)) {code}
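> The two values have different types (TINYINT and INTEGER), so the map's value
> type should widen to their least-restrictive common type; the trace below
> shows the runtime instead handing Avatica a Byte where an Integer is
> expected. A hedged sketch of the missing coercion (illustrative only, not
> Calcite's actual code):
> {code:java}
> // Hypothetical: widen each stored value to the map's derived component
> // type (INTEGER here), so the cursor's IntAccessor never receives a Byte.
> static Object widenToInt(Object value) {
>   return value instanceof Byte
>       ? Integer.valueOf(((Byte) value).intValue())
>       : value;
> }
> {code}
> The actual failure: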
> {code:java}
> java.lang.ClassCastException: java.lang.Byte cannot be cast to 
> java.lang.Integer
>   at org.apache.calcite.avatica.util.AbstractCursor$IntAccessor.getInt(AbstractCursor.java:522)
>   at org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.convertValue(AbstractCursor.java:1396)
>   at org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.getObject(AbstractCursor.java:1377)
>   at org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.getArray(AbstractCursor.java:1432)
>   at org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.getString(AbstractCursor.java:1444)
>   at org.apache.calcite.avatica.AvaticaResultSet.getString(AvaticaResultSet.java:241)
>   at org.apache.calcite.util.JdbcTypeImpl$10.get(JdbcTypeImpl.java:112)
>   at org.apache.calcite.util.JdbcTypeImpl$10.get(JdbcTypeImpl.java:109)
>   at org.apache.calcite.sql.test.ResultCheckers.compareResultSetWithMatcher(ResultCheckers.java:248)
>   at org.apache.calcite.sql.test.ResultCheckers$MatcherResultChecker.checkResult(ResultCheckers.java:321)
>   at org.apache.calcite.test.SqlOperatorTest$TesterImpl.check(SqlOperatorTest.java:14300)
>   at org.apache.calcite.sql.test.SqlTester.check(SqlTester.java:160)
>   at org.apache.calcite.test.SqlOperatorFixtureImpl.lambda$checkScalar$2(SqlOperatorFixtureImpl.java:226)
>   at org.apache.calcite.sql.test.AbstractSqlTester.forEachQuery(AbstractSqlTester.java:450)
>   at org.apache.calcite.test.SqlOperatorFixtureImpl.checkScalar(SqlOperatorFixtureImpl.java:225)
>   at org.apache.calcite.sql.test.SqlOperatorFixture.checkScalar(SqlOperatorFixture.java:229)
>   at org.apache.calcite.test.SqlOperatorTest.testMapValuesFunc(SqlOperatorTest.java:7276)
>   at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>   at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>   at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>   at java.lang.reflect.Method.invoke(Method.java:498)
>   at org.junit.platform.commons.util.ReflectionUtils.invokeMethod(ReflectionUtils.java:727)
>   at org.junit.jupiter.engine.execution.MethodInvocation.proceed(MethodInvocation.java:60)
>   at org.junit.jupiter.engine.execution.InvocationInterceptorChain$ValidatingInvocation.proceed(InvocationInterceptorChain.java:131)
>   at org.junit.jupiter.engine.extension.TimeoutExtension.intercept(TimeoutExtension.java:156)
>   at org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestableMethod(TimeoutExtension.java:147)
>   at org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestMethod(TimeoutExtension.java:86)
>   at org.junit.jupiter.engine.execution.InterceptingExecutableInvoker$ReflectiveInterceptorCall.lambda$ofVoidMethod$0(InterceptingExecutableInvoker.java:103)
>   at org.junit.jupiter.engine.execution.InterceptingExecutableInvoker.lambda$invoke$0(InterceptingExecutableInvoker.java:93)
>   at org.junit.jupiter.engine.execution.InvocationInterceptorChain$InterceptedInvocation.proceed(InvocationInterceptorChain.java:106)
>   at org.junit.jupiter.engine.execution.InvocationInterceptorChain.proceed(InvocationInterceptorChain.java:64)
>   at org.junit.jupiter.engine.execution.InvocationInterceptorChain.chainAndInvoke(InvocationInterceptorChain.java:45)
>   at org.junit.jupiter.engine.execution.InvocationInterceptorChain.invoke(InvocationInterceptorChain.java:37)
>   at org.junit.jupiter.engine.execution.InterceptingExecutableInvoker.invoke(InterceptingExecutableInvoker

[jira] [Updated] (CALCITE-6400) MAP_ENTRIES simply does not allow the key to be empty

2024-05-04 Thread Caican Cai (Jira)


 [ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Caican Cai updated CALCITE-6400:

Summary: MAP_ENTRIES simply does not allow the key to be empty  (was: 
MAP_ENTRIES is not allowed to be empty)

> MAP_ENTRIES simply does not allow the key to be empty
> -
>
> Key: CALCITE-6400
> URL: https://issues.apache.org/jira/browse/CALCITE-6400
> Project: Calcite
>  Issue Type: Bug
>  Components: avatica
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> {code:java}
> scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
> df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
> array<struct<key:string,value:decimal(11,1)>>]
> scala> df.show()
> org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
> key.
>   at 
> org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
>   at 
> org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
>   at 
> org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
>   at 
> scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>   at scala.collection.immutable.List.foreach(List.scala:431)
>   at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>   at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>   at scala.collection.immutable.List.map(List.scala:305)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformWithPruning(TreeNode.scala:478)
>   at 
> org.apache.spark.sql.catalyst.optimiz
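
The behaviour being tracked here is easy to reproduce outside Spark. Below is a 
minimal, hypothetical Java sketch (not Calcite's implementation; class and 
method names are illustrative only) of the guard the issue asks for: when 
materializing map entries, a null key is rejected eagerly, matching the 
NULL_MAP_KEY error in the trace above.

{code:java}
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class MapEntriesSketch {
  // Hypothetical guard: reject null keys while building the entries list,
  // mirroring the NULL_MAP_KEY behaviour shown in the trace above.
  static List<Map.Entry<Object, Object>> mapEntries(Map<Object, Object> map) {
    List<Map.Entry<Object, Object>> entries = new ArrayList<>();
    for (Map.Entry<Object, Object> e : map.entrySet()) {
      if (e.getKey() == null) {
        throw new RuntimeException("Cannot use null as map key");
      }
      entries.add(new SimpleImmutableEntry<>(e.getKey(), e.getValue()));
    }
    return entries;
  }

  public static void main(String[] args) {
    Map<Object, Object> m = new LinkedHashMap<>();
    m.put("foo", 1);
    m.put(null, 2.0);  // same shape as map('foo', 1, null, 2.0)
    mapEntries(m);     // throws: Cannot use null as map key
  }
}
{code}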

[jira] [Commented] (CALCITE-6300) Function MAP_VALUES/MAP_KEYS/MAP_ENTRIES gives exception when mapValueType and mapKeyType do not equal the map's biggest mapKeyType or mapValueType

2024-05-04 Thread Caican Cai (Jira)


[ 
https://issues.apache.org/jira/browse/CALCITE-6300?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17843447#comment-17843447
 ] 

Caican Cai commented on CALCITE-6300:
-

[~julianhyde] [~mbudiu] I have thought about it and cannot come up with a more 
suitable solution. The best I can guarantee is to adapt as many MAP functions 
as possible in one pass, so that subsequent developers can build new functions 
without duplicating this work.

> Function MAP_VALUES/MAP_KEYS/MAP_ENTRIES gives exception when mapValueType 
> and mapKeyType do not equal the map's biggest mapKeyType or mapValueType 
> 
>
> Key: CALCITE-6300
> URL: https://issues.apache.org/jira/browse/CALCITE-6300
> Project: Calcite
>  Issue Type: Bug
>  Components: core
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Assignee: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> If we run the expression below in Calcite, it causes an exception:
> {code:java}
> map_values(map('foo', cast (2 as tinyint), 'bar', 2)) {code}
> {code:java}
> java.lang.ClassCastException: java.lang.Byte cannot be cast to 
> java.lang.Integer
>   at 
> org.apache.calcite.avatica.util.AbstractCursor$IntAccessor.getInt(AbstractCursor.java:522)
>at 
> org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.convertValue(AbstractCursor.java:1396)
>   at 
> org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.getObject(AbstractCursor.java:1377)
>  at 
> org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.getArray(AbstractCursor.java:1432)
>   at 
> org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.getString(AbstractCursor.java:1444)
>  at 
> org.apache.calcite.avatica.AvaticaResultSet.getString(AvaticaResultSet.java:241)
>  at org.apache.calcite.util.JdbcTypeImpl$10.get(JdbcTypeImpl.java:112)   
> at org.apache.calcite.util.JdbcTypeImpl$10.get(JdbcTypeImpl.java:109)   at 
> org.apache.calcite.sql.test.ResultCheckers.compareResultSetWithMatcher(ResultCheckers.java:248)
>   at 
> org.apache.calcite.sql.test.ResultCheckers$MatcherResultChecker.checkResult(ResultCheckers.java:321)
>  at 
> org.apache.calcite.test.SqlOperatorTest$TesterImpl.check(SqlOperatorTest.java:14300)
>  at org.apache.calcite.sql.test.SqlTester.check(SqlTester.java:160)  at 
> org.apache.calcite.test.SqlOperatorFixtureImpl.lambda$checkScalar$2(SqlOperatorFixtureImpl.java:226)
>  at 
> org.apache.calcite.sql.test.AbstractSqlTester.forEachQuery(AbstractSqlTester.java:450)
>at 
> org.apache.calcite.test.SqlOperatorFixtureImpl.checkScalar(SqlOperatorFixtureImpl.java:225)
>   at 
> org.apache.calcite.sql.test.SqlOperatorFixture.checkScalar(SqlOperatorFixture.java:229)
>   at 
> org.apache.calcite.test.SqlOperatorTest.testMapValuesFunc(SqlOperatorTest.java:7276)
>  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)  at 
> sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) 
>at 
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:498) at 
> org.junit.platform.commons.util.ReflectionUtils.invokeMethod(ReflectionUtils.java:727)
>at 
> org.junit.jupiter.engine.execution.MethodInvocation.proceed(MethodInvocation.java:60)
> at 
> org.junit.jupiter.engine.execution.InvocationInterceptorChain$ValidatingInvocation.proceed(InvocationInterceptorChain.java:131)
>   at 
> org.junit.jupiter.engine.extension.TimeoutExtension.intercept(TimeoutExtension.java:156)
>  at 
> org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestableMethod(TimeoutExtension.java:147)
>at 
> org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestMethod(TimeoutExtension.java:86)
> at 
> org.junit.jupiter.engine.execution.InterceptingExecutableInvoker$ReflectiveInterceptorCall.lambda$ofVoidMethod$0(InterceptingExecutableInvoker.java:103)
>  at 
> org.junit.jupiter.engine.execution.InterceptingExecutableInvoker.lambda$invoke$0(InterceptingExecutableInvoker.java:93)
>   at 
> org.junit.jupiter.engine.execution.InvocationInterceptorChain$InterceptedInvocation.proceed(InvocationInterceptorChain.java:106)
>  at 
> org.junit.jupiter.engine.execution.InvocationInterceptorChain.proceed(InvocationInterceptorChain.java:64)
> at 
> org.junit.jupiter.engine.execution.InvocationInterceptorChain.chainAndInvoke(InvocationInterceptorChain.java:45)
>  at 
> org.junit.jupiter.engine.execution.InvocationInterceptorChain.invoke(InvocationInterceptorChain.java:37)
>  at 
> org.junit.jupiter.engine.execution.InterceptingExecutableInvo
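
The ClassCastException above comes from reading a TINYINT value back through an 
INTEGER accessor. Below is a minimal sketch of the intended coercion, assuming 
the usual least-restrictive-type rule; all names here are illustrative, not 
Calcite's internals: every value is widened to the map's broadest value type 
before it is read back.

{code:java}
import java.util.LinkedHashMap;
import java.util.Map;

public class MapValueWideningSketch {
  // Hypothetical coercion step: the map's value type is the least restrictive
  // of TINYINT and INTEGER, i.e. INTEGER, so each stored value is widened to
  // Integer up front. Number.intValue() widens a Byte without a class cast.
  static Map<String, Integer> widenValuesToInt(Map<String, Number> raw) {
    Map<String, Integer> widened = new LinkedHashMap<>();
    for (Map.Entry<String, Number> e : raw.entrySet()) {
      widened.put(e.getKey(), e.getValue().intValue());
    }
    return widened;
  }

  public static void main(String[] args) {
    Map<String, Number> raw = new LinkedHashMap<>();
    raw.put("foo", (byte) 2); // cast(2 as tinyint)
    raw.put("bar", 2);        // plain INTEGER literal
    // Without widening, reading "foo" as Integer fails with the
    // java.lang.Byte -> java.lang.Integer cast seen in the trace above.
    System.out.println(widenValuesToInt(raw).values()); // [2, 2]
  }
}
{code}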

[jira] [Updated] (CALCITE-6300) Function MAP_VALUES/MAP_KEYS/MAP_ENTRIES gives exception when mapValueType and mapKeyType do not equal the map's biggest mapKeyType or mapValueType

2024-05-04 Thread Caican Cai (Jira)


 [ 
https://issues.apache.org/jira/browse/CALCITE-6300?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Caican Cai updated CALCITE-6300:

Summary: Function MAP_VALUES/MAP_KEYS/MAP_ENTRIES gives exception when 
mapValueType and mapKeyType do not equal the map's biggest mapKeyType or 
mapValueType  (was: Function MAP_VALUES/MAP_KEYS gives exception when 
mapValueType and mapKeyType do not equal the map's biggest mapKeyType or 
mapValueType)

> Function MAP_VALUES/MAP_KEYS/MAP_ENTRIES gives exception when mapValueType 
> and mapKeyType do not equal the map's biggest mapKeyType or mapValueType 
> 
>
> Key: CALCITE-6300
> URL: https://issues.apache.org/jira/browse/CALCITE-6300
> Project: Calcite
>  Issue Type: Bug
>  Components: core
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Assignee: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> If we run the expression below in Calcite, it causes an exception:
> {code:java}
> map_values(map('foo', cast (2 as tinyint), 'bar', 2)) {code}
> {code:java}
> java.lang.ClassCastException: java.lang.Byte cannot be cast to 
> java.lang.Integer
>   at 
> org.apache.calcite.avatica.util.AbstractCursor$IntAccessor.getInt(AbstractCursor.java:522)
>at 
> org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.convertValue(AbstractCursor.java:1396)
>   at 
> org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.getObject(AbstractCursor.java:1377)
>  at 
> org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.getArray(AbstractCursor.java:1432)
>   at 
> org.apache.calcite.avatica.util.AbstractCursor$ArrayAccessor.getString(AbstractCursor.java:1444)
>  at 
> org.apache.calcite.avatica.AvaticaResultSet.getString(AvaticaResultSet.java:241)
>  at org.apache.calcite.util.JdbcTypeImpl$10.get(JdbcTypeImpl.java:112)   
> at org.apache.calcite.util.JdbcTypeImpl$10.get(JdbcTypeImpl.java:109)   at 
> org.apache.calcite.sql.test.ResultCheckers.compareResultSetWithMatcher(ResultCheckers.java:248)
>   at 
> org.apache.calcite.sql.test.ResultCheckers$MatcherResultChecker.checkResult(ResultCheckers.java:321)
>  at 
> org.apache.calcite.test.SqlOperatorTest$TesterImpl.check(SqlOperatorTest.java:14300)
>  at org.apache.calcite.sql.test.SqlTester.check(SqlTester.java:160)  at 
> org.apache.calcite.test.SqlOperatorFixtureImpl.lambda$checkScalar$2(SqlOperatorFixtureImpl.java:226)
>  at 
> org.apache.calcite.sql.test.AbstractSqlTester.forEachQuery(AbstractSqlTester.java:450)
>at 
> org.apache.calcite.test.SqlOperatorFixtureImpl.checkScalar(SqlOperatorFixtureImpl.java:225)
>   at 
> org.apache.calcite.sql.test.SqlOperatorFixture.checkScalar(SqlOperatorFixture.java:229)
>   at 
> org.apache.calcite.test.SqlOperatorTest.testMapValuesFunc(SqlOperatorTest.java:7276)
>  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)  at 
> sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) 
>at 
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:498) at 
> org.junit.platform.commons.util.ReflectionUtils.invokeMethod(ReflectionUtils.java:727)
>at 
> org.junit.jupiter.engine.execution.MethodInvocation.proceed(MethodInvocation.java:60)
> at 
> org.junit.jupiter.engine.execution.InvocationInterceptorChain$ValidatingInvocation.proceed(InvocationInterceptorChain.java:131)
>   at 
> org.junit.jupiter.engine.extension.TimeoutExtension.intercept(TimeoutExtension.java:156)
>  at 
> org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestableMethod(TimeoutExtension.java:147)
>at 
> org.junit.jupiter.engine.extension.TimeoutExtension.interceptTestMethod(TimeoutExtension.java:86)
> at 
> org.junit.jupiter.engine.execution.InterceptingExecutableInvoker$ReflectiveInterceptorCall.lambda$ofVoidMethod$0(InterceptingExecutableInvoker.java:103)
>  at 
> org.junit.jupiter.engine.execution.InterceptingExecutableInvoker.lambda$invoke$0(InterceptingExecutableInvoker.java:93)
>   at 
> org.junit.jupiter.engine.execution.InvocationInterceptorChain$InterceptedInvocation.proceed(InvocationInterceptorChain.java:106)
>  at 
> org.junit.jupiter.engine.execution.InvocationInterceptorChain.proceed(InvocationInterceptorChain.java:64)
> at 
> org.junit.jupiter.engine.execution.InvocationInterceptorChain.chainAndInvoke(InvocationInterceptorChain.java:45)
>  at 
> org.junit.jupiter.engine.execution.InvocationInterceptorChain.invoke(InvocationInterceptorChain.java:37)
>  at 
> org.junit.jupiter.engine.execution.InterceptingExecutableInvoker.invoke(Inter

[jira] [Updated] (CALCITE-6400) MAP_ENTRIES is not allowed to be empty

2024-05-04 Thread ASF GitHub Bot (Jira)


 [ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

ASF GitHub Bot updated CALCITE-6400:

Labels: pull-request-available  (was: )

> MAP_ENTRIES is not allowed to be empty
> --
>
> Key: CALCITE-6400
> URL: https://issues.apache.org/jira/browse/CALCITE-6400
> Project: Calcite
>  Issue Type: Bug
>  Components: avatica
>Affects Versions: 1.36.0
>Reporter: Caican Cai
>Priority: Major
>  Labels: pull-request-available
> Fix For: 1.37.0
>
>
> {code:java}
> scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
> df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
> array>]
> scala> df.show()
> org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
> key.
>   at 
> org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
>   at 
> org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
>   at 
> org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
>   at 
> org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
>   at 
> org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
>   at 
> scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
>   at scala.collection.immutable.List.foreach(List.scala:431)
>   at scala.collection.TraversableLike.map(TraversableLike.scala:286)
>   at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
>   at scala.collection.immutable.List.map(List.scala:305)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
>   at 
> org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
>   at 
> org.apache.spark.sql.catalyst.trees.TreeNode.transformWithPruning(TreeNode.scala:478)
>   at 
> org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.apply(expressions.scala:93)
>   at 
> org.apache.spark.sql.catalyst.optim

[jira] [Updated] (CALCITE-6400) MAP_ENTRIES is not allowed to be empty

2024-05-04 Thread Caican Cai (Jira)


 [ 
https://issues.apache.org/jira/browse/CALCITE-6400?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Caican Cai updated CALCITE-6400:

Description: 
{code:java}
scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
array>]

scala> df.show()
org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
key.
  at 
org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
  at 
org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
  at 
org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
  at 
org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
  at 
org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
  at 
org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
  at 
org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
  at 
org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
  at 
org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
  at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
  at scala.collection.immutable.List.foreach(List.scala:431)
  at scala.collection.TraversableLike.map(TraversableLike.scala:286)
  at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
  at scala.collection.immutable.List.map(List.scala:305)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
  at 
org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
  at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
  at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
  at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
  at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
  at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.transformWithPruning(TreeNode.scala:478)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.apply(expressions.scala:93)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.apply(expressions.scala:46)
  at 
org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
  at scala.collection.LinearSeqOptimized.foldLeft(LinearSeqOptimized.scala:126)
  at scala.collection.LinearSeqOptimized.foldLeft$(LinearSeqOptimized.scala:122)
  at scala.collection.immutable.List.foldLeft(List.scala:91)
  at 
org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:219)
  at 
org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:211)
  at scala.collection.immutable.List.foreach(List.scala:431)
  at 
org.apache.s

[jira] [Created] (CALCITE-6400) MAP_ENTRIES is not allowed to be empty

2024-05-04 Thread Caican Cai (Jira)
Caican Cai created CALCITE-6400:
---

 Summary: MAP_ENTRIES is not allowed to be empty
 Key: CALCITE-6400
 URL: https://issues.apache.org/jira/browse/CALCITE-6400
 Project: Calcite
  Issue Type: Bug
  Components: avatica
Affects Versions: 1.36.0
Reporter: Caican Cai
 Fix For: 1.37.0


{code:java}
scala> val df = spark.sql("select map_entries(map('foo', 1, null, 2.0))")
df: org.apache.spark.sql.DataFrame = [map_entries(map(foo, 1, NULL, 2.0)): 
array>]

scala> df.show()
org.apache.spark.SparkRuntimeException: [NULL_MAP_KEY] Cannot use null as map 
key.
  at 
org.apache.spark.sql.errors.QueryExecutionErrors$.nullAsMapKeyNotAllowedError(QueryExecutionErrors.scala:445)
  at 
org.apache.spark.sql.catalyst.util.ArrayBasedMapBuilder.put(ArrayBasedMapBuilder.scala:56)
  at 
org.apache.spark.sql.catalyst.expressions.CreateMap.eval(complexTypeCreator.scala:248)
  at 
org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
  at 
org.apache.spark.sql.catalyst.expressions.UnaryExpression.eval(Expression.scala:542)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:80)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.$anonfun$constantFolding$4(expressions.scala:90)
  at 
org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1249)
  at 
org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1248)
  at 
org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:532)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.org$apache$spark$sql$catalyst$optimizer$ConstantFolding$$constantFolding(expressions.scala:90)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.$anonfun$applyOrElse$1(expressions.scala:94)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:207)
  at 
org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:207)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:218)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$3(QueryPlan.scala:223)
  at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
  at scala.collection.immutable.List.foreach(List.scala:431)
  at scala.collection.TraversableLike.map(TraversableLike.scala:286)
  at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
  at scala.collection.immutable.List.map(List.scala:305)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:223)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:228)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:355)
  at 
org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:228)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:94)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$$anonfun$apply$1.applyOrElse(expressions.scala:93)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:512)
  at 
org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:104)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:512)
  at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:31)
  at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
  at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
  at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
  at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:31)
  at 
org.apache.spark.sql.catalyst.trees.TreeNode.transformWithPruning(TreeNode.scala:478)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.apply(expressions.scala:93)
  at 
org.apache.spark.sql.catalyst.optimizer.ConstantFolding$.apply(expressions.scala:46)
  at 
org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:222)
  at scala.collection.LinearSeqOptimized.foldLeft(LinearSeqOptimized.scala:126)
  at scala.collection.LinearSeqOptimized.foldLeft$(LinearSeqOptimized.scala:122)
  at scala.collection.immutable.List.foldLeft(List.scala:91)
  at 
org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExec
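
One detail worth spelling out: plain Java maps happily accept null keys, so the 
rejection discussed in this issue has to be an explicit engine-level check 
rather than something the runtime provides for free. A small demonstration:

{code:java}
import java.util.HashMap;
import java.util.Map;

public class NullKeyDemo {
  public static void main(String[] args) {
    Map<String, Object> m = new HashMap<>();
    m.put("foo", 1);
    m.put(null, 2.0);  // java.util.HashMap stores a null key without complaint
    System.out.println(m.containsKey(null)); // prints: true
    // Hence an engine that wants Spark-compatible semantics must add its own
    // null-key guard, as in the NULL_MAP_KEY error shown above.
  }
}
{code}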