This is an automated email from the ASF dual-hosted git repository.
yao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 600f62e0edd [SPARK-44860][SQL] Add SESSION_USER function
600f62e0edd is described below
commit 600f62e0edd92f11f1bf940e87ea2a64e045a2e7
Author: Vitalii Li <[email protected]>
AuthorDate: Tue Aug 29 10:41:26 2023 +0800
[SPARK-44860][SQL] Add SESSION_USER function
### What changes were proposed in this pull request?
This change implements the `SESSION_USER` expression. It behaves exactly the
same as `CURRENT_USER`, but the SQL standard distinguishes the two when the
respective function is used inside a routine (UDF):
- `CURRENT_USER` should return the security definer, i.e. the owner of the UDF
- `SESSION_USER` should return the connected user.
The code is duplicated for this reason: to be able to identify which
expression is used inside a routine.
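For illustration, a minimal sketch of the behavior at the top level (assuming a `SparkSession` named `spark` is in scope; the user name is hypothetical):

```scala
import org.apache.spark.sql.functions.{current_user, session_user}

// Outside of a routine both functions resolve to the same CurrentUser
// expression and return the connected user, e.g. "alice" for a session
// opened by alice.
spark.sql("SELECT current_user(), session_user()").show()

// DataFrame API equivalent, using the session_user() function added here.
spark.range(1).select(current_user(), session_user()).show()
```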
### Why are the changes needed?
This expression is defined by the SQL standard but was missing from Spark.
### Does this PR introduce _any_ user-facing change?
Yes, this change introduces the new `SESSION_USER` expression.
### How was this patch tested?
Updated existing unit tests.
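As a usage note, the updated `MiscFunctionsSuite` below also covers the parameterless keyword form; a sketch of the tested behavior (again assuming a `SparkSession` named `spark`):

```scala
import org.apache.spark.sql.internal.SQLConf

// With ANSI mode off, both the call and the bare keyword are accepted.
spark.conf.set(SQLConf.ANSI_ENABLED.key, "false")
spark.sql("SELECT session_user(), session_user").show()

// With ANSI mode on and reserved keywords enforced, only the bare keyword
// is legal; "SELECT session_user()" raises a ParseException.
spark.conf.set(SQLConf.ANSI_ENABLED.key, "true")
spark.conf.set(SQLConf.ENFORCE_RESERVED_KEYWORDS.key, "true")
spark.sql("SELECT session_user").show()
```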
Closes #42549 from vitaliili-db/session_user.
Authored-by: Vitalii Li <[email protected]>
Signed-off-by: Kent Yao <[email protected]>
---
.../main/scala/org/apache/spark/sql/functions.scala | 8 ++++++++
.../apache/spark/sql/PlanGenerationTestSuite.scala | 4 ++++
.../explain-results/function_session_user.explain | 2 ++
.../query-tests/queries/function_session_user.json | 20 ++++++++++++++++++++
.../queries/function_session_user.proto.bin | Bin 0 -> 174 bytes
python/pyspark/sql/tests/test_functions.py | 1 +
.../spark/sql/catalyst/parser/SqlBaseParser.g4 | 2 +-
.../catalyst/analysis/ColumnResolutionHelper.scala | 3 ++-
.../sql/catalyst/analysis/FunctionRegistry.scala | 1 +
.../spark/sql/catalyst/parser/AstBuilder.scala | 2 +-
.../main/scala/org/apache/spark/sql/functions.scala | 8 ++++++++
.../sql-functions/sql-expression-schema.md | 13 +++++++------
.../apache/spark/sql/DataFrameFunctionsSuite.scala | 3 ++-
.../org/apache/spark/sql/MiscFunctionsSuite.scala | 9 +++++----
.../ThriftServerWithSparkContextSuite.scala | 4 ++--
15 files changed, 64 insertions(+), 16 deletions(-)
diff --git a/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala b/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala
index 7cd27ecaafb..8ea5f07c528 100644
--- a/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala
@@ -3346,6 +3346,14 @@ object functions {
*/
def user(): Column = Column.fn("user")
+ /**
+ * Returns the user name of current execution context.
+ *
+ * @group misc_funcs
+ * @since 4.0.0
+ */
+ def session_user(): Column = Column.fn("session_user")
+
/**
* Returns an universally unique identifier (UUID) string. The value is returned as a canonical
* UUID 36-character string.
diff --git a/connector/connect/client/jvm/src/test/scala/org/apache/spark/sql/PlanGenerationTestSuite.scala b/connector/connect/client/jvm/src/test/scala/org/apache/spark/sql/PlanGenerationTestSuite.scala
index 4916ff1f597..ccd68f75bda 100644
--- a/connector/connect/client/jvm/src/test/scala/org/apache/spark/sql/PlanGenerationTestSuite.scala
+++ b/connector/connect/client/jvm/src/test/scala/org/apache/spark/sql/PlanGenerationTestSuite.scala
@@ -1564,6 +1564,10 @@ class PlanGenerationTestSuite
fn.user()
}
+ functionTest("session_user") {
+ fn.session_user()
+ }
+
functionTest("md5") {
fn.md5(fn.col("g").cast("binary"))
}
diff --git a/connector/connect/common/src/test/resources/query-tests/explain-results/function_session_user.explain b/connector/connect/common/src/test/resources/query-tests/explain-results/function_session_user.explain
new file mode 100644
index 00000000000..82f5d2adcec
--- /dev/null
+++ b/connector/connect/common/src/test/resources/query-tests/explain-results/function_session_user.explain
@@ -0,0 +1,2 @@
+Project [current_user() AS current_user()#0]
++- LocalRelation <empty>, [id#0L, a#0, b#0, d#0, e#0, f#0, g#0]
diff --git a/connector/connect/common/src/test/resources/query-tests/queries/function_session_user.json b/connector/connect/common/src/test/resources/query-tests/queries/function_session_user.json
new file mode 100644
index 00000000000..07afa4a77c1
--- /dev/null
+++ b/connector/connect/common/src/test/resources/query-tests/queries/function_session_user.json
@@ -0,0 +1,20 @@
+{
+ "common": {
+ "planId": "1"
+ },
+ "project": {
+ "input": {
+ "common": {
+ "planId": "0"
+ },
+ "localRelation": {
+ "schema":
"struct\u003cid:bigint,a:int,b:double,d:struct\u003cid:bigint,a:int,b:double\u003e,e:array\u003cint\u003e,f:map\u003cstring,struct\u003cid:bigint,a:int,b:double\u003e\u003e,g:string\u003e"
+ }
+ },
+ "expressions": [{
+ "unresolvedFunction": {
+ "functionName": "session_user"
+ }
+ }]
+ }
+}
\ No newline at end of file
diff --git a/connector/connect/common/src/test/resources/query-tests/queries/function_session_user.proto.bin b/connector/connect/common/src/test/resources/query-tests/queries/function_session_user.proto.bin
new file mode 100644
index 00000000000..948e3eeed60
Binary files /dev/null and b/connector/connect/common/src/test/resources/query-tests/queries/function_session_user.proto.bin differ
diff --git a/python/pyspark/sql/tests/test_functions.py b/python/pyspark/sql/tests/test_functions.py
index 0633b8c4341..5a8e36d287c 100644
--- a/python/pyspark/sql/tests/test_functions.py
+++ b/python/pyspark/sql/tests/test_functions.py
@@ -66,6 +66,7 @@ class FunctionsTestsMixin:
"random", # namespace conflict with python built-in module
"uuid", # namespace conflict with python built-in module
"chr", # namespace conflict with python built-in function
+ "session_user", # Scala only for now, needs implementation
]
jvm_fn_set.difference_update(jvm_excluded_fn)
diff --git a/sql/api/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseParser.g4 b/sql/api/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseParser.g4
index 7a69b10dadb..6a6d39e96ca 100644
--- a/sql/api/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseParser.g4
+++ b/sql/api/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseParser.g4
@@ -951,7 +951,7 @@ datetimeUnit
;
primaryExpression
- : name=(CURRENT_DATE | CURRENT_TIMESTAMP | CURRENT_USER | USER) #currentLike
+ : name=(CURRENT_DATE | CURRENT_TIMESTAMP | CURRENT_USER | USER | SESSION_USER) #currentLike
| name=(TIMESTAMPADD | DATEADD | DATE_ADD) LEFT_PAREN (unit=datetimeUnit | invalidUnit=stringLit) COMMA unitsAmount=valueExpression COMMA timestamp=valueExpression RIGHT_PAREN #timestampadd
| name=(TIMESTAMPDIFF | DATEDIFF | DATE_DIFF | TIMEDIFF) LEFT_PAREN (unit=datetimeUnit | invalidUnit=stringLit) COMMA startTimestamp=valueExpression COMMA endTimestamp=valueExpression RIGHT_PAREN #timestampdiff
| CASE whenClause+ (ELSE elseExpression=expression)? END #searchedCase
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ColumnResolutionHelper.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ColumnResolutionHelper.scala
index 56d1f5f3a10..6af348328e7 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ColumnResolutionHelper.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ColumnResolutionHelper.scala
@@ -95,12 +95,13 @@ trait ColumnResolutionHelper extends Logging {
}
}
- // support CURRENT_DATE, CURRENT_TIMESTAMP, and grouping__id
+ // support CURRENT_DATE, CURRENT_TIMESTAMP, CURRENT_USER, USER, SESSION_USER and grouping__id
private val literalFunctions: Seq[(String, () => Expression, Expression => String)] = Seq(
(CurrentDate().prettyName, () => CurrentDate(), toPrettySQL(_)),
(CurrentTimestamp().prettyName, () => CurrentTimestamp(), toPrettySQL(_)),
(CurrentUser().prettyName, () => CurrentUser(), toPrettySQL),
("user", () => CurrentUser(), toPrettySQL),
+ ("session_user", () => CurrentUser(), toPrettySQL),
(VirtualColumn.hiveGroupingIdName, () => GroupingID(Nil), _ => VirtualColumn.hiveGroupingIdName)
)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
index 5c0c7396a85..eade7afb7cb 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
@@ -741,6 +741,7 @@ object FunctionRegistry {
expression[CurrentCatalog]("current_catalog"),
expression[CurrentUser]("current_user"),
expression[CurrentUser]("user", setAlias = true),
+ expression[CurrentUser]("session_user", setAlias = true),
expression[CallMethodViaReflection]("reflect"),
expression[CallMethodViaReflection]("java_method", true),
expression[SparkVersion]("version"),
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index b6b205f8876..b1f212b1739 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -2099,7 +2099,7 @@ class AstBuilder extends DataTypeAstBuilder with SQLConfHelper with Logging {
CurrentDate()
case SqlBaseParser.CURRENT_TIMESTAMP =>
CurrentTimestamp()
- case SqlBaseParser.CURRENT_USER | SqlBaseParser.USER =>
+ case SqlBaseParser.CURRENT_USER | SqlBaseParser.USER | SqlBaseParser.SESSION_USER =>
CurrentUser()
}
} else {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 6b474c84cdb..d72191ce7f3 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -3370,6 +3370,14 @@ object functions {
*/
def user(): Column = withExpr { CurrentUser() }
+ /**
+ * Returns the user name of current execution context.
+ *
+ * @group misc_funcs
+ * @since 4.0.0
+ */
+ def session_user(): Column = withExpr { CurrentUser() }
+
/**
* Returns an universally unique identifier (UUID) string. The value is returned as a canonical
* UUID 36-character string.
diff --git a/sql/core/src/test/resources/sql-functions/sql-expression-schema.md b/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
index 71fde8c7268..14b48db515b 100644
--- a/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
+++ b/sql/core/src/test/resources/sql-functions/sql-expression-schema.md
@@ -104,6 +104,7 @@
| org.apache.spark.sql.catalyst.expressions.CurrentTimeZone | current_timezone | SELECT current_timezone() | struct<current_timezone():string> |
| org.apache.spark.sql.catalyst.expressions.CurrentTimestamp | current_timestamp | SELECT current_timestamp() | struct<current_timestamp():timestamp> |
| org.apache.spark.sql.catalyst.expressions.CurrentUser | current_user | SELECT current_user() | struct<current_user():string> |
+| org.apache.spark.sql.catalyst.expressions.CurrentUser | session_user | SELECT session_user() | struct<current_user():string> |
| org.apache.spark.sql.catalyst.expressions.CurrentUser | user | SELECT user() | struct<current_user():string> |
| org.apache.spark.sql.catalyst.expressions.DateAdd | date_add | SELECT date_add('2016-07-30', 1) | struct<date_add(2016-07-30, 1):date> |
| org.apache.spark.sql.catalyst.expressions.DateAdd | dateadd | SELECT dateadd('2016-07-30', 1) | struct<date_add(2016-07-30, 1):date> |
@@ -131,8 +132,8 @@
| org.apache.spark.sql.catalyst.expressions.EqualTo | == | SELECT 2 == 2 | struct<(2 = 2):boolean> |
| org.apache.spark.sql.catalyst.expressions.EulerNumber | e | SELECT e() | struct<E():double> |
| org.apache.spark.sql.catalyst.expressions.Exp | exp | SELECT exp(0) | struct<EXP(0):double> |
-| org.apache.spark.sql.catalyst.expressions.Explode | explode | SELECT explode(array(10, 20)) | struct<col:int> |
-| org.apache.spark.sql.catalyst.expressions.Explode | explode_outer | SELECT explode_outer(array(10, 20)) | struct<col:int> |
+| org.apache.spark.sql.catalyst.expressions.ExplodeExpressionBuilder | explode | SELECT explode(array(10, 20)) | struct<col:int> |
+| org.apache.spark.sql.catalyst.expressions.ExplodeExpressionBuilder | explode_outer | SELECT explode_outer(array(10, 20)) | struct<col:int> |
| org.apache.spark.sql.catalyst.expressions.Expm1 | expm1 | SELECT expm1(0) | struct<EXPM1(0):double> |
| org.apache.spark.sql.catalyst.expressions.Extract | extract | SELECT extract(YEAR FROM TIMESTAMP '2019-08-12 01:00:00.123456') | struct<extract(YEAR FROM TIMESTAMP '2019-08-12 01:00:00.123456'):int> |
| org.apache.spark.sql.catalyst.expressions.Factorial | factorial | SELECT factorial(5) | struct<factorial(5):bigint> |
@@ -212,7 +213,7 @@
| org.apache.spark.sql.catalyst.expressions.MapKeys | map_keys | SELECT map_keys(map(1, 'a', 2, 'b')) | struct<map_keys(map(1, a, 2, b)):array<int>> |
| org.apache.spark.sql.catalyst.expressions.MapValues | map_values | SELECT map_values(map(1, 'a', 2, 'b')) | struct<map_values(map(1, a, 2, b)):array<string>> |
| org.apache.spark.sql.catalyst.expressions.MapZipWith | map_zip_with | SELECT map_zip_with(map(1, 'a', 2, 'b'), map(1, 'x', 2, 'y'), (k, v1, v2) -> concat(v1, v2)) | struct<map_zip_with(map(1, a, 2, b), map(1, x, 2, y), lambdafunction(concat(namedlambdavariable(), namedlambdavariable()), namedlambdavariable(), namedlambdavariable(), namedlambdavariable())):map<int,string>> |
-| org.apache.spark.sql.catalyst.expressions.Mask | mask | SELECT mask('abcd-EFGH-8765-4321') | struct<mask(abcd-EFGH-8765-4321, X, x, n, NULL):string> |
+| org.apache.spark.sql.catalyst.expressions.MaskExpressionBuilder | mask | SELECT mask('abcd-EFGH-8765-4321') | struct<mask(abcd-EFGH-8765-4321, X, x, n, NULL):string> |
| org.apache.spark.sql.catalyst.expressions.Md5 | md5 | SELECT md5('Spark') | struct<md5(Spark):string> |
| org.apache.spark.sql.catalyst.expressions.MicrosToTimestamp | timestamp_micros | SELECT timestamp_micros(1230219000123123) | struct<timestamp_micros(1230219000123123):timestamp> |
| org.apache.spark.sql.catalyst.expressions.MillisToTimestamp | timestamp_millis | SELECT timestamp_millis(1230219000123) | struct<timestamp_millis(1230219000123):timestamp> |
@@ -324,8 +325,8 @@
| org.apache.spark.sql.catalyst.expressions.Tanh | tanh | SELECT tanh(0) | struct<TANH(0):double> |
| org.apache.spark.sql.catalyst.expressions.TimeWindow | window | SELECT a, window.start, window.end, count(*) as cnt FROM VALUES ('A1', '2021-01-01 00:00:00'), ('A1', '2021-01-01 00:04:30'), ('A1', '2021-01-01 00:06:00'), ('A2', '2021-01-01 00:01:00') AS tab(a, b) GROUP by a, window(b, '5 minutes') ORDER BY a, start | struct<a:string,start:timestamp,end:timestamp,cnt:bigint> |
| org.apache.spark.sql.catalyst.expressions.ToBinary | to_binary | SELECT to_binary('abc', 'utf-8') | struct<to_binary(abc, utf-8):binary> |
-| org.apache.spark.sql.catalyst.expressions.ToCharacter | to_char | SELECT to_char(454, '999') | struct<to_char(454, 999):string> |
-| org.apache.spark.sql.catalyst.expressions.ToCharacter | to_varchar | SELECT to_varchar(454, '999') | struct<to_char(454, 999):string> |
+| org.apache.spark.sql.catalyst.expressions.ToCharacterBuilder | to_char | SELECT to_char(454, '999') | struct<to_char(454, 999):string> |
+| org.apache.spark.sql.catalyst.expressions.ToCharacterBuilder | to_varchar | SELECT to_varchar(454, '999') | struct<to_char(454, 999):string> |
| org.apache.spark.sql.catalyst.expressions.ToDegrees | degrees | SELECT degrees(3.141592653589793) | struct<DEGREES(3.141592653589793):double> |
| org.apache.spark.sql.catalyst.expressions.ToNumber | to_number | SELECT to_number('454', '999') | struct<to_number(454, 999):decimal(3,0)> |
| org.apache.spark.sql.catalyst.expressions.ToRadians | radians | SELECT radians(180) | struct<RADIANS(180):double> |
@@ -385,7 +386,7 @@
| org.apache.spark.sql.catalyst.expressions.aggregate.Corr | corr | SELECT corr(c1, c2) FROM VALUES (3, 2), (3, 3), (6, 4) as tab(c1, c2) | struct<corr(c1, c2):double> |
| org.apache.spark.sql.catalyst.expressions.aggregate.Count | count | SELECT count(*) FROM VALUES (NULL), (5), (5), (20) AS tab(col) | struct<count(1):bigint> |
| org.apache.spark.sql.catalyst.expressions.aggregate.CountIf | count_if | SELECT count_if(col % 2 = 0) FROM VALUES (NULL), (0), (1), (2), (3) AS tab(col) | struct<count_if(((col % 2) = 0)):bigint> |
-| org.apache.spark.sql.catalyst.expressions.aggregate.CountMinSketchAgg | count_min_sketch | SELECT hex(count_min_sketch(col, 0.5d, 0.5d, 1)) FROM VALUES (1), (2), (1) AS tab(col) | struct<hex(count_min_sketch(col, 0.5, 0.5, 1)):string> |
+| org.apache.spark.sql.catalyst.expressions.aggregate.CountMinSketchAggExpressionBuilder | count_min_sketch | SELECT hex(count_min_sketch(col, 0.5d, 0.5d, 1)) FROM VALUES (1), (2), (1) AS tab(col) | struct<hex(count_min_sketch(col, 0.5, 0.5, 1)):string> |
| org.apache.spark.sql.catalyst.expressions.aggregate.CovPopulation | covar_pop | SELECT covar_pop(c1, c2) FROM VALUES (1,1), (2,2), (3,3) AS tab(c1, c2) | struct<covar_pop(c1, c2):double> |
| org.apache.spark.sql.catalyst.expressions.aggregate.CovSample | covar_samp | SELECT covar_samp(c1, c2) FROM VALUES (1,1), (2,2), (3,3) AS tab(c1, c2) | struct<covar_samp(c1, c2):double> |
| org.apache.spark.sql.catalyst.expressions.aggregate.First | first | SELECT first(col) FROM VALUES (10), (5), (20) AS tab(col) | struct<first(col):int> |
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameFunctionsSuite.scala
index 55ccd9ec1ff..8ca14385e59 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameFunctionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameFunctionsSuite.scala
@@ -5885,11 +5885,12 @@ class DataFrameFunctionsSuite extends QueryTest with SharedSparkSession {
checkAnswer(df.selectExpr("CURRENT_SCHEMA()"), df.select(current_schema()))
}
- test("function current_user, user") {
+ test("function current_user, user, session_user") {
val df = Seq((1, 2), (3, 1)).toDF("a", "b")
checkAnswer(df.selectExpr("CURRENT_USER()"), df.select(current_user()))
checkAnswer(df.selectExpr("USER()"), df.select(user()))
+ checkAnswer(df.selectExpr("SESSION_USER()"), df.select(session_user()))
}
test("named_struct function") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/MiscFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/MiscFunctionsSuite.scala
index 074556fa2f9..d6e76fb6723 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/MiscFunctionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/MiscFunctionsSuite.scala
@@ -48,15 +48,16 @@ class MiscFunctionsSuite extends QueryTest with SharedSparkSession {
checkAnswer(df.selectExpr("version()"), df.select(version()))
}
- test("SPARK-21957: get current_user in normal spark apps") {
+ test("SPARK-21957, SPARK-44860: get current_user, session_user in normal
spark apps") {
val user = spark.sparkContext.sparkUser
withSQLConf(SQLConf.ANSI_ENABLED.key -> "false") {
- val df = sql("select current_user(), current_user, user, user()")
- checkAnswer(df, Row(user, user, user, user))
+ val df =
+ sql("select current_user(), current_user, user, user(),
session_user(), session_user")
+ checkAnswer(df, Row(user, user, user, user, user, user))
}
withSQLConf(SQLConf.ANSI_ENABLED.key -> "true",
SQLConf.ENFORCE_RESERVED_KEYWORDS.key -> "true") {
- Seq("user", "current_user").foreach { func =>
+ Seq("user", "current_user", "session_user").foreach { func =>
checkAnswer(sql(s"select $func"), Row(user))
checkError(
exception = intercept[ParseException](sql(s"select $func()")),
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala
index 8355a03125a..318328d71a8 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala
@@ -115,7 +115,7 @@ trait ThriftServerWithSparkContextSuite extends SharedThriftServer {
}
}
- test("SPARK-21957: get current_user through thrift server") {
+ test("SPARK-21957: get current_user, user, session_user through thrift
server") {
val clientUser = "storm_earth_fire_heed_my_call"
val sql = "select current_user()"
@@ -126,7 +126,7 @@ trait ThriftServerWithSparkContextSuite extends SharedThriftServer {
exec(s"set ${SQLConf.ANSI_ENABLED.key}=false")
- val userFuncs = Seq("user", "current_user")
+ val userFuncs = Seq("user", "current_user", "session_user")
userFuncs.foreach { func =>
val opHandle1 = exec(s"select $func(), $func")
val rowSet1 = client.fetchResults(opHandle1)