gatorsmile commented on a change in pull request #28194: [SPARK-31372][SQL][TEST] Display expression schema for double check.
URL: https://github.com/apache/spark/pull/28194#discussion_r407235388
 
 

 ##########
 File path: sql/core/src/test/resources/sql-functions/output.out
 ##########
 @@ -0,0 +1,3409 @@
+-- Automatically generated by ExpressionsSchemaSuite
+-- Number of queries: 480
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringSpace
+
+-- Function name: space
+-- !query
+SELECT concat(space(2), '1')
+-- !query schema
+struct<concat(space(2), 1):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.CreateArray
+
+-- Function name: array
+-- !query
+SELECT array(1, 2, 3)
+-- !query schema
+struct<array(1, 2, 3):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayExcept
+
+-- Function name: array_except
+-- !query
+SELECT array_except(array(1, 2, 3), array(1, 3, 5))
+-- !query schema
+struct<array_except(array(1, 2, 3), array(1, 3, 5)):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.BitwiseXor
+
+-- Function name: ^
+-- !query
+SELECT 3 ^ 5
+-- !query schema
+struct<(3 ^ 5):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringRPad
+
+-- Function name: rpad
+-- !query
+SELECT rpad('hi', 5, '??')
+-- !query schema
+struct<rpad(hi, 5, ??):string>
+-- !query
+SELECT rpad('hi', 1, '??')
+-- !query schema
+struct<rpad(hi, 1, ??):string>
+-- !query
+SELECT rpad('hi', 5)
+-- !query schema
+struct<rpad(hi, 5,  ):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.SchemaOfJson
+
+-- Function name: schema_of_json
+-- !query
+SELECT schema_of_json('[{"col":0}]')
+-- !query schema
+struct<schema_of_json([{"col":0}]):string>
+-- !query
+SELECT schema_of_json('[{"col":01}]', map('allowNumericLeadingZeros', 'true'))
+-- !query schema
+struct<schema_of_json([{"col":01}]):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ParseToTimestamp
+
+-- Function name: to_timestamp
+-- !query
+SELECT to_timestamp('2016-12-31 00:12:00')
+-- !query schema
+struct<to_timestamp('2016-12-31 00:12:00'):timestamp>
+-- !query
+SELECT to_timestamp('2016-12-31', 'yyyy-MM-dd')
+-- !query schema
+struct<to_timestamp('2016-12-31', 'yyyy-MM-dd'):timestamp>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.xml.XPathInt
+
+-- Function name: xpath_int
+-- !query
+SELECT xpath_int('<a><b>1</b><b>2</b></a>', 'sum(a/b)')
+-- !query schema
+struct<xpath_int(<a><b>1</b><b>2</b></a>, sum(a/b)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.VariancePop
+
+-- Function name: var_pop
+-- !query
+SELECT var_pop(col) FROM VALUES (1), (2), (3) AS tab(col)
+-- !query schema
+struct<var_pop(CAST(col AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Hex
+
+-- Function name: hex
+-- !query
+SELECT hex(17)
+-- !query schema
+struct<hex(CAST(17 AS BIGINT)):string>
+-- !query
+SELECT hex('Spark SQL')
+-- !query schema
+struct<hex(Spark SQL):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArraysZip
+
+-- Function name: arrays_zip
+-- !query
+SELECT arrays_zip(array(1, 2, 3), array(2, 3, 4))
+-- !query schema
+struct<arrays_zip(array(1, 2, 3), array(2, 3, 4)):array<struct<0:int,1:int>>>
+-- !query
+SELECT arrays_zip(array(1, 2), array(2, 3), array(3, 4))
+-- !query schema
+struct<arrays_zip(array(1, 2), array(2, 3), array(3, 4)):array<struct<0:int,1:int,2:int>>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.InputFileName
+
+-- Function name: input_file_name
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MonotonicallyIncreasingID
+
+-- Function name: monotonically_increasing_id
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.DayOfMonth
+
+-- Function name: day
+-- !query
+SELECT day('2009-07-30')
+-- !query schema
+struct<day(CAST(2009-07-30 AS DATE)):int>
+
+-- Function name: dayofmonth
+-- !query
+SELECT dayofmonth('2009-07-30')
+-- !query schema
+struct<dayofmonth(CAST(2009-07-30 AS DATE)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ElementAt
+
+-- Function name: element_at
+-- !query
+SELECT element_at(array(1, 2, 3), 2)
+-- !query schema
+struct<element_at(array(1, 2, 3), 2):int>
+-- !query
+SELECT element_at(map(1, 'a', 2, 'b'), 2)
+-- !query schema
+struct<element_at(map(1, a, 2, b), 2):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.WeekDay
+
+-- Function name: weekday
+-- !query
+SELECT weekday('2009-07-30')
+-- !query schema
+struct<weekday(CAST(2009-07-30 AS DATE)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.xml.XPathLong
+
+-- Function name: xpath_long
+-- !query
+SELECT xpath_long('<a><b>1</b><b>2</b></a>', 'sum(a/b)')
+-- !query schema
+struct<xpath_long(<a><b>1</b><b>2</b></a>, sum(a/b)):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.CumeDist
+
+-- Function name: cume_dist
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayMin
+
+-- Function name: array_min
+-- !query
+SELECT array_min(array(1, 20, null, 3))
+-- !query schema
+struct<array_min(array(1, 20, CAST(NULL AS INT), 3)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.MaxBy
+
+-- Function name: max_by
+-- !query
+SELECT max_by(x, y) FROM VALUES (('a', 10)), (('b', 50)), (('c', 20)) AS tab(x, y)
+-- !query schema
+struct<maxby(x, y):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Rank
+
+-- Function name: rank
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Right
+
+-- Function name: right
+-- !query
+SELECT right('Spark SQL', 3)
+-- !query schema
+struct<right('Spark SQL', 3):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Least
+
+-- Function name: least
+-- !query
+SELECT least(10, 9, 2, 4, 3)
+-- !query schema
+struct<least(10, 9, 2, 4, 3):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Lower
+
+-- Function name: lcase
+-- !query
+SELECT lcase('SparkSql')
+-- !query schema
+struct<lower(SparkSql):string>
+
+-- Function name: lower
+-- !query
+SELECT lower('SparkSql')
+-- !query schema
+struct<lower(SparkSql):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Nvl
+
+-- Function name: nvl
+-- !query
+SELECT nvl(NULL, array('2'))
+-- !query schema
+struct<nvl(NULL, array('2')):array<string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Pmod
+
+-- Function name: pmod
+-- !query
+SELECT pmod(10, 3)
+-- !query schema
+struct<pmod(10, 3):int>
+-- !query
+SELECT pmod(-10, 3)
+-- !query schema
+struct<pmod(-10, 3):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Chr
+
+-- Function name: char
+-- !query
+SELECT char(65)
+-- !query schema
+struct<char(CAST(65 AS BIGINT)):string>
+
+-- Function name: chr
+-- !query
+SELECT chr(65)
+-- !query schema
+struct<chr(CAST(65 AS BIGINT)):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.GetJsonObject
+
+-- Function name: get_json_object
+-- !query
+SELECT get_json_object('{"a":"b"}', '$.a')
+-- !query schema
+struct<get_json_object({"a":"b"}, $.a):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ToDegrees
+
+-- Function name: degrees
+-- !query
+SELECT degrees(3.141592653589793)
+-- !query schema
+struct<DEGREES(CAST(3.141592653589793 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayExists
+
+-- Function name: exists
+-- !query
+SELECT exists(array(1, 2, 3), x -> x % 2 == 0)
+-- !query schema
+struct<exists(array(1, 2, 3), lambdafunction(((namedlambdavariable() % 2) = 0), namedlambdavariable())):boolean>
+-- !query
+SELECT exists(array(1, 2, 3), x -> x % 2 == 10)
+-- !query schema
+struct<exists(array(1, 2, 3), lambdafunction(((namedlambdavariable() % 2) = 10), namedlambdavariable())):boolean>
+-- !query
+SELECT exists(array(1, null, 3), x -> x % 2 == 0)
+-- !query schema
+struct<exists(array(1, CAST(NULL AS INT), 3), lambdafunction(((namedlambdavariable() % 2) = 0), namedlambdavariable())):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Tanh
+
+-- Function name: tanh
+-- !query
+SELECT tanh(0)
+-- !query schema
+struct<TANH(CAST(0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringSplit
+
+-- Function name: split
+-- !query
+SELECT split('oneAtwoBthreeC', '[ABC]')
+-- !query schema
+struct<split(oneAtwoBthreeC, [ABC], -1):array<string>>
+-- !query
+SELECT split('oneAtwoBthreeC', '[ABC]', -1)
+-- !query schema
+struct<split(oneAtwoBthreeC, [ABC], -1):array<string>>
+-- !query
+SELECT split('oneAtwoBthreeC', '[ABC]', 2)
+-- !query schema
+struct<split(oneAtwoBthreeC, [ABC], 2):array<string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.GroupingID
+
+-- Function name: grouping_id
+-- !query
+SELECT name, grouping_id(), sum(age), avg(height) FROM VALUES (2, 'Alice', 165), (5, 'Bob', 180) people(age, name, height) GROUP BY cube(name, height)
+-- !query schema
+struct<name:string,grouping_id():bigint,sum(age):bigint,avg(height):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Expm1
+
+-- Function name: expm1
+-- !query
+SELECT expm1(0)
+-- !query schema
+struct<EXPM1(CAST(0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Quarter
+
+-- Function name: quarter
+-- !query
+SELECT quarter('2016-08-31')
+-- !query schema
+struct<quarter(CAST(2016-08-31 AS DATE)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Atan2
+
+-- Function name: atan2
+-- !query
+SELECT atan2(0, 0)
+-- !query schema
+struct<ATAN2(CAST(0 AS DOUBLE), CAST(0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.TypeOf
+
+-- Function name: typeof
+-- !query
+SELECT typeof(1)
+-- !query schema
+struct<typeof(1):string>
+-- !query
+SELECT typeof(array(1))
+-- !query schema
+struct<typeof(array(1)):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Sequence
+
+-- Function name: sequence
+-- !query
+SELECT sequence(1, 5)
+-- !query schema
+struct<sequence(1, 5):array<int>>
+-- !query
+SELECT sequence(5, 1)
+-- !query schema
+struct<sequence(5, 1):array<int>>
+-- !query
+SELECT sequence(to_date('2018-01-01'), to_date('2018-03-01'), interval 1 month)
+-- !query schema
+struct<sequence(to_date('2018-01-01'), to_date('2018-03-01'), INTERVAL '1 months'):array<date>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.InputFileBlockStart
+
+-- Function name: input_file_block_start
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ConcatWs
+
+-- Function name: concat_ws
+-- !query
+SELECT concat_ws(' ', 'Spark', 'SQL')
+-- !query schema
+struct<concat_ws( , Spark, SQL):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.ApproximatePercentile
+
+-- Function name: approx_percentile
+-- !query
+SELECT approx_percentile(10.0, array(0.5, 0.4, 0.1), 100)
+-- !query schema
+struct<approx_percentile(10.0, array(0.5, 0.4, 0.1), 100):array<decimal(3,1)>>
+-- !query
+SELECT approx_percentile(10.0, 0.5, 100)
+-- !query schema
+struct<approx_percentile(10.0, CAST(0.5 AS DOUBLE), 100):decimal(3,1)>
+
+-- Function name: percentile_approx
+-- !query
+SELECT percentile_approx(10.0, array(0.5, 0.4, 0.1), 100)
+-- !query schema
+struct<percentile_approx(10.0, array(0.5, 0.4, 0.1), 100):array<decimal(3,1)>>
+-- !query
+SELECT percentile_approx(10.0, 0.5, 100)
+-- !query schema
+struct<percentile_approx(10.0, CAST(0.5 AS DOUBLE), 100):decimal(3,1)>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.TimeWindow
+
+-- Function name: window
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringLocate
+
+-- Function name: position
+-- !query
+SELECT position('bar', 'foobarbar')
+-- !query schema
+struct<locate(bar, foobarbar, 1):int>
+-- !query
+SELECT position('bar', 'foobarbar', 5)
+-- !query schema
+struct<locate(bar, foobarbar, 5):int>
+-- !query
+SELECT POSITION('bar' IN 'foobarbar')
+-- !query schema
+struct<locate(bar, foobarbar, 1):int>
+
+-- Function name: locate
+-- !query
+SELECT locate('bar', 'foobarbar')
+-- !query schema
+struct<locate(bar, foobarbar, 1):int>
+-- !query
+SELECT locate('bar', 'foobarbar', 5)
+-- !query schema
+struct<locate(bar, foobarbar, 5):int>
+-- !query
+SELECT POSITION('bar' IN 'foobarbar')
+-- !query schema
+struct<locate(bar, foobarbar, 1):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.FormatNumber
+
+-- Function name: format_number
+-- !query
+SELECT format_number(12332.123456, 4)
+-- !query schema
+struct<format_number(12332.123456, 4):string>
+-- !query
+SELECT format_number(12332.123456, '##################.###')
+-- !query schema
+struct<format_number(12332.123456, ##################.###):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.UnaryPositive
+
+-- Function name: positive
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.Corr
+
+-- Function name: corr
+-- !query
+SELECT corr(c1, c2) FROM VALUES (3, 2), (3, 3), (6, 4) as tab(c1, c2)
+-- !query schema
+struct<corr(CAST(c1 AS DOUBLE), CAST(c2 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Md5
+
+-- Function name: md5
+-- !query
+SELECT md5('Spark')
+-- !query schema
+struct<md5(CAST(Spark AS BINARY)):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StructsToJson
+
+-- Function name: to_json
+-- !query
+SELECT to_json(named_struct('a', 1, 'b', 2))
+-- !query schema
+struct<to_json(named_struct(a, 1, b, 2)):string>
+-- !query
+SELECT to_json(named_struct('time', to_timestamp('2015-08-26', 'yyyy-MM-dd')), map('timestampFormat', 'dd/MM/yyyy'))
+-- !query schema
+struct<to_json(named_struct(time, to_timestamp('2015-08-26', 'yyyy-MM-dd'))):string>
+-- !query
+SELECT to_json(array(named_struct('a', 1, 'b', 2)))
+-- !query schema
+struct<to_json(array(named_struct(a, 1, b, 2))):string>
+-- !query
+SELECT to_json(map('a', named_struct('b', 1)))
+-- !query schema
+struct<to_json(map(a, named_struct(b, 1))):string>
+-- !query
+SELECT to_json(map(named_struct('a', 1),named_struct('b', 2)))
+-- !query schema
+struct<to_json(map(named_struct(a, 1), named_struct(b, 2))):string>
+-- !query
+SELECT to_json(map('a', 1))
+-- !query schema
+struct<to_json(map(a, 1)):string>
+-- !query
+SELECT to_json(array((map('a', 1))))
+-- !query schema
+struct<to_json(array(map(a, 1))):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.StddevPop
+
+-- Function name: stddev_pop
+-- !query
+SELECT stddev_pop(col) FROM VALUES (1), (2), (3) AS tab(col)
+-- !query schema
+struct<stddev_pop(CAST(col AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Rint
+
+-- Function name: rint
+-- !query
+SELECT rint(12.3456)
+-- !query schema
+struct<ROUND(CAST(12.3456 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MapFromArrays
+
+-- Function name: map_from_arrays
+-- !query
+SELECT map_from_arrays(array(1.0, 3.0), array('2', '4'))
+-- !query schema
+struct<map_from_arrays(array(1.0, 3.0), array(2, 4)):map<decimal(2,1),string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Sinh
+
+-- Function name: sinh
+-- !query
+SELECT sinh(0)
+-- !query schema
+struct<SINH(CAST(0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Lag
+
+-- Function name: lag
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.GreaterThanOrEqual
+
+-- Function name: >=
+-- !query
+SELECT 2 >= 1
+-- !query schema
+struct<(2 >= 1):boolean>
+-- !query
+SELECT 2.0 >= '2.1'
+-- !query schema
+struct<(CAST(2.0 AS DOUBLE) >= CAST(2.1 AS DOUBLE)):boolean>
+-- !query
+SELECT to_date('2009-07-30 04:17:52') >= to_date('2009-07-30 04:17:52')
+-- !query schema
+struct<(to_date('2009-07-30 04:17:52') >= to_date('2009-07-30 04:17:52')):boolean>
+-- !query
+SELECT to_date('2009-07-30 04:17:52') >= to_date('2009-08-01 04:17:52')
+-- !query schema
+struct<(to_date('2009-07-30 04:17:52') >= to_date('2009-08-01 04:17:52')):boolean>
+-- !query
+SELECT 1 >= NULL
+-- !query schema
+struct<(1 >= CAST(NULL AS INT)):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.BitwiseAnd
+
+-- Function name: &
+-- !query
+SELECT 3 & 5
+-- !query schema
+struct<(3 & 5):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.First
+
+-- Function name: first_value
+-- !query
+SELECT first_value(col) FROM VALUES (10), (5), (20) AS tab(col)
+-- !query schema
+struct<first_value(col, false):int>
+-- !query
+SELECT first_value(col) FROM VALUES (NULL), (5), (20) AS tab(col)
+-- !query schema
+struct<first_value(col, false):int>
+-- !query
+SELECT first_value(col, true) FROM VALUES (NULL), (5), (20) AS tab(col)
+-- !query schema
+struct<first_value(col, true):int>
+
+-- Function name: first
+-- !query
+SELECT first(col) FROM VALUES (10), (5), (20) AS tab(col)
+-- !query schema
+struct<first(col, false):int>
+-- !query
+SELECT first(col) FROM VALUES (NULL), (5), (20) AS tab(col)
+-- !query schema
+struct<first(col, false):int>
+-- !query
+SELECT first(col, true) FROM VALUES (NULL), (5), (20) AS tab(col)
+-- !query schema
+struct<first(col, true):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.TruncDate
+
+-- Function name: trunc
+-- !query
+SELECT trunc('2019-08-04', 'week')
+-- !query schema
+struct<trunc(CAST(2019-08-04 AS DATE), week):date>
+-- !query
+SELECT trunc('2019-08-04', 'quarter')
+-- !query schema
+struct<trunc(CAST(2019-08-04 AS DATE), quarter):date>
+-- !query
+SELECT trunc('2009-02-12', 'MM')
+-- !query schema
+struct<trunc(CAST(2009-02-12 AS DATE), MM):date>
+-- !query
+SELECT trunc('2015-10-27', 'YEAR')
+-- !query schema
+struct<trunc(CAST(2015-10-27 AS DATE), YEAR):date>
+-- !query
+SELECT trunc('2015-10-27', 'DECADE')
+-- !query schema
+struct<trunc(CAST(2015-10-27 AS DATE), DECADE):date>
+-- !query
+SELECT trunc('1981-01-19', 'century')
+-- !query schema
+struct<trunc(CAST(1981-01-19 AS DATE), century):date>
+-- !query
+SELECT trunc('1981-01-19', 'millennium')
+-- !query schema
+struct<trunc(CAST(1981-01-19 AS DATE), millennium):date>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.xml.XPathBoolean
+
+-- Function name: xpath_boolean
+-- !query
+SELECT xpath_boolean('<a><b>1</b></a>','a/b')
+-- !query schema
+struct<xpath_boolean(<a><b>1</b></a>, a/b):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MakeInterval
+
+-- Function name: make_interval
+-- !query
+SELECT make_interval(100, 11, 1, 1, 12, 30, 01.001001)
+-- !query schema
+struct<make_interval(100, 11, 1, 1, 12, 30, CAST(1.001001 AS DECIMAL(8,6))):interval>
+-- !query
+SELECT make_interval(100, null, 3)
+-- !query schema
+struct<make_interval(100, CAST(NULL AS INT), 3, 0, 0, 0, 0.000000):interval>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Atanh
+
+-- Function name: atanh
+-- !query
+SELECT atanh(0)
+-- !query schema
+struct<ATANH(CAST(0 AS DOUBLE)):double>
+-- !query
+SELECT atanh(2)
+-- !query schema
+struct<ATANH(CAST(2 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.FindInSet
+
+-- Function name: find_in_set
+-- !query
+SELECT find_in_set('ab','abc,b,ab,c,def')
+-- !query schema
+struct<find_in_set(ab, abc,b,ab,c,def):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.LengthOfJsonArray
+
+-- Function name: json_array_length
+-- !query
+SELECT json_array_length('[1,2,3,4]')
+-- !query schema
+struct<json_array_length([1,2,3,4]):int>
+-- !query
+SELECT json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]')
+-- !query schema
+struct<json_array_length([1,2,3,{"f1":1,"f2":[5,6]},4]):int>
+-- !query
+SELECT json_array_length('[1,2')
+-- !query schema
+struct<json_array_length([1,2):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.BitXorAgg
+
+-- Function name: bit_xor
+-- !query
+SELECT bit_xor(col) FROM VALUES (3), (5) AS tab(col)
+-- !query schema
+struct<bit_xor(col):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Decode
+
+-- Function name: decode
+-- !query
+SELECT decode(encode('abc', 'utf-8'), 'utf-8')
+-- !query schema
+struct<decode(encode(abc, utf-8), utf-8):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Coalesce
+
+-- Function name: coalesce
+-- !query
+SELECT coalesce(NULL, 1, NULL)
+-- !query schema
+struct<coalesce(CAST(NULL AS INT), 1, CAST(NULL AS INT)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.RegExpReplace
+
+-- Function name: regexp_replace
+-- !query
+SELECT regexp_replace('100-200', '(\\d+)', 'num')
+-- !query schema
+struct<regexp_replace(100-200, (\d+), num):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.VarianceSamp
+
+-- Function name: var_samp
+-- !query
+SELECT var_samp(col) FROM VALUES (1), (2), (3) AS tab(col)
+-- !query schema
+struct<var_samp(CAST(col AS DOUBLE)):double>
+
+-- Function name: variance
+-- !query
+SELECT variance(col) FROM VALUES (1), (2), (3) AS tab(col)
+-- !query schema
+struct<variance(CAST(col AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Cos
+
+-- Function name: cos
+-- !query
+SELECT cos(0)
+-- !query schema
+struct<COS(CAST(0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayFilter
+
+-- Function name: filter
+-- !query
+SELECT filter(array(1, 2, 3), x -> x % 2 == 1)
+-- !query schema
+struct<filter(array(1, 2, 3), lambdafunction(((namedlambdavariable() % 2) = 1), namedlambdavariable())):array<int>>
+-- !query
+SELECT filter(array(0, 2, 3), (x, i) -> x > i)
+-- !query schema
+struct<filter(array(0, 2, 3), lambdafunction((namedlambdavariable() > namedlambdavariable()), namedlambdavariable(), namedlambdavariable())):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.PosExplode
+
+-- Function name: posexplode_outer
+-- !query
+SELECT posexplode_outer(array(10,20))
+-- !query schema
+struct<pos:int,col:int>
+
+-- Function name: posexplode
+-- !query
+SELECT posexplode(array(10,20))
+-- !query schema
+struct<pos:int,col:int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.InputFileBlockLength
+
+-- Function name: input_file_block_length
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.BoolAnd
+
+-- Function name: every
+-- !query
+SELECT every(col) FROM VALUES (true), (true), (true) AS tab(col)
+-- !query schema
+struct<every(col):boolean>
+-- !query
+SELECT every(col) FROM VALUES (NULL), (true), (true) AS tab(col)
+-- !query schema
+struct<every(col):boolean>
+-- !query
+SELECT every(col) FROM VALUES (true), (false), (true) AS tab(col)
+-- !query schema
+struct<every(col):boolean>
+
+-- Function name: bool_and
+-- !query
+SELECT bool_and(col) FROM VALUES (true), (true), (true) AS tab(col)
+-- !query schema
+struct<bool_and(col):boolean>
+-- !query
+SELECT bool_and(col) FROM VALUES (NULL), (true), (true) AS tab(col)
+-- !query schema
+struct<bool_and(col):boolean>
+-- !query
+SELECT bool_and(col) FROM VALUES (true), (false), (true) AS tab(col)
+-- !query schema
+struct<bool_and(col):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.CountMinSketchAgg
+
+-- Function name: count_min_sketch
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.AssertTrue
+
+-- Function name: assert_true
+-- !query
+SELECT assert_true(0 < 1)
+-- !query schema
+struct<assert_true((0 < 1)):null>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.CurrentDate
+
+-- Function name: current_date
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MonthsBetween
+
+-- Function name: months_between
+-- !query
+SELECT months_between('1997-02-28 10:30:00', '1996-10-30')
+-- !query schema
+struct<months_between(CAST(1997-02-28 10:30:00 AS TIMESTAMP), CAST(1996-10-30 AS TIMESTAMP), true):double>
+-- !query
+SELECT months_between('1997-02-28 10:30:00', '1996-10-30', false)
+-- !query schema
+struct<months_between(CAST(1997-02-28 10:30:00 AS TIMESTAMP), CAST(1996-10-30 AS TIMESTAMP), false):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.EqualNullSafe
+
+-- Function name: <=>
+-- !query
+SELECT 2 <=> 2
+-- !query schema
+struct<(2 <=> 2):boolean>
+-- !query
+SELECT 1 <=> '1'
+-- !query schema
+struct<(1 <=> CAST(1 AS INT)):boolean>
+-- !query
+SELECT true <=> NULL
+-- !query schema
+struct<(true <=> CAST(NULL AS BOOLEAN)):boolean>
+-- !query
+SELECT NULL <=> NULL
+-- !query schema
+struct<(NULL <=> NULL):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Add
+
+-- Function name: +
+-- !query
+SELECT 1 + 2
+-- !query schema
+struct<(1 + 2):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Multiply
+
+-- Function name: *
+-- !query
+SELECT 2 * 3
+-- !query schema
+struct<(2 * 3):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.DatePart
+
+-- Function name: date_part
+-- !query
+SELECT date_part('YEAR', TIMESTAMP '2019-08-12 01:00:00.123456')
+-- !query schema
+struct<date_part('YEAR', TIMESTAMP '2019-08-12 01:00:00.123456'):int>
+-- !query
+SELECT date_part('week', timestamp'2019-08-12 01:00:00.123456')
+-- !query schema
+struct<date_part('week', TIMESTAMP '2019-08-12 01:00:00.123456'):int>
+-- !query
+SELECT date_part('doy', DATE'2019-08-12')
+-- !query schema
+struct<date_part('doy', DATE '2019-08-12'):int>
+-- !query
+SELECT date_part('SECONDS', timestamp'2019-10-01 00:00:01.000001')
+-- !query schema
+struct<date_part('SECONDS', TIMESTAMP '2019-10-01 00:00:01.000001'):decimal(8,6)>
+-- !query
+SELECT date_part('days', interval 1 year 10 months 5 days)
+-- !query schema
+struct<date_part('days', INTERVAL '1 years 10 months 5 days'):int>
+-- !query
+SELECT date_part('seconds', interval 5 hours 30 seconds 1 milliseconds 1 microseconds)
+-- !query schema
+struct<date_part('seconds', INTERVAL '5 hours 30.001001 seconds'):decimal(8,6)>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ShiftLeft
+
+-- Function name: shiftleft
+-- !query
+SELECT shiftleft(2, 1)
+-- !query schema
+struct<shiftleft(2, 1):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.GreaterThan
+
+-- Function name: >
+-- !query
+SELECT 2 > 1
+-- !query schema
+struct<(2 > 1):boolean>
+-- !query
+SELECT 2 > '1.1'
+-- !query schema
+struct<(2 > CAST(1.1 AS INT)):boolean>
+-- !query
+SELECT to_date('2009-07-30 04:17:52') > to_date('2009-07-30 04:17:52')
+-- !query schema
+struct<(to_date('2009-07-30 04:17:52') > to_date('2009-07-30 04:17:52')):boolean>
+-- !query
+SELECT to_date('2009-07-30 04:17:52') > to_date('2009-08-01 04:17:52')
+-- !query schema
+struct<(to_date('2009-07-30 04:17:52') > to_date('2009-08-01 04:17:52')):boolean>
+-- !query
+SELECT 1 > NULL
+-- !query schema
+struct<(1 > CAST(NULL AS INT)):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Slice
+
+-- Function name: slice
+-- !query
+SELECT slice(array(1, 2, 3, 4), 2, 2)
+-- !query schema
+struct<slice(array(1, 2, 3, 4), 2, 2):array<int>>
+-- !query
+SELECT slice(array(1, 2, 3, 4), -2, 2)
+-- !query schema
+struct<slice(array(1, 2, 3, 4), -2, 2):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Sentences
+
+-- Function name: sentences
+-- !query
+SELECT sentences('Hi there! Good morning.')
+-- !query schema
+struct<sentences(Hi there! Good morning., , ):array<array<string>>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.SoundEx
+
+-- Function name: soundex
+-- !query
+SELECT soundex('Miller')
+-- !query schema
+struct<soundex(Miller):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.AddMonths
+
+-- Function name: add_months
+-- !query
+SELECT add_months('2016-08-31', 1)
+-- !query schema
+struct<add_months(CAST(2016-08-31 AS DATE), 1):date>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.Max
+
+-- Function name: max
+-- !query
+SELECT max(col) FROM VALUES (10), (50), (20) AS tab(col)
+-- !query schema
+struct<max(col):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MapFilter
+
+-- Function name: map_filter
+-- !query
+SELECT map_filter(map(1, 0, 2, 2, 3, -1), (k, v) -> k > v)
+-- !query schema
+struct<map_filter(map(1, 0, 2, 2, 3, -1), lambdafunction((namedlambdavariable() > namedlambdavariable()), namedlambdavariable(), namedlambdavariable())):map<int,int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Crc32
+
+-- Function name: crc32
+-- !query
+SELECT crc32('Spark')
+-- !query schema
+struct<crc32(CAST(Spark AS BINARY)):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Sha2
+
+-- Function name: sha2
+-- !query
+SELECT sha2('Spark', 256)
+-- !query schema
+struct<sha2(CAST(Spark AS BINARY), 256):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Size
+
+-- Function name: size
+-- !query
+SELECT size(array('b', 'd', 'c', 'a'))
+-- !query schema
+struct<size(array(b, d, c, a)):int>
+-- !query
+SELECT size(map('a', 1, 'b', 2))
+-- !query schema
+struct<size(map(a, 1, b, 2)):int>
+-- !query
+SELECT size(NULL)
+-- !query schema
+struct<size(NULL):int>
+
+-- Function name: cardinality
+-- !query
+SELECT cardinality(array('b', 'd', 'c', 'a'))
+-- !query schema
+struct<cardinality(array(b, d, c, a)):int>
+-- !query
+SELECT cardinality(map('a', 1, 'b', 2))
+-- !query schema
+struct<cardinality(map(a, 1, b, 2)):int>
+-- !query
+SELECT cardinality(NULL)
+-- !query schema
+struct<cardinality(NULL):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.CurrentTimestamp
+
+-- Function name: current_timestamp
+
+-- Function name: now
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.In
+
+-- Function name: in
+-- !query
+SELECT 1 in(1, 2, 3)
+-- !query schema
+struct<(1 IN (1, 2, 3)):boolean>
+-- !query
+SELECT 1 in(2, 3, 4)
+-- !query schema
+struct<(1 IN (2, 3, 4)):boolean>
+-- !query
+SELECT named_struct('a', 1, 'b', 2) in(named_struct('a', 1, 'b', 1), named_struct('a', 1, 'b', 3))
+-- !query schema
+struct<(named_struct(a, 1, b, 2) IN (named_struct(a, 1, b, 1), named_struct(a, 1, b, 3))):boolean>
+-- !query
+SELECT named_struct('a', 1, 'b', 2) in(named_struct('a', 1, 'b', 2), named_struct('a', 1, 'b', 3))
+-- !query schema
+struct<(named_struct(a, 1, b, 2) IN (named_struct(a, 1, b, 2), named_struct(a, 1, b, 3))):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.CurrentDatabase
+
+-- Function name: current_database
+-- !query
+SELECT current_database()
+-- !query schema
+struct<current_database():string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringInstr
+
+-- Function name: instr
+-- !query
+SELECT instr('SparkSQL', 'SQL')
+-- !query schema
+struct<instr(SparkSQL, SQL):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.Sum
+
+-- Function name: sum
+-- !query
+SELECT sum(col) FROM VALUES (5), (10), (15) AS tab(col)
+-- !query schema
+struct<sum(col):bigint>
+-- !query
+SELECT sum(col) FROM VALUES (NULL), (10), (15) AS tab(col)
+-- !query schema
+struct<sum(col):bigint>
+-- !query
+SELECT sum(col) FROM VALUES (NULL), (NULL) AS tab(col)
+-- !query schema
+struct<sum(col):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.CountIf
+
+-- Function name: count_if
+-- !query
+SELECT count_if(col % 2 = 0) FROM VALUES (NULL), (0), (1), (2), (3) AS tab(col)
+-- !query schema
+struct<count_if(((col % 2) = 0)):bigint>
+-- !query
+SELECT count_if(col IS NULL) FROM VALUES (NULL), (0), (1), (2), (3) AS tab(col)
+-- !query schema
+struct<count_if((col IS NULL)):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MakeTimestamp
+
+-- Function name: make_timestamp
+-- !query
+SELECT make_timestamp(2014, 12, 28, 6, 30, 45.887)
+-- !query schema
+struct<make_timestamp(2014, 12, 28, 6, 30, CAST(45.887 AS DECIMAL(8,6))):timestamp>
+-- !query
+SELECT make_timestamp(2014, 12, 28, 6, 30, 45.887, 'CET')
+-- !query schema
+struct<make_timestamp(2014, 12, 28, 6, 30, CAST(45.887 AS DECIMAL(8,6)), CET):timestamp>
+-- !query
+SELECT make_timestamp(2019, 6, 30, 23, 59, 60)
+-- !query schema
+struct<make_timestamp(2019, 6, 30, 23, 59, CAST(60 AS DECIMAL(8,6))):timestamp>
+-- !query
+SELECT make_timestamp(2019, 13, 1, 10, 11, 12, 'PST')
+-- !query schema
+struct<make_timestamp(2019, 13, 1, 10, 11, CAST(12 AS DECIMAL(8,6)), PST):timestamp>
+-- !query
+SELECT make_timestamp(null, 7, 22, 15, 30, 0)
+-- !query schema
+struct<make_timestamp(CAST(NULL AS INT), 7, 22, 15, 30, CAST(0 AS DECIMAL(8,6))):timestamp>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.CsvToStructs
+
+-- Function name: from_csv
+-- !query
+SELECT from_csv('1, 0.8', 'a INT, b DOUBLE')
+-- !query schema
+struct<from_csv(1, 0.8):struct<a:int,b:double>>
+-- !query
+SELECT from_csv('26/08/2015', 'time Timestamp', map('timestampFormat', 'dd/MM/yyyy'))
+-- !query schema
+struct<from_csv(26/08/2015):struct<time:timestamp>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Remainder
+
+-- Function name: %
+-- !query
+SELECT 2 % 1.8
+-- !query schema
+struct<(CAST(CAST(2 AS DECIMAL(1,0)) AS DECIMAL(2,1)) % CAST(1.8 AS DECIMAL(2,1))):decimal(2,1)>
+-- !query
+SELECT MOD(2, 1.8)
+-- !query schema
+struct<(CAST(CAST(2 AS DECIMAL(1,0)) AS DECIMAL(2,1)) % CAST(1.8 AS DECIMAL(2,1))):decimal(2,1)>
+
+-- Function name: mod
+-- !query
+SELECT 2 % 1.8
+-- !query schema
+struct<(CAST(CAST(2 AS DECIMAL(1,0)) AS DECIMAL(2,1)) % CAST(1.8 AS DECIMAL(2,1))):decimal(2,1)>
+-- !query
+SELECT MOD(2, 1.8)
+-- !query schema
+struct<(CAST(CAST(2 AS DECIMAL(1,0)) AS DECIMAL(2,1)) % CAST(1.8 AS DECIMAL(2,1))):decimal(2,1)>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringRepeat
+
+-- Function name: repeat
+-- !query
+SELECT repeat('123', 2)
+-- !query schema
+struct<repeat(123, 2):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.SubstringIndex
+
+-- Function name: substring_index
+-- !query
+SELECT substring_index('www.apache.org', '.', 2)
+-- !query schema
+struct<substring_index(www.apache.org, ., 2):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringTrimLeft
+
+-- Function name: ltrim
+-- !query
+SELECT ltrim('    SparkSQL   ')
+-- !query schema
+struct<ltrim(    SparkSQL   ):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringTranslate
+
+-- Function name: translate
+-- !query
+SELECT translate('AaBbCc', 'abc', '123')
+-- !query schema
+struct<translate(AaBbCc, abc, 123):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Greatest
+
+-- Function name: greatest
+-- !query
+SELECT greatest(10, 9, 2, 4, 3)
+-- !query schema
+struct<greatest(10, 9, 2, 4, 3):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayDistinct
+
+-- Function name: array_distinct
+-- !query
+SELECT array_distinct(array(1, 2, 3, null, 3))
+-- !query schema
+struct<array_distinct(array(1, 2, 3, CAST(NULL AS INT), 3)):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringReplace
+
+-- Function name: replace
+-- !query
+SELECT replace('ABCabc', 'abc', 'DEF')
+-- !query schema
+struct<replace(ABCabc, abc, DEF):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.xml.XPathShort
+
+-- Function name: xpath_short
+-- !query
+SELECT xpath_short('<a><b>1</b><b>2</b></a>', 'sum(a/b)')
+-- !query schema
+struct<xpath_short(<a><b>1</b><b>2</b></a>, sum(a/b)):smallint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.BoolOr
+
+-- Function name: bool_or
+-- !query
+SELECT bool_or(col) FROM VALUES (true), (false), (false) AS tab(col)
+-- !query schema
+struct<bool_or(col):boolean>
+-- !query
+SELECT bool_or(col) FROM VALUES (NULL), (true), (false) AS tab(col)
+-- !query schema
+struct<bool_or(col):boolean>
+-- !query
+SELECT bool_or(col) FROM VALUES (false), (false), (NULL) AS tab(col)
+-- !query schema
+struct<bool_or(col):boolean>
+
+-- Function name: some
+-- !query
+SELECT some(col) FROM VALUES (true), (false), (false) AS tab(col)
+-- !query schema
+struct<some(col):boolean>
+-- !query
+SELECT some(col) FROM VALUES (NULL), (true), (false) AS tab(col)
+-- !query schema
+struct<some(col):boolean>
+-- !query
+SELECT some(col) FROM VALUES (false), (false), (NULL) AS tab(col)
+-- !query schema
+struct<some(col):boolean>
+
+-- Function name: any
+-- !query
+SELECT any(col) FROM VALUES (true), (false), (false) AS tab(col)
+-- !query schema
+struct<any(col):boolean>
+-- !query
+SELECT any(col) FROM VALUES (NULL), (true), (false) AS tab(col)
+-- !query schema
+struct<any(col):boolean>
+-- !query
+SELECT any(col) FROM VALUES (false), (false), (NULL) AS tab(col)
+-- !query schema
+struct<any(col):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Murmur3Hash
+
+-- Function name: hash
+-- !query
+SELECT hash('Spark', array(123), 2)
+-- !query schema
+struct<hash(Spark, array(123), 2):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.RLike
+
+-- Function name: rlike
+-- !query
+SET spark.sql.parser.escapedStringLiterals=true
+-- !query schema
+struct<key:string,value:string>
+-- !query
+SELECT '%SystemDrive%\Users\John' rlike '%SystemDrive%\\Users.*'
+-- !query schema
+struct<%SystemDrive%\Users\John RLIKE %SystemDrive%\\Users.*:boolean>
+-- !query
+SET spark.sql.parser.escapedStringLiterals=false
+-- !query schema
+struct<key:string,value:string>
+-- !query
+SELECT '%SystemDrive%\\Users\\John' rlike '%SystemDrive%\\\\Users.*'
+-- !query schema
+struct<%SystemDrive%\Users\John RLIKE %SystemDrive%\\Users.*:boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.If
+
+-- Function name: if
+-- !query
+SELECT if(1 < 2, 'a', 'b')
+-- !query schema
+struct<(IF((1 < 2), a, b)):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Grouping
+
+-- Function name: grouping
+-- !query
+SELECT name, grouping(name), sum(age) FROM VALUES (2, 'Alice'), (5, 'Bob') people(age, name) GROUP BY cube(name)
+-- !query schema
+struct<name:string,grouping(name):tinyint,sum(age):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Abs
+
+-- Function name: abs
+-- !query
+SELECT abs(-1)
+-- !query schema
+struct<abs(-1):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.InitCap
+
+-- Function name: initcap
+-- !query
+SELECT initcap('sPark sql')
+-- !query schema
+struct<initcap(sPark sql):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.Percentile
+
+-- Function name: percentile
+-- !query
+SELECT percentile(col, 0.3) FROM VALUES (0), (10) AS tab(col)
+-- !query schema
+struct<percentile(col, CAST(0.3 AS DOUBLE), 1):double>
+-- !query
+SELECT percentile(col, array(0.25, 0.75)) FROM VALUES (0), (10) AS tab(col)
+-- !query schema
+struct<percentile(col, array(0.25, 0.75), 1):array<double>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.IsNotNull
+
+-- Function name: isnotnull
+-- !query
+SELECT isnotnull(1)
+-- !query schema
+struct<(1 IS NOT NULL):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Cbrt
+
+-- Function name: cbrt
+-- !query
+SELECT cbrt(27.0)
+-- !query schema
+struct<CBRT(CAST(27.0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.BitwiseNot
+
+-- Function name: ~
+-- !query
+SELECT ~ 0
+-- !query schema
+struct<~0:int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.Last
+
+-- Function name: last_value
+-- !query
+SELECT last_value(col) FROM VALUES (10), (5), (20) AS tab(col)
+-- !query schema
+struct<last_value(col, false):int>
+-- !query
+SELECT last_value(col) FROM VALUES (10), (5), (NULL) AS tab(col)
+-- !query schema
+struct<last_value(col, false):int>
+-- !query
+SELECT last_value(col, true) FROM VALUES (10), (5), (NULL) AS tab(col)
+-- !query schema
+struct<last_value(col, true):int>
+
+-- Function name: last
+-- !query
+SELECT last(col) FROM VALUES (10), (5), (20) AS tab(col)
+-- !query schema
+struct<last(col, false):int>
+-- !query
+SELECT last(col) FROM VALUES (10), (5), (NULL) AS tab(col)
+-- !query schema
+struct<last(col, false):int>
+-- !query
+SELECT last(col, true) FROM VALUES (10), (5), (NULL) AS tab(col)
+-- !query schema
+struct<last(col, true):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.NullIf
+
+-- Function name: nullif
+-- !query
+SELECT nullif(2, 2)
+-- !query schema
+struct<nullif(2, 2):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Month
+
+-- Function name: month
+-- !query
+SELECT month('2016-07-30')
+-- !query schema
+struct<month(CAST(2016-07-30 AS DATE)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Logarithm
+
+-- Function name: log
+-- !query
+SELECT log(10, 100)
+-- !query schema
+struct<LOG(CAST(10 AS DOUBLE), CAST(100 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Subtract
+
+-- Function name: -
+-- !query
+SELECT 2 - 1
+-- !query schema
+struct<(2 - 1):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.DateAdd
+
+-- Function name: date_add
+-- !query
+SELECT date_add('2016-07-30', 1)
+-- !query schema
+struct<date_add(CAST(2016-07-30 AS DATE), 1):date>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MakeDate
+
+-- Function name: make_date
+-- !query
+SELECT make_date(2013, 7, 15)
+-- !query schema
+struct<make_date(2013, 7, 15):date>
+-- !query
+SELECT make_date(2019, 13, 1)
+-- !query schema
+struct<make_date(2019, 13, 1):date>
+-- !query
+SELECT make_date(2019, 7, NULL)
+-- !query schema
+struct<make_date(2019, 7, CAST(NULL AS INT)):date>
+-- !query
+SELECT make_date(2019, 2, 30)
+-- !query schema
+struct<make_date(2019, 2, 30):date>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.JsonToStructs
+
+-- Function name: from_json
+-- !query
+SELECT from_json('{"a":1, "b":0.8}', 'a INT, b DOUBLE')
+-- !query schema
+struct<from_json({"a":1, "b":0.8}):struct<a:int,b:double>>
+-- !query
+SELECT from_json('{"time":"26/08/2015"}', 'time Timestamp', map('timestampFormat', 'dd/MM/yyyy'))
+-- !query schema
+struct<from_json({"time":"26/08/2015"}):struct<time:timestamp>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ZipWith
+
+-- Function name: zip_with
+-- !query
+SELECT zip_with(array(1, 2, 3), array('a', 'b', 'c'), (x, y) -> (y, x))
+-- !query schema
+struct<zip_with(array(1, 2, 3), array(a, b, c), lambdafunction(named_struct(y, namedlambdavariable(), x, namedlambdavariable()), namedlambdavariable(), namedlambdavariable())):array<struct<y:string,x:int>>>
+-- !query
+SELECT zip_with(array(1, 2), array(3, 4), (x, y) -> x + y)
+-- !query schema
+struct<zip_with(array(1, 2), array(3, 4), lambdafunction((namedlambdavariable() + namedlambdavariable()), namedlambdavariable(), namedlambdavariable())):array<int>>
+-- !query
+SELECT zip_with(array('a', 'b', 'c'), array('d', 'e', 'f'), (x, y) -> concat(x, y))
+-- !query schema
+struct<zip_with(array(a, b, c), array(d, e, f), lambdafunction(concat(namedlambdavariable(), namedlambdavariable()), namedlambdavariable(), namedlambdavariable())):array<string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.NamedStruct
+
+-- Function name: struct
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Tan
+
+-- Function name: tan
+-- !query
+SELECT tan(0)
+-- !query schema
+struct<TAN(CAST(0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.EulerNumber
+
+-- Function name: e
+-- !query
+SELECT e()
+-- !query schema
+struct<E():double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringToMap
+
+-- Function name: str_to_map
+-- !query
+SELECT str_to_map('a:1,b:2,c:3', ',', ':')
+-- !query schema
+struct<str_to_map(a:1,b:2,c:3, ,, :):map<string,string>>
+-- !query
+SELECT str_to_map('a')
+-- !query schema
+struct<str_to_map(a, ,, :):map<string,string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArraySort
+
+-- Function name: array_sort
+-- !query
+SELECT array_sort(array(5, 6, 1), (left, right) -> case when left < right then -1 when left > right then 1 else 0 end)
+-- !query schema
+struct<array_sort(array(5, 6, 1), lambdafunction(CASE WHEN (namedlambdavariable() < namedlambdavariable()) THEN -1 WHEN (namedlambdavariable() > namedlambdavariable()) THEN 1 ELSE 0 END, namedlambdavariable(), namedlambdavariable())):array<int>>
+-- !query
+SELECT array_sort(array('bc', 'ab', 'dc'), (left, right) -> case when left is null and right is null then 0 when left is null then -1 when right is null then 1 when left < right then 1 when left > right then -1 else 0 end)
+-- !query schema
+struct<array_sort(array(bc, ab, dc), lambdafunction(CASE WHEN ((namedlambdavariable() IS NULL) AND (namedlambdavariable() IS NULL)) THEN 0 WHEN (namedlambdavariable() IS NULL) THEN -1 WHEN (namedlambdavariable() IS NULL) THEN 1 WHEN (namedlambdavariable() < namedlambdavariable()) THEN 1 WHEN (namedlambdavariable() > namedlambdavariable()) THEN -1 ELSE 0 END, namedlambdavariable(), namedlambdavariable())):array<string>>
+-- !query
+SELECT array_sort(array('b', 'd', null, 'c', 'a'))
+-- !query schema
+struct<array_sort(array(b, d, CAST(NULL AS STRING), c, a), lambdafunction((IF(((namedlambdavariable() IS NULL) AND (namedlambdavariable() IS NULL)), 0, (IF((namedlambdavariable() IS NULL), 1, (IF((namedlambdavariable() IS NULL), -1, (IF((namedlambdavariable() < namedlambdavariable()), -1, (IF((namedlambdavariable() > namedlambdavariable()), 1, 0)))))))))), namedlambdavariable(), namedlambdavariable())):array<string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Cast
+
+-- Function name: string
+
+-- Function name: cast
+-- !query
+SELECT cast('10' as int)
+-- !query schema
+struct<CAST(10 AS INT):int>
+
+-- Function name: tinyint
+
+-- Function name: double
+
+-- Function name: smallint
+
+-- Function name: date
+
+-- Function name: decimal
+
+-- Function name: boolean
+
+-- Function name: float
+
+-- Function name: binary
+
+-- Function name: bigint
+
+-- Function name: int
+
+-- Function name: timestamp
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.Min
+
+-- Function name: min
+-- !query
+SELECT min(col) FROM VALUES (10), (-1), (20) AS tab(col)
+-- !query schema
+struct<min(col):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.Average
+
+-- Function name: avg
+-- !query
+SELECT avg(col) FROM VALUES (1), (2), (3) AS tab(col)
+-- !query schema
+struct<avg(col):double>
+-- !query
+SELECT avg(col) FROM VALUES (1), (2), (NULL) AS tab(col)
+-- !query schema
+struct<avg(col):double>
+
+-- Function name: mean
+-- !query
+SELECT mean(col) FROM VALUES (1), (2), (3) AS tab(col)
+-- !query schema
+struct<mean(col):double>
+-- !query
+SELECT mean(col) FROM VALUES (1), (2), (NULL) AS tab(col)
+-- !query schema
+struct<mean(col):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.SortArray
+
+-- Function name: sort_array
+-- !query
+SELECT sort_array(array('b', 'd', null, 'c', 'a'), true)
+-- !query schema
+struct<sort_array(array(b, d, CAST(NULL AS STRING), c, a), true):array<string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.NextDay
+
+-- Function name: next_day
+-- !query
+SELECT next_day('2015-01-14', 'TU')
+-- !query schema
+struct<next_day(CAST(2015-01-14 AS DATE), TU):date>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Ascii
+
+-- Function name: ascii
+-- !query
+SELECT ascii('222')
+-- !query schema
+struct<ascii(222):int>
+-- !query
+SELECT ascii(2)
+-- !query schema
+struct<ascii(CAST(2 AS STRING)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayRemove
+
+-- Function name: array_remove
+-- !query
+SELECT array_remove(array(1, 2, 3, null, 3), 3)
+-- !query schema
+struct<array_remove(array(1, 2, 3, CAST(NULL AS INT), 3), 3):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Pow
+
+-- Function name: pow
+-- !query
+SELECT pow(2, 3)
+-- !query schema
+struct<pow(CAST(2 AS DOUBLE), CAST(3 AS DOUBLE)):double>
+
+-- Function name: power
+-- !query
+SELECT power(2, 3)
+-- !query schema
+struct<POWER(CAST(2 AS DOUBLE), CAST(3 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.LessThan
+
+-- Function name: <
+-- !query
+SELECT 1 < 2
+-- !query schema
+struct<(1 < 2):boolean>
+-- !query
+SELECT 1.1 < '1'
+-- !query schema
+struct<(CAST(1.1 AS DOUBLE) < CAST(1 AS DOUBLE)):boolean>
+-- !query
+SELECT to_date('2009-07-30 04:17:52') < to_date('2009-07-30 04:17:52')
+-- !query schema
+struct<(to_date('2009-07-30 04:17:52') < to_date('2009-07-30 04:17:52')):boolean>
+-- !query
+SELECT to_date('2009-07-30 04:17:52') < to_date('2009-08-01 04:17:52')
+-- !query schema
+struct<(to_date('2009-07-30 04:17:52') < to_date('2009-08-01 04:17:52')):boolean>
+-- !query
+SELECT 1 < NULL
+-- !query schema
+struct<(1 < CAST(NULL AS INT)):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MapKeys
+
+-- Function name: map_keys
+-- !query
+SELECT map_keys(map(1, 'a', 2, 'b'))
+-- !query schema
+struct<map_keys(map(1, a, 2, b)):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Inline
+
+-- Function name: inline
+-- !query
+SELECT inline(array(struct(1, 'a'), struct(2, 'b')))
+-- !query schema
+struct<col1:int,col2:string>
+
+-- Function name: inline_outer
+-- !query
+SELECT inline_outer(array(struct(1, 'a'), struct(2, 'b')))
+-- !query schema
+struct<col1:int,col2:string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MapZipWith
+
+-- Function name: map_zip_with
+-- !query
+SELECT map_zip_with(map(1, 'a', 2, 'b'), map(1, 'x', 2, 'y'), (k, v1, v2) -> concat(v1, v2))
+-- !query schema
+struct<map_zip_with(map(1, a, 2, b), map(1, x, 2, y), lambdafunction(concat(namedlambdavariable(), namedlambdavariable()), namedlambdavariable(), namedlambdavariable(), namedlambdavariable())):map<int,string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Encode
+
+-- Function name: encode
+-- !query
+SELECT encode('abc', 'utf-8')
+-- !query schema
+struct<encode(abc, utf-8):binary>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayJoin
+
+-- Function name: array_join
+-- !query
+SELECT array_join(array('hello', 'world'), ' ')
+-- !query schema
+struct<array_join(array(hello, world),  ):string>
+-- !query
+SELECT array_join(array('hello', null ,'world'), ' ')
+-- !query schema
+struct<array_join(array(hello, CAST(NULL AS STRING), world),  ):string>
+-- !query
+SELECT array_join(array('hello', null ,'world'), ' ', ',')
+-- !query schema
+struct<array_join(array(hello, CAST(NULL AS STRING), world),  , ,):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.And
+
+-- Function name: and
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Hypot
+
+-- Function name: hypot
+-- !query
+SELECT hypot(3, 4)
+-- !query schema
+struct<HYPOT(CAST(3 AS DOUBLE), CAST(4 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Round
+
+-- Function name: round
+-- !query
+SELECT round(2.5, 0)
+-- !query schema
+struct<round(2.5, 0):decimal(2,0)>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.CovSample
+
+-- Function name: covar_samp
+-- !query
+SELECT covar_samp(c1, c2) FROM VALUES (1,1), (2,2), (3,3) AS tab(c1, c2)
+-- !query schema
+struct<covar_samp(CAST(c1 AS DOUBLE), CAST(c2 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Pi
+
+-- Function name: pi
+-- !query
+SELECT pi()
+-- !query schema
+struct<PI():double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Sqrt
+
+-- Function name: sqrt
+-- !query
+SELECT sqrt(4)
+-- !query schema
+struct<SQRT(CAST(4 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.TransformKeys
+
+-- Function name: transform_keys
+-- !query
+SELECT transform_keys(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), (k, v) -> k + 1)
+-- !query schema
+struct<transform_keys(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), lambdafunction((namedlambdavariable() + 1), namedlambdavariable(), namedlambdavariable())):map<int,int>>
+-- !query
+SELECT transform_keys(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), (k, v) -> k + v)
+-- !query schema
+struct<transform_keys(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), lambdafunction((namedlambdavariable() + namedlambdavariable()), namedlambdavariable(), namedlambdavariable())):map<int,int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Substring
+
+-- Function name: substr
+-- !query
+SELECT substr('Spark SQL', 5)
+-- !query schema
+struct<substr(Spark SQL, 5, 2147483647):string>
+-- !query
+SELECT substr('Spark SQL', -3)
+-- !query schema
+struct<substr(Spark SQL, -3, 2147483647):string>
+-- !query
+SELECT substr('Spark SQL', 5, 1)
+-- !query schema
+struct<substr(Spark SQL, 5, 1):string>
+
+-- Function name: substring
+-- !query
+SELECT substring('Spark SQL', 5)
+-- !query schema
+struct<substring(Spark SQL, 5, 2147483647):string>
+-- !query
+SELECT substring('Spark SQL', -3)
+-- !query schema
+struct<substring(Spark SQL, -3, 2147483647):string>
+-- !query
+SELECT substring('Spark SQL', 5, 1)
+-- !query schema
+struct<substring(Spark SQL, 5, 1):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Asinh
+
+-- Function name: asinh
+-- !query
+SELECT asinh(0)
+-- !query schema
+struct<ASINH(CAST(0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Second
+
+-- Function name: second
+-- !query
+SELECT second('2009-07-30 12:58:59')
+-- !query schema
+struct<second(CAST(2009-07-30 12:58:59 AS TIMESTAMP)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ToUTCTimestamp
+
+-- Function name: to_utc_timestamp
+-- !query
+SELECT to_utc_timestamp('2016-08-31', 'Asia/Seoul')
+-- !query schema
+struct<to_utc_timestamp(CAST(2016-08-31 AS TIMESTAMP), Asia/Seoul):timestamp>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Upper
+
+-- Function name: ucase
+-- !query
+SELECT ucase('SparkSql')
+-- !query schema
+struct<ucase(SparkSql):string>
+
+-- Function name: upper
+-- !query
+SELECT upper('SparkSql')
+-- !query schema
+struct<upper(SparkSql):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.BitAndAgg
+
+-- Function name: bit_and
+-- !query
+SELECT bit_and(col) FROM VALUES (3), (5) AS tab(col)
+-- !query schema
+struct<bit_and(col):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Stack
+
+-- Function name: stack
+-- !query
+SELECT stack(2, 1, 2, 3)
+-- !query schema
+struct<col0:int,col1:int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.EqualTo
+
+-- Function name: =
+-- !query
+SELECT 2 = 2
+-- !query schema
+struct<(2 = 2):boolean>
+-- !query
+SELECT 1 = '1'
+-- !query schema
+struct<(1 = CAST(1 AS INT)):boolean>
+-- !query
+SELECT true = NULL
+-- !query schema
+struct<(true = CAST(NULL AS BOOLEAN)):boolean>
+-- !query
+SELECT NULL = NULL
+-- !query schema
+struct<(NULL = NULL):boolean>
+
+-- Function name: ==
+-- !query
+SELECT 2 == 2
+-- !query schema
+struct<(2 = 2):boolean>
+-- !query
+SELECT 1 == '1'
+-- !query schema
+struct<(1 = CAST(1 AS INT)):boolean>
+-- !query
+SELECT true == NULL
+-- !query schema
+struct<(true = CAST(NULL AS BOOLEAN)):boolean>
+-- !query
+SELECT NULL == NULL
+-- !query schema
+struct<(NULL = NULL):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringLPad
+
+-- Function name: lpad
+-- !query
+SELECT lpad('hi', 5, '??')
+-- !query schema
+struct<lpad(hi, 5, ??):string>
+-- !query
+SELECT lpad('hi', 1, '??')
+-- !query schema
+struct<lpad(hi, 1, ??):string>
+-- !query
+SELECT lpad('hi', 5)
+-- !query schema
+struct<lpad(hi, 5,  ):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MapFromEntries
+
+-- Function name: map_from_entries
+-- !query
+SELECT map_from_entries(array(struct(1, 'a'), struct(2, 'b')))
+-- !query schema
+struct<map_from_entries(array(named_struct(col1, 1, col2, a), named_struct(col1, 2, col2, b))):map<int,string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Cube
+
+-- Function name: cube
+-- !query
+SELECT name, age, count(*) FROM VALUES (2, 'Alice'), (5, 'Bob') people(age, name) GROUP BY cube(name, age)
+-- !query schema
+struct<name:string,age:int,count(1):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Divide
+
+-- Function name: /
+-- !query
+SELECT 3 / 2
+-- !query schema
+struct<(CAST(3 AS DOUBLE) / CAST(2 AS DOUBLE)):double>
+-- !query
+SELECT 2L / 2L
+-- !query schema
+struct<(CAST(2 AS DOUBLE) / CAST(2 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Like
+
+-- Function name: like
+-- !query
+SELECT like('Spark', '_park')
+-- !query schema
+struct<Spark LIKE _park:boolean>
+-- !query
+SET spark.sql.parser.escapedStringLiterals=true
+-- !query schema
+struct<key:string,value:string>
+-- !query
+SELECT '%SystemDrive%\Users\John' like '\%SystemDrive\%\\Users%'
+-- !query schema
+struct<%SystemDrive%\Users\John LIKE \%SystemDrive\%\\Users%:boolean>
+-- !query
+SET spark.sql.parser.escapedStringLiterals=false
+-- !query schema
+struct<key:string,value:string>
+-- !query
+SELECT '%SystemDrive%\\Users\\John' like '\%SystemDrive\%\\\\Users%'
+-- !query schema
+struct<%SystemDrive%\Users\John LIKE \%SystemDrive\%\\Users%:boolean>
+-- !query
+SELECT '%SystemDrive%/Users/John' like '/%SystemDrive/%//Users%' ESCAPE '/'
+-- !query schema
+struct<%SystemDrive%/Users/John LIKE /%SystemDrive/%//Users%:boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.OctetLength
+
+-- Function name: octet_length
+-- !query
+SELECT octet_length('Spark SQL')
+-- !query schema
+struct<octet_length(Spark SQL):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.CaseWhen
+
+-- Function name: when
+-- !query
+SELECT CASE WHEN 1 > 0 THEN 1 WHEN 2 > 0 THEN 2.0 ELSE 1.2 END
+-- !query schema
+struct<CASE WHEN (1 > 0) THEN CAST(1 AS DECIMAL(11,1)) WHEN (2 > 0) THEN CAST(2.0 AS DECIMAL(11,1)) ELSE CAST(1.2 AS DECIMAL(11,1)) END:decimal(11,1)>
+-- !query
+SELECT CASE WHEN 1 < 0 THEN 1 WHEN 2 > 0 THEN 2.0 ELSE 1.2 END
+-- !query schema
+struct<CASE WHEN (1 < 0) THEN CAST(1 AS DECIMAL(11,1)) WHEN (2 > 0) THEN CAST(2.0 AS DECIMAL(11,1)) ELSE CAST(1.2 AS DECIMAL(11,1)) END:decimal(11,1)>
+-- !query
+SELECT CASE WHEN 1 < 0 THEN 1 WHEN 2 < 0 THEN 2.0 END
+-- !query schema
+struct<CASE WHEN (1 < 0) THEN CAST(1 AS DECIMAL(11,1)) WHEN (2 < 0) THEN CAST(2.0 AS DECIMAL(11,1)) END:decimal(11,1)>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Log
+
+-- Function name: ln
+-- !query
+SELECT ln(1)
+-- !query schema
+struct<LOG(CAST(1 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.BitwiseCount
+
+-- Function name: bit_count
+-- !query
+SELECT bit_count(0)
+-- !query schema
+struct<bit_count(0):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Acos
+
+-- Function name: acos
+-- !query
+SELECT acos(1)
+-- !query schema
+struct<ACOS(CAST(1 AS DOUBLE)):double>
+-- !query
+SELECT acos(2)
+-- !query schema
+struct<ACOS(CAST(2 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.SparkPartitionID
+
+-- Function name: spark_partition_id
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.DateFormatClass
+
+-- Function name: date_format
+-- !query
+SELECT date_format('2016-04-08', 'y')
+-- !query schema
+struct<date_format(CAST(2016-04-08 AS TIMESTAMP), y):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.FromUnixTime
+
+-- Function name: from_unixtime
+-- !query
+SELECT from_unixtime(0, 'yyyy-MM-dd HH:mm:ss')
+-- !query schema
+struct<from_unixtime(CAST(0 AS BIGINT), yyyy-MM-dd HH:mm:ss):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Floor
+
+-- Function name: floor
+-- !query
+SELECT floor(-0.1)
+-- !query schema
+struct<FLOOR(-0.1):decimal(1,0)>
+-- !query
+SELECT floor(5)
+-- !query schema
+struct<FLOOR(CAST(5 AS DOUBLE)):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.SchemaOfCsv
+
+-- Function name: schema_of_csv
+-- !query
+SELECT schema_of_csv('1,abc')
+-- !query schema
+struct<schema_of_csv(1,abc):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Log2
+
+-- Function name: log2
+-- !query
+SELECT log2(2)
+-- !query schema
+struct<LOG2(CAST(2 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.DateSub
+
+-- Function name: date_sub
+-- !query
+SELECT date_sub('2016-07-30', 1)
+-- !query schema
+struct<date_sub(CAST(2016-07-30 AS DATE), 1):date>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.NTile
+
+-- Function name: ntile
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.RowNumber
+
+-- Function name: row_number
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.CreateMap
+
+-- Function name: map
+-- !query
+SELECT map(1.0, '2', 3.0, '4')
+-- !query schema
+struct<map(1.0, 2, 3.0, 4):map<decimal(2,1),string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.BitOrAgg
+
+-- Function name: bit_or
+-- !query
+SELECT bit_or(col) FROM VALUES (3), (5) AS tab(col)
+-- !query schema
+struct<bit_or(col):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.DayOfYear
+
+-- Function name: dayofyear
+-- !query
+SELECT dayofyear('2016-04-09')
+-- !query schema
+struct<dayofyear(CAST(2016-04-09 AS DATE)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.IsNull
+
+-- Function name: isnull
+-- !query
+SELECT isnull(1)
+-- !query schema
+struct<(1 IS NULL):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Ceil
+
+-- Function name: ceil
+-- !query
+SELECT ceil(-0.1)
+-- !query schema
+struct<CEIL(-0.1):decimal(1,0)>
+-- !query
+SELECT ceil(5)
+-- !query schema
+struct<CEIL(CAST(5 AS DOUBLE)):bigint>
+
+-- Function name: ceiling
+-- !query
+SELECT ceiling(-0.1)
+-- !query schema
+struct<ceiling(-0.1):decimal(1,0)>
+-- !query
+SELECT ceiling(5)
+-- !query schema
+struct<ceiling(CAST(5 AS DOUBLE)):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Asin
+
+-- Function name: asin
+-- !query
+SELECT asin(0)
+-- !query schema
+struct<ASIN(CAST(0 AS DOUBLE)):double>
+-- !query
+SELECT asin(2)
+-- !query schema
+struct<ASIN(CAST(2 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.Count
+
+-- Function name: count
+-- !query
+SELECT count(*) FROM VALUES (NULL), (5), (5), (20) AS tab(col)
+-- !query schema
+struct<count(1):bigint>
+-- !query
+SELECT count(col) FROM VALUES (NULL), (5), (5), (20) AS tab(col)
+-- !query schema
+struct<count(col):bigint>
+-- !query
+SELECT count(DISTINCT col) FROM VALUES (NULL), (5), (5), (10) AS tab(col)
+-- !query schema
+struct<count(DISTINCT col):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Minute
+
+-- Function name: minute
+-- !query
+SELECT minute('2009-07-30 12:58:59')
+-- !query schema
+struct<minute(CAST(2009-07-30 12:58:59 AS TIMESTAMP)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.PercentRank
+
+-- Function name: percent_rank
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.xml.XPathList
+
+-- Function name: xpath
+-- !query
+SELECT xpath('<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>','a/b/text()')
+-- !query schema
+struct<xpath(<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>, a/b/text()):array<string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.IntegralDivide
+
+-- Function name: div
+-- !query
+SELECT 3 div 2
+-- !query schema
+struct<(3 div 2):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.CovPopulation
+
+-- Function name: covar_pop
+-- !query
+SELECT covar_pop(c1, c2) FROM VALUES (1,1), (2,2), (3,3) AS tab(c1, c2)
+-- !query schema
+struct<covar_pop(CAST(c1 AS DOUBLE), CAST(c2 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.xml.XPathDouble
+
+-- Function name: xpath_number
+-- !query
+SELECT xpath_number('<a><b>1</b><b>2</b></a>', 'sum(a/b)')
+-- !query schema
+struct<xpath_number(<a><b>1</b><b>2</b></a>, sum(a/b)):double>
+
+-- Function name: xpath_double
+-- !query
+SELECT xpath_double('<a><b>1</b><b>2</b></a>', 'sum(a/b)')
+-- !query schema
+struct<xpath_double(<a><b>1</b><b>2</b></a>, sum(a/b)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.SparkVersion
+
+-- Function name: version
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Not
+
+-- Function name: !
+
+-- Function name: not
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ShiftRight
+
+-- Function name: shiftright
+-- !query
+SELECT shiftright(4, 1)
+-- !query schema
+struct<shiftright(4, 1):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Sin
+
+-- Function name: sin
+-- !query
+SELECT sin(0)
+-- !query schema
+struct<SIN(CAST(0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ToRadians
+
+-- Function name: radians
+-- !query
+SELECT radians(180)
+-- !query schema
+struct<RADIANS(CAST(180 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.TransformValues
+
+-- Function name: transform_values
+-- !query
+SELECT transform_values(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), (k, v) -> v + 1)
+-- !query schema
+struct<transform_values(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), lambdafunction((namedlambdavariable() + 1), namedlambdavariable(), namedlambdavariable())):map<int,int>>
+-- !query
+SELECT transform_values(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), (k, v) -> k + v)
+-- !query schema
+struct<transform_values(map_from_arrays(array(1, 2, 3), array(1, 2, 3)), lambdafunction((namedlambdavariable() + namedlambdavariable()), namedlambdavariable(), namedlambdavariable())):map<int,int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayUnion
+
+-- Function name: array_union
+-- !query
+SELECT array_union(array(1, 2, 3), array(1, 3, 5))
+-- !query schema
+struct<array_union(array(1, 2, 3), array(1, 3, 5)):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.Kurtosis
+
+-- Function name: kurtosis
+-- !query
+SELECT kurtosis(col) FROM VALUES (-10), (-20), (100), (1000) AS tab(col)
+-- !query schema
+struct<kurtosis(CAST(col AS DOUBLE)):double>
+-- !query
+SELECT kurtosis(col) FROM VALUES (1), (10), (100), (10), (1) as tab(col)
+-- !query schema
+struct<kurtosis(CAST(col AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Signum
+
+-- Function name: signum
+-- !query
+SELECT signum(40)
+-- !query schema
+struct<SIGNUM(CAST(40 AS DOUBLE)):double>
+
+-- Function name: sign
+-- !query
+SELECT sign(40)
+-- !query schema
+struct<sign(CAST(40 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Overlay
+
+-- Function name: overlay
+-- !query
+SELECT overlay('Spark SQL' PLACING '_' FROM 6)
+-- !query schema
+struct<overlay(Spark SQL, _, 6, -1):string>
+-- !query
+SELECT overlay('Spark SQL' PLACING 'CORE' FROM 7)
+-- !query schema
+struct<overlay(Spark SQL, CORE, 7, -1):string>
+-- !query
+SELECT overlay('Spark SQL' PLACING 'ANSI ' FROM 7 FOR 0)
+-- !query schema
+struct<overlay(Spark SQL, ANSI , 7, 0):string>
+-- !query
+SELECT overlay('Spark SQL' PLACING 'tructured' FROM 2 FOR 4)
+-- !query schema
+struct<overlay(Spark SQL, tructured, 2, 4):string>
+-- !query
+SELECT overlay(encode('Spark SQL', 'utf-8') PLACING encode('_', 'utf-8') FROM 6)
+-- !query schema
+struct<overlay(encode(Spark SQL, utf-8), encode(_, utf-8), 6, -1):binary>
+-- !query
+SELECT overlay(encode('Spark SQL', 'utf-8') PLACING encode('CORE', 'utf-8') FROM 7)
+-- !query schema
+struct<overlay(encode(Spark SQL, utf-8), encode(CORE, utf-8), 7, -1):binary>
+-- !query
+SELECT overlay(encode('Spark SQL', 'utf-8') PLACING encode('ANSI ', 'utf-8') FROM 7 FOR 0)
+-- !query schema
+struct<overlay(encode(Spark SQL, utf-8), encode(ANSI , utf-8), 7, 0):binary>
+-- !query
+SELECT overlay(encode('Spark SQL', 'utf-8') PLACING encode('tructured', 'utf-8') FROM 2 FOR 4)
+-- !query schema
+struct<overlay(encode(Spark SQL, utf-8), encode(tructured, utf-8), 2, 4):binary>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Sha1
+
+-- Function name: sha1
+-- !query
+SELECT sha1('Spark')
+-- !query schema
+struct<sha1(CAST(Spark AS BINARY)):string>
+
+-- Function name: sha
+-- !query
+SELECT sha('Spark')
+-- !query schema
+struct<sha(CAST(Spark AS BINARY)):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.TruncTimestamp
+
+-- Function name: date_trunc
+-- !query
+SELECT date_trunc('YEAR', '2015-03-05T09:32:05.359')
+-- !query schema
+struct<date_trunc(YEAR, CAST(2015-03-05T09:32:05.359 AS TIMESTAMP)):timestamp>
+-- !query
+SELECT date_trunc('MM', '2015-03-05T09:32:05.359')
+-- !query schema
+struct<date_trunc(MM, CAST(2015-03-05T09:32:05.359 AS TIMESTAMP)):timestamp>
+-- !query
+SELECT date_trunc('DD', '2015-03-05T09:32:05.359')
+-- !query schema
+struct<date_trunc(DD, CAST(2015-03-05T09:32:05.359 AS TIMESTAMP)):timestamp>
+-- !query
+SELECT date_trunc('HOUR', '2015-03-05T09:32:05.359')
+-- !query schema
+struct<date_trunc(HOUR, CAST(2015-03-05T09:32:05.359 AS TIMESTAMP)):timestamp>
+-- !query
+SELECT date_trunc('MILLISECOND', '2015-03-05T09:32:05.123456')
+-- !query schema
+struct<date_trunc(MILLISECOND, CAST(2015-03-05T09:32:05.123456 AS TIMESTAMP)):timestamp>
+-- !query
+SELECT date_trunc('DECADE', '2015-03-05T09:32:05.123456')
+-- !query schema
+struct<date_trunc(DECADE, CAST(2015-03-05T09:32:05.123456 AS TIMESTAMP)):timestamp>
+-- !query
+SELECT date_trunc('CENTURY', '2015-03-05T09:32:05.123456')
+-- !query schema
+struct<date_trunc(CENTURY, CAST(2015-03-05T09:32:05.123456 AS TIMESTAMP)):timestamp>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.CollectSet
+
+-- Function name: collect_set
+-- !query
+SELECT collect_set(col) FROM VALUES (1), (2), (1) AS tab(col)
+-- !query schema
+struct<collect_set(col):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Factorial
+
+-- Function name: factorial
+-- !query
+SELECT factorial(5)
+-- !query schema
+struct<factorial(5):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.BitLength
+
+-- Function name: bit_length
+-- !query
+SELECT bit_length('Spark SQL')
+-- !query schema
+struct<bit_length(Spark SQL):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StructsToCsv
+
+-- Function name: to_csv
+-- !query
+SELECT to_csv(named_struct('a', 1, 'b', 2))
+-- !query schema
+struct<to_csv(named_struct(a, 1, b, 2)):string>
+-- !query
+SELECT to_csv(named_struct('time', to_timestamp('2015-08-26', 'yyyy-MM-dd')), map('timestampFormat', 'dd/MM/yyyy'))
+-- !query schema
+struct<to_csv(named_struct(time, to_timestamp('2015-08-26', 'yyyy-MM-dd'))):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.XxHash64
+
+-- Function name: xxhash64
+-- !query
+SELECT xxhash64('Spark', array(123), 2)
+-- !query schema
+struct<xxhash64(Spark, array(123), 2):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.IfNull
+
+-- Function name: ifnull
+-- !query
+SELECT ifnull(NULL, array('2'))
+-- !query schema
+struct<ifnull(NULL, array('2')):array<string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Flatten
+
+-- Function name: flatten
+-- !query
+SELECT flatten(array(array(1, 2), array(3, 4)))
+-- !query schema
+struct<flatten(array(array(1, 2), array(3, 4))):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.CollectList
+
+-- Function name: collect_list
+-- !query
+SELECT collect_list(col) FROM VALUES (1), (2), (1) AS tab(col)
+-- !query schema
+struct<collect_list(col):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.BitwiseOr
+
+-- Function name: |
+-- !query
+SELECT 3 | 5
+-- !query schema
+struct<(3 | 5):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Or
+
+-- Function name: or
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayRepeat
+
+-- Function name: array_repeat
+-- !query
+SELECT array_repeat('123', 2)
+-- !query schema
+struct<array_repeat(123, 2):array<string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.xml.XPathString
+
+-- Function name: xpath_string
+-- !query
+SELECT xpath_string('<a><b>b</b><c>cc</c></a>','a/c')
+-- !query schema
+struct<xpath_string(<a><b>b</b><c>cc</c></a>, a/c):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayMax
+
+-- Function name: array_max
+-- !query
+SELECT array_max(array(1, 20, null, 3))
+-- !query schema
+struct<array_max(array(1, 20, CAST(NULL AS INT), 3)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringTrim
+
+-- Function name: trim
+-- !query
+SELECT trim('    SparkSQL   ')
+-- !query schema
+struct<trim(    SparkSQL   ):string>
+-- !query
+SELECT trim(BOTH FROM '    SparkSQL   ')
+-- !query schema
+struct<trim(    SparkSQL   ):string>
+-- !query
+SELECT trim(LEADING FROM '    SparkSQL   ')
+-- !query schema
+struct<ltrim(    SparkSQL   ):string>
+-- !query
+SELECT trim(TRAILING FROM '    SparkSQL   ')
+-- !query schema
+struct<rtrim(    SparkSQL   ):string>
+-- !query
+SELECT trim('SL' FROM 'SSparkSQLS')
+-- !query schema
+struct<trim(SSparkSQLS, SL):string>
+-- !query
+SELECT trim(BOTH 'SL' FROM 'SSparkSQLS')
+-- !query schema
+struct<trim(SSparkSQLS, SL):string>
+-- !query
+SELECT trim(LEADING 'SL' FROM 'SSparkSQLS')
+-- !query schema
+struct<ltrim(SSparkSQLS, SL):string>
+-- !query
+SELECT trim(TRAILING 'SL' FROM 'SSparkSQLS')
+-- !query schema
+struct<rtrim(SSparkSQLS, SL):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.IsNaN
+
+-- Function name: isnan
+-- !query
+SELECT isnan(cast('NaN' as double))
+-- !query schema
+struct<isnan(CAST(NaN AS DOUBLE)):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Levenshtein
+
+-- Function name: levenshtein
+-- !query
+SELECT levenshtein('kitten', 'sitting')
+-- !query schema
+struct<levenshtein(kitten, sitting):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.HyperLogLogPlusPlus
+
+-- Function name: approx_count_distinct
+-- !query
+SELECT approx_count_distinct(col1) FROM VALUES (1), (1), (2), (2), (3) tab(col1)
+-- !query schema
+struct<approx_count_distinct(col1):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MapConcat
+
+-- Function name: map_concat
+-- !query
+SELECT map_concat(map(1, 'a', 2, 'b'), map(3, 'c'))
+-- !query schema
+struct<map_concat(map(1, a, 2, b), map(3, c)):map<int,string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Atan
+
+-- Function name: atan
+-- !query
+SELECT atan(0)
+-- !query schema
+struct<ATAN(CAST(0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.xml.XPathFloat
+
+-- Function name: xpath_float
+-- !query
+SELECT xpath_float('<a><b>1</b><b>2</b></a>', 'sum(a/b)')
+-- !query schema
+struct<xpath_float(<a><b>1</b><b>2</b></a>, sum(a/b)):float>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Log10
+
+-- Function name: log10
+-- !query
+SELECT log10(10)
+-- !query schema
+struct<LOG10(CAST(10 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.FromUTCTimestamp
+
+-- Function name: from_utc_timestamp
+-- !query
+SELECT from_utc_timestamp('2016-08-31', 'Asia/Seoul')
+-- !query schema
+struct<from_utc_timestamp(CAST(2016-08-31 AS TIMESTAMP), Asia/Seoul):timestamp>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.CreateNamedStruct
+
+-- Function name: named_struct
+-- !query
+SELECT named_struct("a", 1, "b", 2, "c", 3)
+-- !query schema
+struct<named_struct(a, 1, b, 2, c, 3):struct<a:int,b:int,c:int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.BRound
+
+-- Function name: bround
+-- !query
+SELECT bround(2.5, 0)
+-- !query schema
+struct<bround(2.5, 0):decimal(2,0)>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Year
+
+-- Function name: year
+-- !query
+SELECT year('2016-07-30')
+-- !query schema
+struct<year(CAST(2016-07-30 AS DATE)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.WeekOfYear
+
+-- Function name: weekofyear
+-- !query
+SELECT weekofyear('2008-02-20')
+-- !query schema
+struct<weekofyear(CAST(2008-02-20 AS DATE)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Hour
+
+-- Function name: hour
+-- !query
+SELECT hour('2009-07-30 12:58:59')
+-- !query schema
+struct<hour(CAST(2009-07-30 12:58:59 AS TIMESTAMP)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.DayOfWeek
+
+-- Function name: dayofweek
+-- !query
+SELECT dayofweek('2009-07-30')
+-- !query schema
+struct<dayofweek(CAST(2009-07-30 AS DATE)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayContains
+
+-- Function name: array_contains
+-- !query
+SELECT array_contains(array(1, 2, 3), 2)
+-- !query schema
+struct<array_contains(array(1, 2, 3), 2):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Base64
+
+-- Function name: base64
+-- !query
+SELECT base64('Spark SQL')
+-- !query schema
+struct<base64(CAST(Spark SQL AS BINARY)):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.UnaryMinus
+
+-- Function name: negative
+-- !query
+SELECT negative(1)
+-- !query schema
+struct<(- 1):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Explode
+
+-- Function name: explode
+-- !query
+SELECT explode(array(10, 20))
+-- !query schema
+struct<col:int>
+
+-- Function name: explode_outer
+-- !query
+SELECT explode_outer(array(10, 20))
+-- !query schema
+struct<col:int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ParseToDate
+
+-- Function name: to_date
+-- !query
+SELECT to_date('2009-07-30 04:17:52')
+-- !query schema
+struct<to_date('2009-07-30 04:17:52'):date>
+-- !query
+SELECT to_date('2016-12-31', 'yyyy-MM-dd')
+-- !query schema
+struct<to_date('2016-12-31', 'yyyy-MM-dd'):date>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ParseUrl
+
+-- Function name: parse_url
+-- !query
+SELECT parse_url('http://spark.apache.org/path?query=1', 'HOST')
+-- !query schema
+struct<parse_url(http://spark.apache.org/path?query=1, HOST):string>
+-- !query
+SELECT parse_url('http://spark.apache.org/path?query=1', 'QUERY')
+-- !query schema
+struct<parse_url(http://spark.apache.org/path?query=1, QUERY):string>
+-- !query
+SELECT parse_url('http://spark.apache.org/path?query=1', 'QUERY', 'query')
+-- !query schema
+struct<parse_url(http://spark.apache.org/path?query=1, QUERY, query):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Cosh
+
+-- Function name: cosh
+-- !query
+SELECT cosh(0)
+-- !query schema
+struct<COSH(CAST(0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayAggregate
+
+-- Function name: aggregate
+-- !query
+SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x)
+-- !query schema
+struct<aggregate(array(1, 2, 3), 0, lambdafunction((namedlambdavariable() + namedlambdavariable()), namedlambdavariable(), namedlambdavariable()), lambdafunction(namedlambdavariable(), namedlambdavariable())):int>
+-- !query
+SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x, acc -> acc * 10)
+-- !query schema
+struct<aggregate(array(1, 2, 3), 0, lambdafunction((namedlambdavariable() + namedlambdavariable()), namedlambdavariable(), namedlambdavariable()), lambdafunction((namedlambdavariable() * 10), namedlambdavariable())):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ShiftRightUnsigned
+
+-- Function name: shiftrightunsigned
+-- !query
+SELECT shiftrightunsigned(4, 1)
+-- !query schema
+struct<shiftrightunsigned(4, 1):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Nvl2
+
+-- Function name: nvl2
+-- !query
+SELECT nvl2(NULL, 2, 1)
+-- !query schema
+struct<nvl2(NULL, 2, 1):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.DateDiff
+
+-- Function name: datediff
+-- !query
+SELECT datediff('2009-07-31', '2009-07-30')
+-- !query schema
+struct<datediff(CAST(2009-07-31 AS DATE), CAST(2009-07-30 AS DATE)):int>
+-- !query
+SELECT datediff('2009-07-30', '2009-07-31')
+-- !query schema
+struct<datediff(CAST(2009-07-30 AS DATE), CAST(2009-07-31 AS DATE)):int>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Log1p
+
+-- Function name: log1p
+-- !query
+SELECT log1p(0)
+-- !query schema
+struct<LOG1P(CAST(0 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.NaNvl
+
+-- Function name: nanvl
+-- !query
+SELECT nanvl(cast('NaN' as double), 123)
+-- !query schema
+struct<nanvl(CAST(NaN AS DOUBLE), CAST(123 AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MapEntries
+
+-- Function name: map_entries
+-- !query
+SELECT map_entries(map(1, 'a', 2, 'b'))
+-- !query schema
+struct<map_entries(map(1, a, 2, b)):array<struct<key:int,value:string>>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Reverse
+
+-- Function name: reverse
+-- !query
+SELECT reverse('Spark SQL')
+-- !query schema
+struct<reverse(Spark SQL):string>
+-- !query
+SELECT reverse(array(2, 1, 4, 3))
+-- !query schema
+struct<reverse(array(2, 1, 4, 3)):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayIntersect
+
+-- Function name: array_intersect
+-- !query
+SELECT array_intersect(array(1, 2, 3), array(1, 3, 5))
+-- !query schema
+struct<array_intersect(array(1, 2, 3), array(1, 3, 5)):array<int>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.StddevSamp
+
+-- Function name: stddev_samp
+-- !query
+SELECT stddev_samp(col) FROM VALUES (1), (2), (3) AS tab(col)
+-- !query schema
+struct<stddev_samp(CAST(col AS DOUBLE)):double>
+
+-- Function name: stddev
+-- !query
+SELECT stddev(col) FROM VALUES (1), (2), (3) AS tab(col)
+-- !query schema
+struct<stddev(CAST(col AS DOUBLE)):double>
+
+-- Function name: std
+-- !query
+SELECT std(col) FROM VALUES (1), (2), (3) AS tab(col)
+-- !query schema
+struct<std(CAST(col AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.MapValues
+
+-- Function name: map_values
+-- !query
+SELECT map_values(map(1, 'a', 2, 'b'))
+-- !query schema
+struct<map_values(map(1, a, 2, b)):array<string>>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArraysOverlap
+
+-- Function name: arrays_overlap
+-- !query
+SELECT arrays_overlap(array(1, 2, 3), array(3, 4, 5))
+-- !query schema
+struct<arrays_overlap(array(1, 2, 3), array(3, 4, 5)):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Rollup
+
+-- Function name: rollup
+-- !query
+SELECT name, age, count(*) FROM VALUES (2, 'Alice'), (5, 'Bob') people(age, name) GROUP BY rollup(name, age)
+-- !query schema
+struct<name:string,age:int,count(1):bigint>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.LessThanOrEqual
+
+-- Function name: <=
+-- !query
+SELECT 2 <= 2
+-- !query schema
+struct<(2 <= 2):boolean>
+-- !query
+SELECT 1.0 <= '1'
+-- !query schema
+struct<(CAST(1.0 AS DOUBLE) <= CAST(1 AS DOUBLE)):boolean>
+-- !query
+SELECT to_date('2009-07-30 04:17:52') <= to_date('2009-07-30 04:17:52')
+-- !query schema
+struct<(to_date('2009-07-30 04:17:52') <= to_date('2009-07-30 04:17:52')):boolean>
+-- !query
+SELECT to_date('2009-07-30 04:17:52') <= to_date('2009-08-01 04:17:52')
+-- !query schema
+struct<(to_date('2009-07-30 04:17:52') <= to_date('2009-08-01 04:17:52')):boolean>
+-- !query
+SELECT 1 <= NULL
+-- !query schema
+struct<(1 <= CAST(NULL AS INT)):boolean>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Elt
+
+-- Function name: elt
+-- !query
+SELECT elt(1, 'scala', 'java')
+-- !query schema
+struct<elt(1, scala, java):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.aggregate.Skewness
+
+-- Function name: skewness
+-- !query
+SELECT skewness(col) FROM VALUES (-10), (-20), (100), (1000) AS tab(col)
+-- !query schema
+struct<skewness(CAST(col AS DOUBLE)):double>
+-- !query
+SELECT skewness(col) FROM VALUES (-1000), (-100), (10), (20) AS tab(col)
+-- !query schema
+struct<skewness(CAST(col AS DOUBLE)):double>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Left
+
+-- Function name: left
+-- !query
+SELECT left('Spark SQL', 3)
+-- !query schema
+struct<left('Spark SQL', 3):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.StringTrimRight
+
+-- Function name: rtrim
+-- !query
+SELECT rtrim('    SparkSQL   ')
+-- !query schema
+struct<rtrim(    SparkSQL   ):string>
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.Lead
+
+-- Function name: lead
+
+
+-- Class name: org.apache.spark.sql.catalyst.expressions.ArrayForAll
+
+-- Function name: forall
+-- !query
+SELECT forall(array(1, 2, 3), x -> x % 2 == 0)
+-- !query schema
+struct<forall(array(1, 2, 3), lambdafunction(((namedlambdavariable() % 2) = 0), namedlambdavariable())):boolean>
+-- !query
+SELECT forall(array(2, 4, 8), x -> x % 2 == 0)
+-- !query schema
+struct<forall(array(2, 4, 8), lambdafunction(((namedlambdavariable() % 2) = 0), namedlambdavariable())):boolean>
+-- !query
+SELECT forall(array(1, null, 3), x -> x % 2 == 0)
+-- !query schema
+struct<forall(array(1, CAST(NULL AS INT), 3), lambdafunction(((namedlambdavariable() % 2) = 0), namedlambdavariable())):boolean>
+-- !query
+SELECT forall(array(2, null, 8), x -> x % 2 == 0)
+-- !query schema
+struct<forall(array(2, CAST(NULL AS INT), 8), lambdafunction(((namedlambdavariable() % 2) = 0), namedlambdavariable())):boolean>
 
 Review comment:
  Actually, we do not need all of these example queries in the golden file. How about just showing the first one for each function?
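  If we trim it that way, a rough sketch of the change could look like the snippet below. This is only an illustration under the assumption that the suite collects the example queries grouped by function name; the `examplesByFunction` value and the output layout here are hypothetical, not the actual ExpressionsSchemaSuite internals.

```scala
// Hypothetical sketch: keep only the first example query per function
// before writing the golden file. Input shape and names are illustrative.
object FirstExampleOnly {
  def main(args: Array[String]): Unit = {
    // Assumed input: function name -> its example queries, in order.
    val examplesByFunction: Seq[(String, Seq[String])] = Seq(
      "ceil" -> Seq("SELECT ceil(-0.1)", "SELECT ceil(5)"),
      "sin"  -> Seq("SELECT sin(0)")
    )
    // Keep just the head of each non-empty example list.
    val trimmed = examplesByFunction.collect {
      case (name, queries) if queries.nonEmpty => name -> queries.head
    }
    // Emit in the same "-- Function name / -- !query" layout as output.out.
    trimmed.foreach { case (name, query) =>
      println(s"-- Function name: $name\n-- !query\n$query")
    }
  }
}
```

  That would keep the golden file to one query per function while still covering every expression class once.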
