maropu commented on a change in pull request #24881: [SPARK-23160][SQL][TEST] Port window.sql
URL: https://github.com/apache/spark/pull/24881#discussion_r317460926
##########
File path: sql/core/src/test/resources/sql-tests/results/pgSQL/window.sql.out
##########
@@ -0,0 +1,2247 @@
+-- Automatically generated by SQLQueryTestSuite
+-- Number of queries: 151
+
+
+-- !query 0
+CREATE TEMPORARY VIEW tenk2 AS SELECT * FROM tenk1
+-- !query 0 schema
+struct<>
+-- !query 0 output
+
+
+
+-- !query 1
+CREATE TABLE empsalary (
+ depname string,
+ empno integer,
+ salary int,
+ enroll_date date
+) USING parquet
+-- !query 1 schema
+struct<>
+-- !query 1 output
+
+
+
+-- !query 2
+INSERT INTO empsalary VALUES
+('develop', 10, 5200, '2007-08-01'),
+('sales', 1, 5000, '2006-10-01'),
+('personnel', 5, 3500, '2007-12-10'),
+('sales', 4, 4800, '2007-08-08'),
+('personnel', 2, 3900, '2006-12-23'),
+('develop', 7, 4200, '2008-01-01'),
+('develop', 9, 4500, '2008-01-01'),
+('sales', 3, 4800, '2007-08-01'),
+('develop', 8, 6000, '2006-10-01'),
+('develop', 11, 5200, '2007-08-15')
+-- !query 2 schema
+struct<>
+-- !query 2 output
+
+
+
+-- !query 3
+SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname) FROM
empsalary ORDER BY depname, salary
+-- !query 3 schema
+struct<depname:string,empno:int,salary:int,sum(CAST(salary AS BIGINT)) OVER
(PARTITION BY depname ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED
FOLLOWING):bigint>
+-- !query 3 output
+develop 7 4200 25100
+develop 9 4500 25100
+develop 10 5200 25100
+develop 11 5200 25100
+develop 8 6000 25100
+personnel 5 3500 7400
+personnel 2 3900 7400
+sales 4 4800 14600
+sales 3 4800 14600
+sales 1 5000 14600
+
+
+-- !query 4
+SELECT depname, empno, salary, rank() OVER (PARTITION BY depname ORDER BY
salary) FROM empsalary
+-- !query 4 schema
+struct<depname:string,empno:int,salary:int,RANK() OVER (PARTITION BY depname
ORDER BY salary ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT
ROW):int>
+-- !query 4 output
+develop 10 5200 3
+develop 11 5200 3
+develop 7 4200 1
+develop 8 6000 5
+develop 9 4500 2
+personnel 2 3900 2
+personnel 5 3500 1
+sales 1 5000 3
+sales 3 4800 1
+sales 4 4800 1
+
+
+-- !query 5
+SELECT four, ten, SUM(SUM(four)) OVER (PARTITION BY four), AVG(ten) FROM tenk1
+GROUP BY four, ten ORDER BY four, ten
+-- !query 5 schema
+struct<four:int,ten:int,sum(sum(CAST(four AS BIGINT))) OVER (PARTITION BY four
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED
FOLLOWING):bigint,avg(ten):double>
+-- !query 5 output
+0 0 0 0.0
+0 2 0 2.0
+0 4 0 4.0
+0 6 0 6.0
+0 8 0 8.0
+1 1 2500 1.0
+1 3 2500 3.0
+1 5 2500 5.0
+1 7 2500 7.0
+1 9 2500 9.0
+2 0 5000 0.0
+2 2 5000 2.0
+2 4 5000 4.0
+2 6 5000 6.0
+2 8 5000 8.0
+3 1 7500 1.0
+3 3 7500 3.0
+3 5 7500 5.0
+3 7 7500 7.0
+3 9 7500 9.0
+
+
+-- !query 6
+SELECT depname, empno, salary, sum(salary) OVER w FROM empsalary WINDOW w AS
(PARTITION BY depname)
+-- !query 6 schema
+struct<depname:string,empno:int,salary:int,sum(CAST(salary AS BIGINT)) OVER
(PARTITION BY depname ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED
FOLLOWING):bigint>
+-- !query 6 output
+develop 10 5200 25100
+develop 11 5200 25100
+develop 7 4200 25100
+develop 8 6000 25100
+develop 9 4500 25100
+personnel 2 3900 7400
+personnel 5 3500 7400
+sales 1 5000 14600
+sales 3 4800 14600
+sales 4 4800 14600
+
+
+-- !query 7
+SELECT COUNT(*) OVER () FROM tenk1 WHERE unique2 < 10
+-- !query 7 schema
+struct<count(1) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED
FOLLOWING):bigint>
+-- !query 7 output
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+
+
+-- !query 8
+SELECT COUNT(*) OVER w FROM tenk1 WHERE unique2 < 10 WINDOW w AS ()
+-- !query 8 schema
+struct<count(1) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED
FOLLOWING):bigint>
+-- !query 8 output
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+
+
+-- !query 9
+SELECT four FROM tenk1 WHERE FALSE WINDOW w AS (PARTITION BY ten)
+-- !query 9 schema
+struct<four:int>
+-- !query 9 output
+
+
+
+-- !query 10
+SELECT sum(four) OVER (PARTITION BY ten ORDER BY unique2) AS sum_1, ten, four
FROM tenk1 WHERE unique2 < 10
+-- !query 10 schema
+struct<sum_1:bigint,ten:int,four:int>
+-- !query 10 output
+0 0 0
+0 0 0
+0 4 0
+1 7 1
+1 9 1
+2 0 2
+3 1 3
+3 3 3
+4 1 1
+5 1 1
+
+
+-- !query 11
+SELECT row_number() OVER (ORDER BY unique2) FROM tenk1 WHERE unique2 < 10
+-- !query 11 schema
+struct<row_number() OVER (ORDER BY unique2 ASC NULLS FIRST ROWS BETWEEN
UNBOUNDED PRECEDING AND CURRENT ROW):int>
+-- !query 11 output
+1
+10
+2
+3
+4
+5
+6
+7
+8
+9
+
+
+-- !query 12
+SELECT rank() OVER (PARTITION BY four ORDER BY ten) AS rank_1, ten, four FROM
tenk1 WHERE unique2 < 10
+-- !query 12 schema
+struct<rank_1:int,ten:int,four:int>
+-- !query 12 output
+1 0 0
+1 0 0
+1 0 2
+1 1 1
+1 1 1
+1 1 3
+2 3 3
+3 4 0
+3 7 1
+4 9 1
+
+
+-- !query 13
+SELECT dense_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM
tenk1 WHERE unique2 < 10
+-- !query 13 schema
+struct<DENSE_RANK() OVER (PARTITION BY four ORDER BY ten ASC NULLS FIRST ROWS
BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):int,ten:int,four:int>
+-- !query 13 output
+1 0 0
+1 0 0
+1 0 2
+1 1 1
+1 1 1
+1 1 3
+2 3 3
+2 4 0
+2 7 1
+3 9 1
+
+
+-- !query 14
+SELECT percent_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM
tenk1 WHERE unique2 < 10
+-- !query 14 schema
+struct<PERCENT_RANK() OVER (PARTITION BY four ORDER BY ten ASC NULLS FIRST
ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):double,ten:int,four:int>
+-- !query 14 output
+0.0 0 0
+0.0 0 0
+0.0 0 2
+0.0 1 1
+0.0 1 1
+0.0 1 3
+0.6666666666666666 7 1
+1.0 3 3
+1.0 4 0
+1.0 9 1
+
+
+-- !query 15
+SELECT cume_dist() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1
WHERE unique2 < 10
+-- !query 15 schema
+struct<cume_dist() OVER (PARTITION BY four ORDER BY ten ASC NULLS FIRST RANGE
BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):double,ten:int,four:int>
+-- !query 15 output
+0.5 1 1
+0.5 1 1
+0.5 1 3
+0.6666666666666666 0 0
+0.6666666666666666 0 0
+0.75 7 1
+1.0 0 2
+1.0 3 3
+1.0 4 0
+1.0 9 1
+
+
+-- !query 16
+SELECT ntile(3) OVER (ORDER BY ten, four), ten, four FROM tenk1 WHERE unique2
< 10
+-- !query 16 schema
+struct<ntile(3) OVER (ORDER BY ten ASC NULLS FIRST, four ASC NULLS FIRST ROWS
BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):int,ten:int,four:int>
+-- !query 16 output
+1 0 0
+1 0 0
+1 0 2
+1 1 1
+2 1 1
+2 1 3
+2 3 3
+3 4 0
+3 7 1
+3 9 1
+
+
+-- !query 17
+SELECT lag(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1
WHERE unique2 < 10
+-- !query 17 schema
+struct<lag(ten, 1, NULL) OVER (PARTITION BY four ORDER BY ten ASC NULLS FIRST
ROWS BETWEEN -1 FOLLOWING AND -1 FOLLOWING):int,ten:int,four:int>
+-- !query 17 output
+0 0 0
+0 4 0
+1 1 1
+1 3 3
+1 7 1
+7 9 1
+NULL 0 0
+NULL 0 2
+NULL 1 1
+NULL 1 3
+
+
+-- !query 18
+SELECT lead(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1
WHERE unique2 < 10
+-- !query 18 schema
+struct<lead(ten, 1, NULL) OVER (PARTITION BY four ORDER BY ten ASC NULLS FIRST
ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING):int,ten:int,four:int>
+-- !query 18 output
+0 0 0
+1 1 1
+3 1 3
+4 0 0
+7 1 1
+9 7 1
+NULL 0 2
+NULL 3 3
+NULL 4 0
+NULL 9 1
+
+
+-- !query 19
+SELECT lead(ten * 2, 1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM
tenk1 WHERE unique2 < 10
+-- !query 19 schema
+struct<lead((ten * 2), 1, NULL) OVER (PARTITION BY four ORDER BY ten ASC NULLS
FIRST ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING):int,ten:int,four:int>
+-- !query 19 output
+0 0 0
+14 1 1
+18 7 1
+2 1 1
+6 1 3
+8 0 0
+NULL 0 2
+NULL 3 3
+NULL 4 0
+NULL 9 1
+
+
+-- !query 20
+SELECT lead(ten * 2, 1, -1) OVER (PARTITION BY four ORDER BY ten), ten, four
FROM tenk1 WHERE unique2 < 10
+-- !query 20 schema
+struct<lead((ten * 2), 1, -1) OVER (PARTITION BY four ORDER BY ten ASC NULLS
FIRST ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING):int,ten:int,four:int>
+-- !query 20 output
+-1 0 2
+-1 3 3
+-1 4 0
+-1 9 1
+0 0 0
+14 1 1
+18 7 1
+2 1 1
+6 1 3
+8 0 0
+
+
+-- !query 21
+SELECT first(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1
WHERE unique2 < 10
+-- !query 21 schema
+struct<first(ten, false) OVER (PARTITION BY four ORDER BY ten ASC NULLS FIRST
RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):int,ten:int,four:int>
+-- !query 21 output
+0 0 0
+0 0 0
+0 0 2
+0 4 0
+1 1 1
+1 1 1
+1 1 3
+1 3 3
+1 7 1
+1 9 1
+
+
+-- !query 22
+SELECT last(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10
+-- !query 22 schema
+struct<last(four, false) OVER (ORDER BY ten ASC NULLS FIRST RANGE BETWEEN
UNBOUNDED PRECEDING AND CURRENT ROW):int,ten:int,four:int>
+-- !query 22 output
+0 4 0
+1 1 1
+1 1 1
+1 1 3
+1 7 1
+1 9 1
+2 0 0
+2 0 0
+2 0 2
+3 3 3
+
+
+-- !query 23
+SELECT last(ten) OVER (PARTITION BY four), ten, four FROM
+(SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s
+ORDER BY four, ten
+-- !query 23 schema
+struct<last(ten, false) OVER (PARTITION BY four ROWS BETWEEN UNBOUNDED
PRECEDING AND UNBOUNDED FOLLOWING):int,ten:int,four:int>
+-- !query 23 output
+4 0 0
+4 0 0
+4 4 0
+9 1 1
+9 1 1
+9 7 1
+9 9 1
+0 0 2
+3 1 3
+3 3 3
+
+
+-- !query 24
+SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY
two ORDER BY ten) AS wsum
+FROM tenk1 GROUP BY ten, two
+-- !query 24 schema
+struct<ten:int,two:int,gsum:bigint,wsum:bigint>
+-- !query 24 output
+0 0 45000 45000
+1 1 46000 46000
+2 0 47000 92000
+3 1 48000 94000
+4 0 49000 141000
+5 1 50000 144000
+6 0 51000 192000
+7 1 52000 196000
+8 0 53000 245000
+9 1 54000 250000
+
+
+-- !query 25
+SELECT count(*) OVER (PARTITION BY four), four FROM (SELECT * FROM tenk1 WHERE
two = 1)s WHERE unique2 < 10
+-- !query 25 schema
+struct<count(1) OVER (PARTITION BY four ROWS BETWEEN UNBOUNDED PRECEDING AND
UNBOUNDED FOLLOWING):bigint,four:int>
+-- !query 25 output
+2 3
+2 3
+4 1
+4 1
+4 1
+4 1
+
+
+-- !query 26
+SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) +
+ sum(hundred) OVER (PARTITION BY four ORDER BY ten)) AS cntsum
+ FROM tenk1 WHERE unique2 < 10
+-- !query 26 schema
+struct<cntsum:bigint>
+-- !query 26 output
+136
+22
+22
+24
+24
+51
+82
+87
+92
+92
+
+
+-- !query 27
+SELECT * FROM(
+ SELECT count(*) OVER (PARTITION BY four ORDER BY ten) +
+ sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total,
+ count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount,
+ sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum
+ FROM tenk1
+)sub WHERE total <> fourcount + twosum
+-- !query 27 schema
+struct<total:bigint,fourcount:bigint,twosum:bigint>
+-- !query 27 output
+
+
+
+-- !query 28
+SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) FROM tenk1
WHERE unique2 < 10
+-- !query 28 schema
+struct<avg(CAST(four AS BIGINT)) OVER (PARTITION BY four ORDER BY (thousand
div 100) ASC NULLS FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT
ROW):double>
+-- !query 28 output
+0.0
+0.0
+0.0
+1.0
+1.0
+1.0
+1.0
+2.0
+3.0
+3.0
+
+
+-- !query 29
+SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum
+FROM tenk1 GROUP BY ten, two WINDOW win AS (PARTITION BY two ORDER BY ten)
+-- !query 29 schema
+struct<ten:int,two:int,gsum:bigint,wsum:bigint>
+-- !query 29 output
+0 0 45000 45000
+1 1 46000 46000
+2 0 47000 92000
+3 1 48000 94000
+4 0 49000 141000
+5 1 50000 144000
+6 0 51000 192000
+7 1 52000 196000
+8 0 53000 245000
+9 1 54000 250000
+
+
+-- !query 30
+SELECT sum(salary),
+ row_number() OVER (ORDER BY depname),
+ sum(sum(salary)) OVER (ORDER BY depname DESC)
+FROM empsalary GROUP BY depname
+-- !query 30 schema
+struct<sum(salary):bigint,row_number() OVER (ORDER BY depname ASC NULLS FIRST
ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):int,sum(sum(CAST(salary AS
BIGINT))) OVER (ORDER BY depname DESC NULLS LAST RANGE BETWEEN UNBOUNDED
PRECEDING AND CURRENT ROW):bigint>
+-- !query 30 output
+14600 3 14600
+25100 1 47100
+7400 2 22000
+
+
+-- !query 31
+SELECT sum(salary) OVER w1, count(*) OVER w2
+FROM empsalary WINDOW w1 AS (ORDER BY salary), w2 AS (ORDER BY salary)
+-- !query 31 schema
+struct<sum(CAST(salary AS BIGINT)) OVER (ORDER BY salary ASC NULLS FIRST RANGE
BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):bigint,count(1) OVER (ORDER BY
salary ASC NULLS FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT
ROW):bigint>
+-- !query 31 output
+11600 3
+16100 4
+25700 6
+25700 6
+30700 7
+3500 1
+41100 9
+41100 9
+47100 10
+7400 2
+
+
+-- !query 32
+SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 WHERE
FALSE)s
+-- !query 32 schema
+struct<count(1) OVER (PARTITION BY four ROWS BETWEEN UNBOUNDED PRECEDING AND
UNBOUNDED FOLLOWING):bigint>
+-- !query 32 output
+
+
+
+-- !query 33
+SELECT sum(salary) OVER w, rank() OVER w FROM empsalary WINDOW w AS (PARTITION
BY depname ORDER BY salary DESC)
+-- !query 33 schema
+struct<sum(CAST(salary AS BIGINT)) OVER (PARTITION BY depname ORDER BY salary
DESC NULLS LAST RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT
ROW):bigint,RANK() OVER (PARTITION BY depname ORDER BY salary DESC NULLS LAST
ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):int>
+-- !query 33 output
+14600 2
+14600 2
+16400 2
+16400 2
+20900 4
+25100 5
+3900 1
+5000 1
+6000 1
+7400 2
+
+
+-- !query 34
+SET spark.sql.parser.ansi.enabled=false
+-- !query 34 schema
+struct<key:string,value:string>
+-- !query 34 output
+spark.sql.parser.ansi.enabled false
+
+
+-- !query 35
+SELECT empno, depname, salary, bonus, depadj, MIN(bonus) OVER (ORDER BY
empno), MAX(depadj) OVER () FROM(
+SELECT *,
+ CASE WHEN enroll_date < '2008-01-01' THEN 2008 - extract(year FROM
enroll_date) END * 500 AS bonus,
+ CASE WHEN
+ AVG(salary) OVER (PARTITION BY depname) < salary
+ THEN 200 END AS depadj FROM empsalary
+ )s
+-- !query 35 schema
+struct<empno:int,depname:string,salary:int,bonus:int,depadj:int,min(bonus)
OVER (ORDER BY empno ASC NULLS FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND
CURRENT ROW):int,max(depadj) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND
UNBOUNDED FOLLOWING):int>
+-- !query 35 output
+1 sales 5000 1000 200 1000 200
+10 develop 5200 500 200 500 200
+11 develop 5200 500 200 500 200
+2 personnel 3900 1000 200 1000 200
+3 sales 4800 500 NULL 500 200
+4 sales 4800 500 NULL 500 200
+5 personnel 3500 500 NULL 500 200
+7 develop 4200 NULL NULL 500 200
+8 develop 6000 1000 200 500 200
+9 develop 4500 NULL NULL 500 200
+
+
+-- !query 36
+SET spark.sql.parser.ansi.enabled=true
+-- !query 36 schema
+struct<key:string,value:string>
+-- !query 36 output
+spark.sql.parser.ansi.enabled true
+
+
+-- !query 37
+create temporary view int4_tbl as select * from values
+ (0),
+ (123456),
+ (-123456),
+ (2147483647),
+ (-2147483647)
+ as int4_tbl(f1)
+-- !query 37 schema
+struct<>
+-- !query 37 output
+
+
+
+-- !query 38
+SELECT SUM(COUNT(f1)) OVER () FROM int4_tbl WHERE f1=42
+-- !query 38 schema
+struct<sum(count(f1)) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED
FOLLOWING):bigint>
+-- !query 38 output
+0
+
+
+-- !query 39
+select ten,
+ sum(unique1) + sum(unique2) as res,
+ rank() over (order by sum(unique1) + sum(unique2)) as rank
+from tenk1
+group by ten order by ten
+-- !query 39 schema
+struct<ten:int,res:bigint,rank:int>
+-- !query 39 output
+0 9976146 4
+1 10114187 9
+2 10059554 8
+3 9878541 1
+4 9881005 2
+5 9981670 5
+6 9947099 3
+7 10120309 10
+8 9991305 6
+9 10040184 7
+
+
+-- !query 40
+SELECT four, ten,
+sum(ten) over (partition by four order by ten),
+last(ten) over (partition by four order by ten)
+FROM (select distinct ten, four from tenk1) ss
+-- !query 40 schema
+struct<four:int,ten:int,sum(CAST(ten AS BIGINT)) OVER (PARTITION BY four ORDER
BY ten ASC NULLS FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT
ROW):bigint,last(ten, false) OVER (PARTITION BY four ORDER BY ten ASC NULLS
FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):int>
+-- !query 40 output
+0 0 0 0
+0 2 2 2
+0 4 6 4
+0 6 12 6
+0 8 20 8
+1 1 1 1
+1 3 4 3
+1 5 9 5
+1 7 16 7
+1 9 25 9
+2 0 0 0
+2 2 2 2
+2 4 6 4
+2 6 12 6
+2 8 20 8
+3 1 1 1
+3 3 4 3
+3 5 9 5
+3 7 16 7
+3 9 25 9
+
+
+-- !query 41
+SELECT four, ten,
+sum(ten) over (partition by four order by ten range between unbounded
preceding and current row),
+last(ten) over (partition by four order by ten range between unbounded
preceding and current row)
+FROM (select distinct ten, four from tenk1) ss
+-- !query 41 schema
+struct<four:int,ten:int,sum(ten) OVER (PARTITION BY four ORDER BY ten ASC
NULLS FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):bigint,last(ten,
false) OVER (PARTITION BY four ORDER BY ten ASC NULLS FIRST RANGE BETWEEN
UNBOUNDED PRECEDING AND CURRENT ROW):int>
+-- !query 41 output
+0 0 0 0
+0 2 2 2
+0 4 6 4
+0 6 12 6
+0 8 20 8
+1 1 1 1
+1 3 4 3
+1 5 9 5
+1 7 16 7
+1 9 25 9
+2 0 0 0
+2 2 2 2
+2 4 6 4
+2 6 12 6
+2 8 20 8
+3 1 1 1
+3 3 4 3
+3 5 9 5
+3 7 16 7
+3 9 25 9
+
+
+-- !query 42
+SELECT four, ten,
+sum(ten) over (partition by four order by ten range between unbounded
preceding and unbounded following),
+last(ten) over (partition by four order by ten range between unbounded
preceding and unbounded following)
+FROM (select distinct ten, four from tenk1) ss
+-- !query 42 schema
+struct<four:int,ten:int,sum(ten) OVER (PARTITION BY four ORDER BY ten ASC
NULLS FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED
FOLLOWING):bigint,last(ten, false) OVER (PARTITION BY four ORDER BY ten ASC
NULLS FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING):int>
+-- !query 42 output
+0 0 20 8
+0 2 20 8
+0 4 20 8
+0 6 20 8
+0 8 20 8
+1 1 25 9
+1 3 25 9
+1 5 25 9
+1 7 25 9
+1 9 25 9
+2 0 20 8
+2 2 20 8
+2 4 20 8
+2 6 20 8
+2 8 20 8
+3 1 25 9
+3 3 25 9
+3 5 25 9
+3 7 25 9
+3 9 25 9
+
+
+-- !query 43
+SELECT four, ten/4 as two,
+sum(ten/4) over (partition by four order by ten/4 range between unbounded
preceding and current row),
+last(ten/4) over (partition by four order by ten/4 range between unbounded
preceding and current row)
+FROM (select distinct ten, four from tenk1) ss
+-- !query 43 schema
+struct<four:int,two:int,sum((ten div 4)) OVER (PARTITION BY four ORDER BY (ten
div 4) ASC NULLS FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT
ROW):bigint,last((ten div 4), false) OVER (PARTITION BY four ORDER BY (ten div
4) ASC NULLS FIRST RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):int>
+-- !query 43 output
+0 0 0 0
+0 0 0 0
+0 1 2 1
+0 1 2 1
+0 2 4 2
+1 0 0 0
+1 0 0 0
+1 1 2 1
+1 1 2 1
+1 2 4 2
+2 0 0 0
+2 0 0 0
+2 1 2 1
+2 1 2 1
+2 2 4 2
+3 0 0 0
+3 0 0 0
+3 1 2 1
+3 1 2 1
+3 2 4 2
+
+
+-- !query 44
+SELECT four, ten/4 as two,
+sum(ten/4) over (partition by four order by ten/4 rows between unbounded
preceding and current row),
+last(ten/4) over (partition by four order by ten/4 rows between unbounded
preceding and current row)
+FROM (select distinct ten, four from tenk1) ss
+-- !query 44 schema
+struct<four:int,two:int,sum((ten div 4)) OVER (PARTITION BY four ORDER BY (ten
div 4) ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT
ROW):bigint,last((ten div 4), false) OVER (PARTITION BY four ORDER BY (ten div
4) ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW):int>
+-- !query 44 output
+0 0 0 0
+0 0 0 0
+0 1 1 1
+0 1 2 1
+0 2 4 2
+1 0 0 0
+1 0 0 0
+1 1 1 1
+1 1 2 1
+1 2 4 2
+2 0 0 0
+2 0 0 0
+2 1 1 1
+2 1 2 1
+2 2 4 2
+3 0 0 0
+3 0 0 0
+3 1 1 1
+3 1 2 1
+3 2 4 2
+
+
+-- !query 45
+SELECT sum(unique1) over (order by four range between current row and
unbounded following),
+unique1, four
+FROM tenk1 WHERE unique1 < 10
+-- !query 45 schema
+struct<sum(unique1) OVER (ORDER BY four ASC NULLS FIRST RANGE BETWEEN CURRENT
ROW AND UNBOUNDED FOLLOWING):bigint,unique1:int,four:int>
+-- !query 45 output
+10 3 3
+10 7 3
+18 2 2
+18 6 2
+33 1 1
+33 5 1
+33 9 1
+45 0 0
+45 4 0
+45 8 0
+
+
+-- !query 46
+SELECT sum(unique1) over (rows between current row and unbounded following),
+unique1, four
+FROM tenk1 WHERE unique1 < 10
+-- !query 46 schema
+struct<sum(unique1) OVER (ROWS BETWEEN CURRENT ROW AND UNBOUNDED
FOLLOWING):bigint,unique1:int,four:int>
+-- !query 46 output
+0 0 0
+10 3 3
+15 5 1
+23 8 0
+32 9 1
+38 6 2
+39 1 1
+41 2 2
+45 4 0
+7 7 3
+
+
+-- !query 47
+SELECT sum(unique1) over (rows between 2 preceding and 2 following),
+unique1, four
+FROM tenk1 WHERE unique1 < 10
+-- !query 47 schema
+struct<sum(unique1) OVER (ROWS BETWEEN 2 PRECEDING AND 2
FOLLOWING):bigint,unique1:int,four:int>
+-- !query 47 output
+10 0 0
+13 2 2
+15 7 3
+22 1 1
+23 3 3
+26 6 2
+29 9 1
+31 8 0
+32 5 1
+7 4 0
+
+
+-- !query 48
+SELECT sum(unique1) over (rows between 2 preceding and 1 preceding),
+unique1, four
+FROM tenk1 WHERE unique1 < 10
+-- !query 48 schema
+struct<sum(unique1) OVER (ROWS BETWEEN 2 PRECEDING AND 1
PRECEDING):bigint,unique1:int,four:int>
+-- !query 48 output
+10 0 0
+13 3 3
+15 8 0
+17 5 1
+3 6 2
+4 2 2
+6 1 1
+7 9 1
+8 7 3
+NULL 4 0
+
+
+-- !query 49
+SELECT sum(unique1) over (rows between 1 following and 3 following),
+unique1, four
+FROM tenk1 WHERE unique1 < 10
+-- !query 49 schema
+struct<sum(unique1) OVER (ROWS BETWEEN 1 FOLLOWING AND 3
FOLLOWING):bigint,unique1:int,four:int>
+-- !query 49 output
+0 7 3
+10 5 1
+15 8 0
+16 2 2
+16 9 1
+22 6 2
+23 1 1
+7 3 3
+9 4 0
+NULL 0 0
+
+
+-- !query 50
+SELECT sum(unique1) over (rows between unbounded preceding and 1 following),
+unique1, four
+FROM tenk1 WHERE unique1 < 10
+-- !query 50 schema
+struct<sum(unique1) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1
FOLLOWING):bigint,unique1:int,four:int>
+-- !query 50 output
+13 1 1
+22 6 2
+30 9 1
+35 8 0
+38 5 1
+45 0 0
+45 3 3
+45 7 3
+6 4 0
+7 2 2
+
+
+-- !query 51
+SELECT sum(unique1) over (w range between current row and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four)
+-- !query 51 schema
+struct<>
+-- !query 51 output
+org.apache.spark.sql.catalyst.parser.ParseException
+
+mismatched input '(' expecting {<EOF>, ',', 'CLUSTER', 'DISTRIBUTE', 'EXCEPT',
'FROM', 'GROUP', 'HAVING', 'INTERSECT', 'LATERAL', 'LIMIT', 'ORDER', 'MINUS',
'SORT', 'UNION', 'WHERE', 'WINDOW', '-'}(line 1, pos 25)
+
+== SQL ==
+SELECT sum(unique1) over (w range between current row and unbounded following),
+-------------------------^^^
+ unique1, four
+FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four)
+
+
+-- !query 52
+CREATE TEMP VIEW v_window AS
+SELECT i.id, sum(i.id) over (order by i.id rows between 1 preceding and 1
following) as sum_rows
+FROM range(1, 11) i
+-- !query 52 schema
+struct<>
+-- !query 52 output
+
+
+
+-- !query 53
+SELECT * FROM v_window
+-- !query 53 schema
+struct<id:bigint,sum_rows:bigint>
+-- !query 53 output
+1 3
+10 19
+2 6
+3 9
+4 12
+5 15
+6 18
+7 21
+8 24
+9 27
+
+
+-- !query 54
+DROP VIEW v_window
+-- !query 54 schema
+struct<>
+-- !query 54 output
+
+
+
+-- !query 55
+SELECT sum(unique1) over (order by four range between 2 preceding and 1
preceding),
+unique1, four
+FROM tenk1 WHERE unique1 < 10
+-- !query 55 schema
+struct<sum(unique1) OVER (ORDER BY four ASC NULLS FIRST RANGE BETWEEN 2
PRECEDING AND 1 PRECEDING):bigint,unique1:int,four:int>
+-- !query 55 output
+12 1 1
+12 5 1
+12 9 1
+23 3 3
+23 7 3
+27 2 2
+27 6 2
+NULL 0 0
+NULL 4 0
+NULL 8 0
+
+
+-- !query 56
+SELECT sum(unique1) over (order by four desc range between 2 preceding and 1
preceding),
+unique1, four
+FROM tenk1 WHERE unique1 < 10
+-- !query 56 schema
+struct<sum(unique1) OVER (ORDER BY four DESC NULLS LAST RANGE BETWEEN 2
PRECEDING AND 1 PRECEDING):bigint,unique1:int,four:int>
+-- !query 56 output
+10 2 2
+10 6 2
+18 1 1
+18 5 1
+18 9 1
+23 0 0
+23 4 0
+23 8 0
+NULL 3 3
+NULL 7 3
+
+
+-- !query 57
+SELECT sum(unique1) over (partition by four order by unique1 range between 5
preceding and 6 following),
+unique1, four
+FROM tenk1 WHERE unique1 < 10
+-- !query 57 schema
+struct<sum(unique1) OVER (PARTITION BY four ORDER BY unique1 ASC NULLS FIRST
RANGE BETWEEN 5 PRECEDING AND 6 FOLLOWING):bigint,unique1:int,four:int>
+-- !query 57 output
+10 3 3
+10 7 3
+12 4 0
+12 8 0
+14 9 1
+15 5 1
+4 0 0
+6 1 1
+8 2 2
+8 6 2
+
+
+-- !query 58
+select last(salary) over(order by salary range between 1000 preceding and 1000
following),
+lag(salary) over(order by salary range between 1000 preceding and 1000
following),
+salary from empsalary
+-- !query 58 schema
+struct<>
+-- !query 58 output
+org.apache.spark.sql.AnalysisException
+Window Frame specifiedwindowframe(RangeFrame, -1000, 1000) must match the
required frame specifiedwindowframe(RowFrame, -1, -1);
+
+
+-- !query 59
+select ss.id, ss.y,
+ first(ss.y) over w,
+ last(ss.y) over w
+from
+ (select x.id, x.id as y from range(1,6) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by ss.id asc nulls first range between 2 preceding and 2 following)
+-- !query 59 schema
+struct<id:bigint,y:bigint,first(y, false) OVER (ORDER BY id ASC NULLS FIRST
RANGE BETWEEN CAST((- 2) AS BIGINT) FOLLOWING AND CAST(2 AS BIGINT)
FOLLOWING):bigint,last(y, false) OVER (ORDER BY id ASC NULLS FIRST RANGE
BETWEEN CAST((- 2) AS BIGINT) FOLLOWING AND CAST(2 AS BIGINT) FOLLOWING):bigint>
+-- !query 59 output
+1 1 1 3
+2 2 1 4
+3 3 1 5
+4 4 2 5
+5 5 3 5
+NULL 42 42 43
+NULL 43 42 43
+
+
+-- !query 60
+select ss.id, ss.y,
+ first(ss.y) over w,
+ last(ss.y) over w
+from
+ (select x.id, x.id as y from range(1,6) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by ss.id asc nulls last range between 2 preceding and 2 following)
+-- !query 60 schema
+struct<id:bigint,y:bigint,first(y, false) OVER (ORDER BY id ASC NULLS LAST
RANGE BETWEEN CAST((- 2) AS BIGINT) FOLLOWING AND CAST(2 AS BIGINT)
FOLLOWING):bigint,last(y, false) OVER (ORDER BY id ASC NULLS LAST RANGE BETWEEN
CAST((- 2) AS BIGINT) FOLLOWING AND CAST(2 AS BIGINT) FOLLOWING):bigint>
+-- !query 60 output
+1 1 1 3
+2 2 1 4
+3 3 1 5
+4 4 2 5
+5 5 3 5
+NULL 42 42 43
+NULL 43 42 43
+
+
+-- !query 61
+select ss.id, ss.y,
+ first(ss.y) over w,
+ last(ss.y) over w
+from
+ (select x.id, x.id as y from range(1,6) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by ss.id desc nulls first range between 2 preceding and 2 following)
+-- !query 61 schema
+struct<id:bigint,y:bigint,first(y, false) OVER (ORDER BY id DESC NULLS FIRST
RANGE BETWEEN CAST((- 2) AS BIGINT) FOLLOWING AND CAST(2 AS BIGINT)
FOLLOWING):bigint,last(y, false) OVER (ORDER BY id DESC NULLS FIRST RANGE
BETWEEN CAST((- 2) AS BIGINT) FOLLOWING AND CAST(2 AS BIGINT) FOLLOWING):bigint>
+-- !query 61 output
+1 1 3 1
+2 2 4 1
+3 3 5 1
+4 4 5 2
+5 5 5 3
+NULL 42 42 43
+NULL 43 42 43
+
+
+-- !query 62
+select ss.id, ss.y,
+ first(ss.y) over w,
+ last(ss.y) over w
+from
+ (select x.id, x.id as y from range(1,6) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by ss.id desc nulls last range between 2 preceding and 2 following)
+-- !query 62 schema
+struct<id:bigint,y:bigint,first(y, false) OVER (ORDER BY id DESC NULLS LAST
RANGE BETWEEN CAST((- 2) AS BIGINT) FOLLOWING AND CAST(2 AS BIGINT)
FOLLOWING):bigint,last(y, false) OVER (ORDER BY id DESC NULLS LAST RANGE
BETWEEN CAST((- 2) AS BIGINT) FOLLOWING AND CAST(2 AS BIGINT) FOLLOWING):bigint>
+-- !query 62 output
+1 1 3 1
+2 2 4 1
+3 3 5 1
+4 4 5 2
+5 5 5 3
+NULL 42 42 43
+NULL 43 42 43
+
+
+-- !query 63
+select x.id, last(x.id) over (order by x.id range between current row and
2147450884 following)
+from range(32764, 32767) x
+-- !query 63 schema
+struct<id:bigint,last(id, false) OVER (ORDER BY id ASC NULLS FIRST RANGE
BETWEEN CURRENT ROW AND CAST(2147450884 AS BIGINT) FOLLOWING):bigint>
+-- !query 63 output
+32764 32766
+32765 32766
+32766 32766
+
+
+-- !query 64
+select x.id, last(x.id) over (order by x.id desc range between current row and
2147450885 following)
+from range(-32766, -32765) x
+-- !query 64 schema
+struct<id:bigint,last(id, false) OVER (ORDER BY id DESC NULLS LAST RANGE
BETWEEN CURRENT ROW AND CAST(2147450885 AS BIGINT) FOLLOWING):bigint>
+-- !query 64 output
+-32766 -32766
+
+
+-- !query 65
+select x.id, last(x.id) over (order by x.id range between current row and 4
following)
+from range(2147483644, 2147483647) x
+-- !query 65 schema
+struct<id:bigint,last(id, false) OVER (ORDER BY id ASC NULLS FIRST RANGE
BETWEEN CURRENT ROW AND CAST(4 AS BIGINT) FOLLOWING):bigint>
+-- !query 65 output
+2147483644 2147483646
+2147483645 2147483646
+2147483646 2147483646
+
+
+-- !query 66
+select x.id, last(x.id) over (order by x.id desc range between current row and
5 following)
+from range(-2147483646, -2147483645) x
+-- !query 66 schema
+struct<id:bigint,last(id, false) OVER (ORDER BY id DESC NULLS LAST RANGE
BETWEEN CURRENT ROW AND CAST(5 AS BIGINT) FOLLOWING):bigint>
+-- !query 66 output
+-2147483646 -2147483646
+
+
+-- !query 67
+select x.id, last(x.id) over (order by x.id range between current row and 4
following)
+from range(9223372036854775804, 9223372036854775807) x
+-- !query 67 schema
+struct<id:bigint,last(id, false) OVER (ORDER BY id ASC NULLS FIRST RANGE
BETWEEN CURRENT ROW AND CAST(4 AS BIGINT) FOLLOWING):bigint>
+-- !query 67 output
+9223372036854775804 NULL
+9223372036854775805 NULL
+9223372036854775806 NULL
+
+
+-- !query 68
+select x.id, last(x.id) over (order by x.id desc range between current row and
5 following)
+from range(-9223372036854775806, -9223372036854775805) x
+-- !query 68 schema
+struct<id:bigint,last(id, false) OVER (ORDER BY id DESC NULLS LAST RANGE
BETWEEN CURRENT ROW AND CAST(5 AS BIGINT) FOLLOWING):bigint>
+-- !query 68 output
+-9223372036854775806 NULL
+
+
+-- !query 69
+create table numerics (
+ id int,
+ f_float4 float,
+ f_float8 float,
+ f_numeric int
+) using parquet
+-- !query 69 schema
+struct<>
+-- !query 69 output
+
+
+
+-- !query 70
+insert into numerics values
+(1, -3, -3, -3),
+(2, -1, -1, -1),
+(3, 0, 0, 0),
+(4, 1.1, 1.1, 1.1),
+(5, 1.12, 1.12, 1.12),
+(6, 2, 2, 2),
+(7, 100, 100, 100)
+(8, 'infinity', 'infinity', '1000'),
+(9, 'NaN', 'NaN', 'NaN')
+-- !query 70 schema
+struct<>
+-- !query 70 output
+org.apache.spark.sql.catalyst.parser.ParseException
+
+mismatched input '(' expecting {<EOF>, ',', '.', '[', 'ADD', 'AFTER', 'ALL',
'ALTER', 'ANALYZE', 'AND', 'ANY', 'ARCHIVE', 'ARRAY', 'AS', 'ASC', 'AT',
'AUTHORIZATION', 'BETWEEN', 'BOTH', 'BUCKET', 'BUCKETS', 'BY', 'CACHE',
'CASCADE', 'CASE', 'CAST', 'CHANGE', 'CHECK', 'CLEAR', 'CLUSTER', 'CLUSTERED',
'CODEGEN', 'COLLATE', 'COLLECTION', 'COLUMN', 'COLUMNS', 'COMMENT', 'COMMIT',
'COMPACT', 'COMPACTIONS', 'COMPUTE', 'CONCATENATE', 'CONSTRAINT', 'COST',
'CREATE', 'CUBE', 'CURRENT', 'CURRENT_DATE', 'CURRENT_TIME',
'CURRENT_TIMESTAMP', 'CURRENT_USER', 'DATA', 'DATABASE', DATABASES, 'DAY',
'DAYS', 'DBPROPERTIES', 'DEFINED', 'DELETE', 'DELIMITED', 'DESC', 'DESCRIBE',
'DFS', 'DIRECTORIES', 'DIRECTORY', 'DISTINCT', 'DISTRIBUTE', 'DROP', 'ELSE',
'END', 'ESCAPED', 'EXCEPT', 'EXCHANGE', 'EXISTS', 'EXPLAIN', 'EXPORT',
'EXTENDED', 'EXTERNAL', 'EXTRACT', 'FALSE', 'FETCH', 'FIELDS', 'FILEFORMAT',
'FIRST', 'FIRST_VALUE', 'FOLLOWING', 'FOR', 'FOREIGN', 'FORMAT', 'FORMATTED',
'FROM', 'FUNCTION', 'FUNCTIONS', 'GLOBAL', 'GRANT', 'GROUP', 'GROUPING',
'HAVING', 'HOUR', 'HOURS', 'IF', 'IGNORE', 'IMPORT', 'IN', 'INDEX', 'INDEXES',
'INPATH', 'INPUTFORMAT', 'INSERT', 'INTERSECT', 'INTERVAL', 'INTO', 'IS',
'ITEMS', 'KEYS', 'LAST', 'LAST_VALUE', 'LATERAL', 'LAZY', 'LEADING', 'LIKE',
'LIMIT', 'LINES', 'LIST', 'LOAD', 'LOCAL', 'LOCATION', 'LOCK', 'LOCKS',
'LOGICAL', 'MACRO', 'MAP', 'MICROSECOND', 'MICROSECONDS', 'MILLISECOND',
'MILLISECONDS', 'MINUTE', 'MINUTES', 'MONTH', 'MONTHS', 'MSCK', 'NO', NOT,
'NULL', 'NULLS', 'OF', 'ONLY', 'OPTION', 'OPTIONS', 'OR', 'ORDER', 'OUT',
'OUTER', 'OUTPUTFORMAT', 'OVER', 'OVERLAPS', 'OVERLAY', 'OVERWRITE',
'PARTITION', 'PARTITIONED', 'PARTITIONS', 'PERCENT', 'PIVOT', 'PLACING',
'POSITION', 'PRECEDING', 'PRIMARY', 'PRINCIPALS', 'PURGE', 'QUERY', 'RANGE',
'RECORDREADER', 'RECORDWRITER', 'RECOVER', 'REDUCE', 'REFERENCES', 'REFRESH',
'RENAME', 'REPAIR', 'REPLACE', 'RESET', 'RESPECT', 'RESTRICT', 'REVOKE', RLIKE,
'ROLE', 'ROLES', 'ROLLBACK', 'ROLLUP', 'ROW', 'ROWS', 'SCHEMA', 'SECOND',
'SECONDS', 'SELECT', 'SEPARATED', 'SERDE', 'SERDEPROPERTIES', 'SESSION_USER',
'SET', 'MINUS', 'SETS', 'SHOW', 'SKEWED', 'SOME', 'SORT', 'SORTED', 'START',
'STATISTICS', 'STORED', 'STRATIFY', 'STRUCT', 'SUBSTR', 'SUBSTRING', 'TABLE',
'TABLES', 'TABLESAMPLE', 'TBLPROPERTIES', TEMPORARY, 'TERMINATED', 'THEN',
'TO', 'TOUCH', 'TRAILING', 'TRANSACTION', 'TRANSACTIONS', 'TRANSFORM', 'TRIM',
'TRUE', 'TRUNCATE', 'TYPE', 'UNARCHIVE', 'UNBOUNDED', 'UNCACHE', 'UNION',
'UNIQUE', 'UNKNOWN', 'UNLOCK', 'UNSET', 'USE', 'USER', 'VALUES', 'VIEW',
'WEEK', 'WEEKS', 'WHEN', 'WHERE', 'WINDOW', 'WITH', 'YEAR', 'YEARS', EQ, '<=>',
'<>', '!=', '<', LTE, '>', GTE, '+', '-', '*', '/', '%', 'DIV', '&', '|', '||',
'^', IDENTIFIER, BACKQUOTED_IDENTIFIER}(line 9, pos 0)
+
+== SQL ==
+insert into numerics values
+(1, -3, -3, -3),
+(2, -1, -1, -1),
+(3, 0, 0, 0),
+(4, 1.1, 1.1, 1.1),
+(5, 1.12, 1.12, 1.12),
+(6, 2, 2, 2),
+(7, 100, 100, 100)
+(8, 'infinity', 'infinity', '1000'),
+^^^
+(9, 'NaN', 'NaN', 'NaN')
+
+
+-- !query 71
+(0, '-infinity', '-infinity', '-1000')
+-- !query 71 schema
+struct<>
+-- !query 71 output
+org.apache.spark.sql.catalyst.parser.ParseException
+
+mismatched input '0' expecting {'(', 'FROM', 'MAP', 'REDUCE', 'SELECT',
'TABLE', 'VALUES', 'WITH'}(line 1, pos 1)
+
+== SQL ==
+(0, '-infinity', '-infinity', '-1000')
+-^^^
+
+
+-- !query 72
+select id, f_float4, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 1 preceding and 1 following)
+-- !query 72 schema
+struct<id:int,f_float4:float,first(id, false) OVER (ORDER BY f_float4 ASC
NULLS FIRST RANGE BETWEEN CAST((- 1) AS FLOAT) FOLLOWING AND CAST(1 AS FLOAT)
FOLLOWING):int,last(id, false) OVER (ORDER BY f_float4 ASC NULLS FIRST RANGE
BETWEEN CAST((- 1) AS FLOAT) FOLLOWING AND CAST(1 AS FLOAT) FOLLOWING):int>
+-- !query 72 output
+
+
+
+-- !query 73
+select id, f_float4, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 1 preceding and 1.1 following)
+-- !query 73 schema
+struct<id:int,f_float4:float,first(id, false) OVER (ORDER BY f_float4 ASC
NULLS FIRST RANGE BETWEEN CAST((- 1) AS FLOAT) FOLLOWING AND CAST(1.1 AS FLOAT)
FOLLOWING):int,last(id, false) OVER (ORDER BY f_float4 ASC NULLS FIRST RANGE
BETWEEN CAST((- 1) AS FLOAT) FOLLOWING AND CAST(1.1 AS FLOAT) FOLLOWING):int>
+-- !query 73 output
+
+
+
+-- !query 74
+select id, f_float4, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 'inf' preceding and 'inf' following)
+-- !query 74 schema
+struct<id:int,f_float4:float,first(id, false) OVER (ORDER BY f_float4 ASC
NULLS FIRST RANGE BETWEEN CAST((- CAST(inf AS DOUBLE)) AS FLOAT) FOLLOWING AND
CAST(inf AS FLOAT) FOLLOWING):int,last(id, false) OVER (ORDER BY f_float4 ASC
NULLS FIRST RANGE BETWEEN CAST((- CAST(inf AS DOUBLE)) AS FLOAT) FOLLOWING AND
CAST(inf AS FLOAT) FOLLOWING):int>
+-- !query 74 output
+
+
+
+-- !query 75
+select id, f_float4, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 1.1 preceding and 'NaN' following)
+-- !query 75 schema
+struct<id:int,f_float4:float,first(id, false) OVER (ORDER BY f_float4 ASC
NULLS FIRST RANGE BETWEEN CAST((- 1.1) AS FLOAT) FOLLOWING AND CAST(NaN AS
FLOAT) FOLLOWING):int,last(id, false) OVER (ORDER BY f_float4 ASC NULLS FIRST
RANGE BETWEEN CAST((- 1.1) AS FLOAT) FOLLOWING AND CAST(NaN AS FLOAT)
FOLLOWING):int>
+-- !query 75 output
+
+
+
+-- !query 76
+select id, f_float8, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 1 preceding and 1 following)
+-- !query 76 schema
+struct<id:int,f_float8:float,first(id, false) OVER (ORDER BY f_float8 ASC
NULLS FIRST RANGE BETWEEN CAST((- 1) AS FLOAT) FOLLOWING AND CAST(1 AS FLOAT)
FOLLOWING):int,last(id, false) OVER (ORDER BY f_float8 ASC NULLS FIRST RANGE
BETWEEN CAST((- 1) AS FLOAT) FOLLOWING AND CAST(1 AS FLOAT) FOLLOWING):int>
+-- !query 76 output
+
+
+
+-- !query 77
+select id, f_float8, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 1 preceding and 1.1 following)
+-- !query 77 schema
+struct<id:int,f_float8:float,first(id, false) OVER (ORDER BY f_float8 ASC
NULLS FIRST RANGE BETWEEN CAST((- 1) AS FLOAT) FOLLOWING AND CAST(1.1 AS FLOAT)
FOLLOWING):int,last(id, false) OVER (ORDER BY f_float8 ASC NULLS FIRST RANGE
BETWEEN CAST((- 1) AS FLOAT) FOLLOWING AND CAST(1.1 AS FLOAT) FOLLOWING):int>
+-- !query 77 output
+
+
+
+-- !query 78
+select id, f_float8, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 'inf' preceding and 'inf' following)
+-- !query 78 schema
+struct<id:int,f_float8:float,first(id, false) OVER (ORDER BY f_float8 ASC
NULLS FIRST RANGE BETWEEN CAST((- CAST(inf AS DOUBLE)) AS FLOAT) FOLLOWING AND
CAST(inf AS FLOAT) FOLLOWING):int,last(id, false) OVER (ORDER BY f_float8 ASC
NULLS FIRST RANGE BETWEEN CAST((- CAST(inf AS DOUBLE)) AS FLOAT) FOLLOWING AND
CAST(inf AS FLOAT) FOLLOWING):int>
+-- !query 78 output
+
+
+
+-- !query 79
+select id, f_float8, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 1.1 preceding and 'NaN' following)
+-- !query 79 schema
+struct<id:int,f_float8:float,first(id, false) OVER (ORDER BY f_float8 ASC
NULLS FIRST RANGE BETWEEN CAST((- 1.1) AS FLOAT) FOLLOWING AND CAST(NaN AS
FLOAT) FOLLOWING):int,last(id, false) OVER (ORDER BY f_float8 ASC NULLS FIRST
RANGE BETWEEN CAST((- 1.1) AS FLOAT) FOLLOWING AND CAST(NaN AS FLOAT)
FOLLOWING):int>
+-- !query 79 output
+
+
+
+-- !query 80
+select id, f_numeric, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1 preceding and 1 following)
+-- !query 80 schema
+struct<id:int,f_numeric:int,first(id, false) OVER (ORDER BY f_numeric ASC
NULLS FIRST RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING):int,last(id, false) OVER
(ORDER BY f_numeric ASC NULLS FIRST RANGE BETWEEN 1 PRECEDING AND 1
FOLLOWING):int>
+-- !query 80 output
+
+
+
+-- !query 81
+select id, f_numeric, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1 preceding and 1.1 following)
+-- !query 81 schema
+struct<id:int,f_numeric:int,first(id, false) OVER (ORDER BY f_numeric ASC
NULLS FIRST RANGE BETWEEN 1 PRECEDING AND CAST(1.1 AS INT)
FOLLOWING):int,last(id, false) OVER (ORDER BY f_numeric ASC NULLS FIRST RANGE
BETWEEN 1 PRECEDING AND CAST(1.1 AS INT) FOLLOWING):int>
+-- !query 81 output
+
+
+
+-- !query 82
+select id, f_numeric, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1 preceding and 1.1 following)
+-- !query 82 schema
+struct<id:int,f_numeric:int,first(id, false) OVER (ORDER BY f_numeric ASC
NULLS FIRST RANGE BETWEEN 1 PRECEDING AND CAST(1.1 AS INT)
FOLLOWING):int,last(id, false) OVER (ORDER BY f_numeric ASC NULLS FIRST RANGE
BETWEEN 1 PRECEDING AND CAST(1.1 AS INT) FOLLOWING):int>
+-- !query 82 output
+
+
+
+-- !query 83
+select id, f_numeric, first(id) over w, last(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1.1 preceding and 'NaN' following)
+-- !query 83 schema
+struct<id:int,f_numeric:int,first(id, false) OVER (ORDER BY f_numeric ASC
NULLS FIRST RANGE BETWEEN CAST((- 1.1) AS INT) FOLLOWING AND CAST(NaN AS INT)
FOLLOWING):int,last(id, false) OVER (ORDER BY f_numeric ASC NULLS FIRST RANGE
BETWEEN CAST((- 1.1) AS INT) FOLLOWING AND CAST(NaN AS INT) FOLLOWING):int>
+-- !query 83 output
+
+
+
+-- !query 84
+create table datetimes (
+ id int,
+ f_time timestamp,
+ f_timetz timestamp,
+ f_interval timestamp,
+ f_timestamptz timestamp,
+ f_timestamp timestamp
+) using parquet
+-- !query 84 schema
+struct<>
+-- !query 84 output
+
+
+
+-- !query 85
+insert into datetimes values
+(1, '11:00', '11:00 BST', '1 year', '2000-10-19 10:23:54+01', '2000-10-19
10:23:54'),
+(2, '12:00', '12:00 BST', '2 years', '2001-10-19 10:23:54+01', '2001-10-19
10:23:54'),
+(3, '13:00', '13:00 BST', '3 years', '2001-10-19 10:23:54+01', '2001-10-19
10:23:54'),
+(4, '14:00', '14:00 BST', '4 years', '2002-10-19 10:23:54+01', '2002-10-19
10:23:54'),
+(5, '15:00', '15:00 BST', '5 years', '2003-10-19 10:23:54+01', '2003-10-19
10:23:54'),
+(6, '15:00', '15:00 BST', '5 years', '2004-10-19 10:23:54+01', '2004-10-19
10:23:54'),
+(7, '17:00', '17:00 BST', '7 years', '2005-10-19 10:23:54+01', '2005-10-19
10:23:54'),
+(8, '18:00', '18:00 BST', '8 years', '2006-10-19 10:23:54+01', '2006-10-19
10:23:54'),
+(9, '19:00', '19:00 BST', '9 years', '2007-10-19 10:23:54+01', '2007-10-19
10:23:54'),
+(10, '20:00', '20:00 BST', '10 years', '2008-10-19 10:23:54+01', '2008-10-19
10:23:54')
+-- !query 85 schema
+struct<>
+-- !query 85 output
+
+
+
+-- !query 86
+WITH cte (x) AS (
+ SELECT * FROM range(1, 36, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following)
+-- !query 86 schema
+struct<x:bigint,sum(x) OVER (ORDER BY x ASC NULLS FIRST ROWS BETWEEN 1
PRECEDING AND 1 FOLLOWING):bigint>
+-- !query 86 output
+1 4
+11 33
+13 39
+15 45
+17 51
+19 57
+21 63
+23 69
+25 75
+27 81
+29 87
+3 9
+31 93
+33 99
+35 68
+5 15
+7 21
+9 27
+
+
+-- !query 87
+WITH cte (x) AS (
+ SELECT * FROM range(1, 36, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x range between 1 preceding and 1 following)
+-- !query 87 schema
+struct<x:bigint,sum(x) OVER (ORDER BY x ASC NULLS FIRST RANGE BETWEEN CAST((-
1) AS BIGINT) FOLLOWING AND CAST(1 AS BIGINT) FOLLOWING):bigint>
+-- !query 87 output
+1 1
+11 11
+13 13
+15 15
+17 17
+19 19
+21 21
+23 23
+25 25
+27 27
+29 29
+3 3
+31 31
+33 33
+35 35
+5 5
+7 7
+9 9
+
+
+-- !query 88
+WITH cte (x) AS (
+ select 1 union all select 1 union all select 1 union all
+ SELECT * FROM range(5, 50, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following)
+-- !query 88 schema
+struct<x:bigint,sum(x) OVER (ORDER BY x ASC NULLS FIRST ROWS BETWEEN 1
PRECEDING AND 1 FOLLOWING):bigint>
+-- !query 88 output
+1 2
+1 3
+1 7
+11 33
+13 39
+15 45
+17 51
+19 57
+21 63
+23 69
+25 75
+27 81
+29 87
+31 93
+33 99
+35 105
+37 111
+39 117
+41 123
+43 129
+45 135
+47 141
+49 96
+5 13
+7 21
+9 27
+
+
+-- !query 89
+WITH cte (x) AS (
+ select 1 union all select 1 union all select 1 union all
+ SELECT * FROM range(5, 50, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x range between 1 preceding and 1 following)
+-- !query 89 schema
+struct<x:bigint,sum(x) OVER (ORDER BY x ASC NULLS FIRST RANGE BETWEEN CAST((-
1) AS BIGINT) FOLLOWING AND CAST(1 AS BIGINT) FOLLOWING):bigint>
+-- !query 89 output
+1 3
+1 3
+1 3
+11 11
+13 13
+15 15
+17 17
+19 19
+21 21
+23 23
+25 25
+27 27
+29 29
+31 31
+33 33
+35 35
+37 37
+39 39
+41 41
+43 43
+45 45
+47 47
+49 49
+5 5
+7 7
+9 9
+
+
+-- !query 90
+SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 UNION ALL
SELECT * FROM tenk2)s LIMIT 0
+-- !query 90 schema
+struct<count(1) OVER (PARTITION BY four ROWS BETWEEN UNBOUNDED PRECEDING AND
UNBOUNDED FOLLOWING):bigint>
+-- !query 90 output
+
+
+
+-- !query 91
+create table t1 (f1 int, f2 int) using parquet
+-- !query 91 schema
+struct<>
+-- !query 91 output
+
+
+
+-- !query 92
+insert into t1 values (1,1),(1,2),(2,2)
+-- !query 92 schema
+struct<>
+-- !query 92 output
+
+
+
+-- !query 93
+select f1, sum(f1) over (partition by f1
+ range between 1 preceding and 1 following)
+from t1 where f1 = f2
+-- !query 93 schema
+struct<>
+-- !query 93 output
+org.apache.spark.sql.AnalysisException
+cannot resolve '(PARTITION BY default.t1.`f1` RANGE BETWEEN 1 PRECEDING AND 1
FOLLOWING)' due to data type mismatch: A range window frame cannot be used in
an unordered window specification.; line 1 pos 24
+
+
+-- !query 94
+select f1, sum(f1) over (partition by f1, f1 order by f2
+range between 2 preceding and 1 preceding)
+from t1 where f1 = f2
+-- !query 94 schema
+struct<f1:int,sum(f1) OVER (PARTITION BY f1, f1 ORDER BY f2 ASC NULLS FIRST
RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING):bigint>
+-- !query 94 output
+1 NULL
+2 NULL
+
+
+-- !query 95
+select f1, sum(f1) over (partition by f1, f2 order by f2
+range between 1 following and 2 following)
+from t1 where f1 = f2
+-- !query 95 schema
+struct<f1:int,sum(f1) OVER (PARTITION BY f1, f2 ORDER BY f2 ASC NULLS FIRST
RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING):bigint>
+-- !query 95 output
+1 NULL
+2 NULL
+
+
+-- !query 96
+SELECT rank() OVER (ORDER BY length('abc'))
+-- !query 96 schema
+struct<RANK() OVER (ORDER BY length(abc) ASC NULLS FIRST ROWS BETWEEN
UNBOUNDED PRECEDING AND CURRENT ROW):int>
+-- !query 96 output
+1
+
+
+-- !query 97
+SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10
+-- !query 97 schema
+struct<>
+-- !query 97 output
+org.apache.spark.sql.AnalysisException
+It is not allowed to use window functions inside WHERE and HAVING clauses;
+
+
+-- !query 98
+SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY
salary) < 10
+-- !query 98 schema
+struct<>
+-- !query 98 output
+org.apache.spark.sql.AnalysisException
+
+The query operator `Join` contains one or more unsupported
+expression types Aggregate, Window or Generate.
+Invalid expressions: [row_number() OVER (ORDER BY default.empsalary.`salary`
ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)];
+
+
+-- !query 99
+SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY 1
+-- !query 99 schema
+struct<>
+-- !query 99 output
+org.apache.spark.sql.AnalysisException
+
+The query operator `Aggregate` contains one or more unsupported
+expression types Aggregate, Window or Generate.
+Invalid expressions: [RANK() OVER (ORDER BY 1 ASC NULLS FIRST ROWS BETWEEN
UNBOUNDED PRECEDING AND CURRENT ROW)];
+
+
+-- !query 100
+SELECT * FROM rank() OVER (ORDER BY random())
+-- !query 100 schema
+struct<>
+-- !query 100 output
+org.apache.spark.sql.catalyst.parser.ParseException
+
+no viable alternative at input 'ORDER'(line 1, pos 27)
+
+== SQL ==
+SELECT * FROM rank() OVER (ORDER BY random())
+---------------------------^^^
+
+
+-- !query 101
+SELECT * FROM empsalary WHERE (rank() OVER (ORDER BY random())) > 10
+-- !query 101 schema
+struct<>
+-- !query 101 output
+org.apache.spark.sql.AnalysisException
+It is not allowed to use window functions inside WHERE and HAVING clauses;
+
+
+-- !query 102
+SELECT * FROM empsalary WHERE rank() OVER (ORDER BY random())
+-- !query 102 schema
+struct<>
+-- !query 102 output
+org.apache.spark.sql.AnalysisException
+It is not allowed to use window functions inside WHERE and HAVING clauses;
+
+
+-- !query 103
+select rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1
+-- !query 103 schema
+struct<>
+-- !query 103 output
+org.apache.spark.sql.catalyst.parser.ParseException
+
+no viable alternative at input 'ORDER'(line 1, pos 39)
+
+== SQL ==
+select rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1
+---------------------------------------^^^
+
+
+-- !query 104
+SELECT range(1, 100) OVER () FROM empsalary
+-- !query 104 schema
+struct<>
+-- !query 104 output
+org.apache.spark.sql.AnalysisException
+Undefined function: 'range'. This function is neither a registered temporary
function nor a permanent function registered in the database 'default'.; line 1
pos 7
Review comment:
This error is not the expected one; the PostgreSQL regression suite expects a different result here:
https://github.com/postgres/postgres/blob/REL_12_BETA2/src/test/regress/expected/window.out#L2982
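For context, a sketch of the comparison (assuming the port swapped PostgreSQL's generate_series for Spark's range in this query, as it does elsewhere in the file):

-- Assumed upstream PostgreSQL form of query 104:
SELECT generate_series(1, 100) OVER () FROM empsalary;
-- PostgreSQL parses this and then rejects it, presumably because
-- generate_series is not a window function (see the linked window.out
-- for the exact message), whereas the port above fails earlier at
-- function resolution with "Undefined function: 'range'", since range
-- is only a table-valued function in Spark.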