Sandeep Singh created SPARK-41907:
-------------------------------------
Summary: Function `sampleby` return parity
Key: SPARK-41907
URL: https://issues.apache.org/jira/browse/SPARK-41907
Project: Spark
Issue Type: Sub-task
Components: Connect
Affects Versions: 3.4.0
Reporter: Sandeep Singh
{code:java}
df = self.df
from pyspark.sql import functions
rnd = df.select("key", functions.rand()).collect()
for row in rnd:
    assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select("key", functions.randn(5)).collect()
for row in rndn:
    assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select("key", functions.rand(0)).collect()
rnd2 = df.select("key", functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select("key", functions.randn(0)).collect()
rndn2 = df.select("key", functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2)){code}
{code:java}
Traceback (most recent call last):
File
"/Users/s.singh/personal/spark-oss/python/pyspark/sql/tests/test_functions.py",
line 299, in test_rand_functions
rnd = df.select("key", functions.rand()).collect()
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/dataframe.py",
line 2917, in select
jdf = self._jdf.select(self._jcols(*cols))
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/dataframe.py",
line 2537, in _jcols
return self._jseq(cols, _to_java_column)
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/dataframe.py",
line 2524, in _jseq
return _to_seq(self.sparkSession._sc, cols, converter)
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/column.py", line
86, in _to_seq
cols = [converter(c) for c in cols]
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/column.py", line
86, in <listcomp>
cols = [converter(c) for c in cols]
File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/column.py", line
65, in _to_java_column
raise TypeError(
TypeError: Invalid argument, not a string or column: Column<'rand()'> of type
<class 'pyspark.sql.connect.column.Column'>. For column literals, use 'lit',
'array', 'struct' or 'create_map' function.
{code}
--
This message was sent by Atlassian Jira
(v8.20.10#820010)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]