ueshin commented on code in PR #41606:
URL: https://github.com/apache/spark/pull/41606#discussion_r1253479475
##########
python/pyspark/testing/utils.py:
##########
@@ -209,3 +233,71 @@ def check_error(
self.assertEqual(
expected, actual, f"Expected message parameters was '{expected}',
got '{actual}'"
)
+
+
+def assertDataFrameEqual(
+ df: DataFrame, expected: Union[DataFrame, List[Row]], ignore_row_order:
bool = True
+):
+ if df is None and expected is None:
+ return True
+ elif df is None or expected is None:
+ return False
+
+ def compare(r1: Row, r2: Row):
+ if r1 is None and r2 is None:
+ return True
+ elif r1 is None or r2 is None:
+ return False
+
+ d1 = r1.asDict()
+ d2 = r2.asDict()
Review Comment:
We can't use `asDict()` here because a `Row` can have multiple fields with the same name.
##########
python/pyspark/testing/utils.py:
##########
@@ -209,3 +233,71 @@ def check_error(
self.assertEqual(
expected, actual, f"Expected message parameters was '{expected}',
got '{actual}'"
)
+
+
+def assertDataFrameEqual(
+ df: DataFrame, expected: Union[DataFrame, List[Row]], ignore_row_order:
bool = True
+):
+ if df is None and expected is None:
+ return True
+ elif df is None or expected is None:
+ return False
+
+ def compare(r1: Row, r2: Row):
+ if r1 is None and r2 is None:
+ return True
+ elif r1 is None or r2 is None:
+ return False
+
+ d1 = r1.asDict()
+ d2 = r2.asDict()
+
+ for key in d1.keys() & d2.keys():
+ if isinstance(d1[key], float) and isinstance(d2[key], float):
+ if abs(d1[key] - d2[key]) > 1e-5:
+ return False
+ else:
+ if d1[key] != d2[key]:
+ return False
+ return True
+
+ def assert_schema_equal(
+ df_schema: Optional[Union[AtomicType, StructType, str, List[str],
Tuple[str, ...]]],
+ expected_schema: Optional[Union[AtomicType, StructType, str,
List[str], Tuple[str, ...]]],
+ ):
+ if df_schema != expected_schema:
+ raise PySparkAssertionError(
+ error_class="DIFFERENT_SCHEMA",
+ message_parameters={"df_schema": df.schema, "expected_schema":
expected.schema},
+ )
+
+ def assert_rows_equal(rows1: Row, rows2: Row):
+ zipped = list(zip_longest(rows1, rows2))
+ rows_equal = True
+ error_table = PrettyTable(["df", "expected"])
+
+ for r1, r2 in zipped:
+ if compare(r1, r2):
+ error_table.add_row([blue(r1), blue(r2)])
+ else:
+ rows_equal = False
+ error_table.add_row([red(r1), red(r2)])
+
+ if not rows_equal:
+ raise PySparkAssertionError(
+ error_class="DIFFERENT_DATAFRAME",
+ message_parameters={"error_table": error_table.get_string()},
+ )
+
+ if ignore_row_order:
+ try:
+ df = df.sort(df.columns)
+ expected = expected.sort(expected.columns)
+ except:
+ raise PySparkAssertionError(
+ error_class="UNSUPPORTED_DTYPE_FOR_IGNORE_ROW_ORDER",
Review Comment:
Shall we use `DATATYPE` instead of `DTYPE`?
##########
python/pyspark/testing/utils.py:
##########
@@ -209,3 +233,71 @@ def check_error(
self.assertEqual(
expected, actual, f"Expected message parameters was '{expected}',
got '{actual}'"
)
+
+
+def assertDataFrameEqual(
+ df: DataFrame, expected: Union[DataFrame, List[Row]], ignore_row_order:
bool = True
+):
+ if df is None and expected is None:
+ return True
+ elif df is None or expected is None:
+ return False
+
+ def compare(r1: Row, r2: Row):
+ if r1 is None and r2 is None:
+ return True
+ elif r1 is None or r2 is None:
+ return False
+
+ d1 = r1.asDict()
+ d2 = r2.asDict()
+
+ for key in d1.keys() & d2.keys():
+ if isinstance(d1[key], float) and isinstance(d2[key], float):
+ if abs(d1[key] - d2[key]) > 1e-5:
+ return False
+ else:
+ if d1[key] != d2[key]:
+ return False
+ return True
+
+ def assert_schema_equal(
+ df_schema: Optional[Union[AtomicType, StructType, str, List[str],
Tuple[str, ...]]],
+ expected_schema: Optional[Union[AtomicType, StructType, str,
List[str], Tuple[str, ...]]],
+ ):
Review Comment:
Now that this is a private function in `assertDataFrameEqual`, can we expect
the schemas to be `StructType` only?
##########
python/pyspark/testing/utils.py:
##########
@@ -209,3 +233,71 @@ def check_error(
self.assertEqual(
expected, actual, f"Expected message parameters was '{expected}',
got '{actual}'"
)
+
+
+def assertDataFrameEqual(
+ df: DataFrame, expected: Union[DataFrame, List[Row]], ignore_row_order:
bool = True
+):
+ if df is None and expected is None:
+ return True
+ elif df is None or expected is None:
+ return False
+
+ def compare(r1: Row, r2: Row):
+ if r1 is None and r2 is None:
+ return True
+ elif r1 is None or r2 is None:
+ return False
+
+ d1 = r1.asDict()
+ d2 = r2.asDict()
+
+ for key in d1.keys() & d2.keys():
+ if isinstance(d1[key], float) and isinstance(d2[key], float):
+ if abs(d1[key] - d2[key]) > 1e-5:
+ return False
+ else:
+ if d1[key] != d2[key]:
+ return False
+ return True
+
+ def assert_schema_equal(
+ df_schema: Optional[Union[AtomicType, StructType, str, List[str],
Tuple[str, ...]]],
+ expected_schema: Optional[Union[AtomicType, StructType, str,
List[str], Tuple[str, ...]]],
+ ):
+ if df_schema != expected_schema:
+ raise PySparkAssertionError(
+ error_class="DIFFERENT_SCHEMA",
+ message_parameters={"df_schema": df.schema, "expected_schema":
expected.schema},
+ )
+
+ def assert_rows_equal(rows1: Row, rows2: Row):
+ zipped = list(zip_longest(rows1, rows2))
+ rows_equal = True
+ error_table = PrettyTable(["df", "expected"])
+
+ for r1, r2 in zipped:
+ if compare(r1, r2):
+ error_table.add_row([blue(r1), blue(r2)])
+ else:
+ rows_equal = False
+ error_table.add_row([red(r1), red(r2)])
+
+ if not rows_equal:
+ raise PySparkAssertionError(
+ error_class="DIFFERENT_DATAFRAME",
+ message_parameters={"error_table": error_table.get_string()},
+ )
+
+ if ignore_row_order:
+ try:
+ df = df.sort(df.columns)
+ expected = expected.sort(expected.columns)
+ except:
Review Comment:
Shall we catch a specific exception class that could be caused by `df.sort`
only?
##########
python/pyspark/testing/utils.py:
##########
@@ -209,3 +233,71 @@ def check_error(
self.assertEqual(
expected, actual, f"Expected message parameters was '{expected}',
got '{actual}'"
)
+
+
+def assertDataFrameEqual(
+ df: DataFrame, expected: Union[DataFrame, List[Row]], ignore_row_order:
bool = True
+):
+ if df is None and expected is None:
+ return True
+ elif df is None or expected is None:
+ return False
+
+ def compare(r1: Row, r2: Row):
+ if r1 is None and r2 is None:
+ return True
+ elif r1 is None or r2 is None:
+ return False
+
+ d1 = r1.asDict()
+ d2 = r2.asDict()
+
+ for key in d1.keys() & d2.keys():
+ if isinstance(d1[key], float) and isinstance(d2[key], float):
+ if abs(d1[key] - d2[key]) > 1e-5:
+ return False
+ else:
+ if d1[key] != d2[key]:
+ return False
+ return True
+
+ def assert_schema_equal(
+ df_schema: Optional[Union[AtomicType, StructType, str, List[str],
Tuple[str, ...]]],
+ expected_schema: Optional[Union[AtomicType, StructType, str,
List[str], Tuple[str, ...]]],
+ ):
+ if df_schema != expected_schema:
+ raise PySparkAssertionError(
+ error_class="DIFFERENT_SCHEMA",
+ message_parameters={"df_schema": df.schema, "expected_schema":
expected.schema},
+ )
+
+ def assert_rows_equal(rows1: Row, rows2: Row):
+ zipped = list(zip_longest(rows1, rows2))
+ rows_equal = True
+ error_table = PrettyTable(["df", "expected"])
+
+ for r1, r2 in zipped:
+ if compare(r1, r2):
+ error_table.add_row([blue(r1), blue(r2)])
+ else:
+ rows_equal = False
+ error_table.add_row([red(r1), red(r2)])
+
+ if not rows_equal:
+ raise PySparkAssertionError(
+ error_class="DIFFERENT_DATAFRAME",
+ message_parameters={"error_table": error_table.get_string()},
+ )
+
+ if ignore_row_order:
+ try:
+ df = df.sort(df.columns)
+ expected = expected.sort(expected.columns)
Review Comment:
According to the type hint, `expected` can be `List[Row]`, and I don't think
this works in that case. Also at
https://github.com/apache/spark/pull/41606/files#diff-89e8f0a4413730d86ed98f11e10e3dfe259aa80378a2734eee659ea08bfff8b4R302-R303
##########
python/pyspark/testing/utils.py:
##########
@@ -209,3 +233,71 @@ def check_error(
self.assertEqual(
expected, actual, f"Expected message parameters was '{expected}',
got '{actual}'"
)
+
+
+def assertDataFrameEqual(
+ df: DataFrame, expected: Union[DataFrame, List[Row]], ignore_row_order:
bool = True
+):
+ if df is None and expected is None:
+ return True
+ elif df is None or expected is None:
+ return False
+
+ def compare(r1: Row, r2: Row):
+ if r1 is None and r2 is None:
+ return True
+ elif r1 is None or r2 is None:
+ return False
+
+ d1 = r1.asDict()
+ d2 = r2.asDict()
+
+ for key in d1.keys() & d2.keys():
+ if isinstance(d1[key], float) and isinstance(d2[key], float):
+ if abs(d1[key] - d2[key]) > 1e-5:
+ return False
+ else:
+ if d1[key] != d2[key]:
+ return False
+ return True
+
+ def assert_schema_equal(
+ df_schema: Optional[Union[AtomicType, StructType, str, List[str],
Tuple[str, ...]]],
+ expected_schema: Optional[Union[AtomicType, StructType, str,
List[str], Tuple[str, ...]]],
+ ):
+ if df_schema != expected_schema:
+ raise PySparkAssertionError(
+ error_class="DIFFERENT_SCHEMA",
+ message_parameters={"df_schema": df.schema, "expected_schema":
expected.schema},
Review Comment:
nit: `df_schema` and `expected_schema`?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]