HyukjinKwon commented on a change in pull request #34113:
URL: https://github.com/apache/spark/pull/34113#discussion_r718151080
##########
File path: python/pyspark/pandas/indexes/multi.py
##########
@@ -1137,6 +1137,42 @@ def intersection(self, other: Union[DataFrame, Series, Index, List]) -> "MultiIn
        )
        return cast(MultiIndex, DataFrame(internal).index)
+    def equal_levels(self, other: "MultiIndex") -> bool:
+        """
+        Return True if the levels of both MultiIndex objects are the same
+
+        Examples
+        --------
+        >>> psmidx1 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("b", "y"), ("a", "x"), ("c", "z")])
+        >>> psmidx1.equal_levels(psmidx2)
+        True
+
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "j")])
+        >>> psmidx1.equal_levels(psmidx2)
+        False
+        """
+        nlevels = self.nlevels
+        if nlevels != other.nlevels:
+            return False
+
+        self_sdf = self._internal.spark_frame
+        other_sdf = other._internal.spark_frame
+        subtract_list = []
+        for nlevel in range(nlevels):
+            self_index_scol = self._internal.index_spark_columns[nlevel]
+            other_index_scol = other._internal.index_spark_columns[nlevel]
+            self_subtract_other = self_sdf.select(self_index_scol).subtract(
Review comment:
I think you should use `exceptAll` to preserve duplicate values.
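For reference, a minimal sketch of the behavioural difference (not from the PR; the toy DataFrames below are made up for illustration):
```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# Two single-column frames where the left side has a duplicated value.
left = spark.createDataFrame([("a",), ("a",), ("b",)], ["x"])
right = spark.createDataFrame([("a",), ("b",)], ["x"])

# `subtract` is distinct-based: both "a" rows disappear because "a" exists on the right.
left.subtract(right).show()   # empty

# `exceptAll` keeps duplicates: one "a" row survives (two on the left minus one on the right).
left.exceptAll(right).show()  # one row: "a"
```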
##########
File path: python/pyspark/pandas/indexes/multi.py
##########
@@ -1137,6 +1137,42 @@ def intersection(self, other: Union[DataFrame, Series, Index, List]) -> "MultiIn
        )
        return cast(MultiIndex, DataFrame(internal).index)
+    def equal_levels(self, other: "MultiIndex") -> bool:
+        """
+        Return True if the levels of both MultiIndex objects are the same
+
+        Examples
+        --------
+        >>> psmidx1 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("b", "y"), ("a", "x"), ("c", "z")])
+        >>> psmidx1.equal_levels(psmidx2)
+        True
+
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "j")])
+        >>> psmidx1.equal_levels(psmidx2)
+        False
+        """
+        nlevels = self.nlevels
+        if nlevels != other.nlevels:
+            return False
+
+        self_sdf = self._internal.spark_frame
+        other_sdf = other._internal.spark_frame
+        subtract_list = []
+        for nlevel in range(nlevels):
+            self_index_scol = self._internal.index_spark_columns[nlevel]
+            other_index_scol = other._internal.index_spark_columns[nlevel]
+            self_subtract_other = self_sdf.select(self_index_scol).subtract(
Review comment:
This looks like we'll compare each value of the MultiIndex individually.
e.g.)
```python
ps.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
ps.MultiIndex.from_tuples([("b", "x"), ("a", "y")])
```
will be considered the same?
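For reference, pandas itself seems to compare the per-level value sets rather than the tuples, so (if I read it right, worth double-checking) it would also treat these two as having equal levels:
```python
import pandas as pd

pmidx1 = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
pmidx2 = pd.MultiIndex.from_tuples([("b", "x"), ("a", "y")])

# Both indexes have levels [['a', 'b'], ['x', 'y']], so the levels compare equal
# even though the tuples themselves differ.
print(pmidx1.equal_levels(pmidx2))  # True, as far as I can tell
```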
##########
File path: python/pyspark/pandas/indexes/multi.py
##########
@@ -1137,6 +1137,42 @@ def intersection(self, other: Union[DataFrame, Series, Index, List]) -> "MultiIn
        )
        return cast(MultiIndex, DataFrame(internal).index)
+    def equal_levels(self, other: "MultiIndex") -> bool:
+        """
+        Return True if the levels of both MultiIndex objects are the same
+
+        Examples
+        --------
+        >>> psmidx1 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("b", "y"), ("a", "x"), ("c", "z")])
+        >>> psmidx1.equal_levels(psmidx2)
+        True
+
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "j")])
+        >>> psmidx1.equal_levels(psmidx2)
+        False
+        """
+        nlevels = self.nlevels
+        if nlevels != other.nlevels:
+            return False
+
+        self_sdf = self._internal.spark_frame
+        other_sdf = other._internal.spark_frame
+        subtract_list = []
+        for nlevel in range(nlevels):
+            self_index_scol = self._internal.index_spark_columns[nlevel]
+            other_index_scol = other._internal.index_spark_columns[nlevel]
+            self_subtract_other = self_sdf.select(self_index_scol).subtract(
+                other_sdf.select(other_index_scol)
+            )
+            subtract_list.append(self_subtract_other)
+
+        unioned_subtracts = reduce(lambda x, y: x.union(y), subtract_list)
+        if len(unioned_subtracts.head(1)) == 0:
+            return True
+        else:
+            return False
Review comment:
```suggestion
        return len(unioned_subtracts.head(1)) == 0
```
##########
File path: python/pyspark/pandas/indexes/multi.py
##########
@@ -1137,6 +1137,42 @@ def intersection(self, other: Union[DataFrame, Series, Index, List]) -> "MultiIn
        )
        return cast(MultiIndex, DataFrame(internal).index)
+    def equal_levels(self, other: "MultiIndex") -> bool:
+        """
+        Return True if the levels of both MultiIndex objects are the same
+
+        Examples
+        --------
+        >>> psmidx1 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("b", "y"), ("a", "x"), ("c", "z")])
+        >>> psmidx1.equal_levels(psmidx2)
+        True
+
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "j")])
+        >>> psmidx1.equal_levels(psmidx2)
+        False
+        """
+        nlevels = self.nlevels
+        if nlevels != other.nlevels:
+            return False
+
+        self_sdf = self._internal.spark_frame
+        other_sdf = other._internal.spark_frame
+        subtract_list = []
+        for nlevel in range(nlevels):
+            self_index_scol = self._internal.index_spark_columns[nlevel]
+            other_index_scol = other._internal.index_spark_columns[nlevel]
+            self_subtract_other = self_sdf.select(self_index_scol).subtract(
Review comment:
👌
##########
File path: python/pyspark/pandas/indexes/multi.py
##########
@@ -1137,6 +1137,42 @@ def intersection(self, other: Union[DataFrame, Series, Index, List]) -> "MultiIn
        )
        return cast(MultiIndex, DataFrame(internal).index)
+    def equal_levels(self, other: "MultiIndex") -> bool:
+        """
+        Return True if the levels of both MultiIndex objects are the same
+
+        Examples
+        --------
+        >>> psmidx1 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("b", "y"), ("a", "x"), ("c", "z")])
+        >>> psmidx1.equal_levels(psmidx2)
+        True
+
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "j")])
+        >>> psmidx1.equal_levels(psmidx2)
+        False
+        """
+        nlevels = self.nlevels
+        if nlevels != other.nlevels:
+            return False
+
+        self_sdf = self._internal.spark_frame
+        other_sdf = other._internal.spark_frame
+        subtract_list = []
+        for nlevel in range(nlevels):
+            self_index_scol = self._internal.index_spark_columns[nlevel]
+            other_index_scol = other._internal.index_spark_columns[nlevel]
+            self_subtract_other = self_sdf.select(self_index_scol).subtract(
Review comment:
👌
##########
File path: python/pyspark/pandas/indexes/multi.py
##########
@@ -1137,6 +1137,42 @@ def intersection(self, other: Union[DataFrame, Series, Index, List]) -> "MultiIn
        )
        return cast(MultiIndex, DataFrame(internal).index)
+    def equal_levels(self, other: "MultiIndex") -> bool:
+        """
+        Return True if the levels of both MultiIndex objects are the same
+
+        Examples
+        --------
+        >>> psmidx1 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("b", "y"), ("a", "x"), ("c", "z")])
+        >>> psmidx1.equal_levels(psmidx2)
+        True
+
+        >>> psmidx2 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "j")])
+        >>> psmidx1.equal_levels(psmidx2)
+        False
+        """
+        nlevels = self.nlevels
+        if nlevels != other.nlevels:
+            return False
+
+        self_sdf = self._internal.spark_frame
+        other_sdf = other._internal.spark_frame
+        subtract_list = []
+        for nlevel in range(nlevels):
+            self_index_scol = self._internal.index_spark_columns[nlevel]
+            other_index_scol = other._internal.index_spark_columns[nlevel]
+            self_subtract_other = self_sdf.select(self_index_scol).subtract(
Review comment:
Can you elaborate on the equality condition of this API? It seems like we can just leverage the distinct values for the comparison?
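In case it helps, a minimal sketch of the distinct-value idea on plain DataFrames (the column name and data are made up; whether this is the right semantics for `equal_levels` is exactly the question above):
```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# Stand-ins for one level of each index, pulled out as single-column frames.
self_level = spark.createDataFrame([("a",), ("b",), ("a",)], ["level_0"]).distinct()
other_level = spark.createDataFrame([("b",), ("a",)], ["level_0"]).distinct()

# The levels match iff neither side has a distinct value the other lacks.
same_level = (
    len(self_level.subtract(other_level).head(1)) == 0
    and len(other_level.subtract(self_level).head(1)) == 0
)
print(same_level)  # True: both levels reduce to the distinct values {"a", "b"}
```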