pingsutw commented on a change in pull request #32738:
URL: https://github.com/apache/spark/pull/32738#discussion_r644481542



##########
File path: python/pyspark/pandas/generic.py
##########
@@ -3064,25 +3064,25 @@ def ffill(self, axis=None, inplace=False, limit=None) 
-> Union["DataFrame", "Ser
 
     @property
     def at(self) -> AtIndexer:
-        return AtIndexer(self)
+        return AtIndexer(self)  # type: ignore

Review comment:
       error: Argument 1 to "AtIndexer" has incompatible type "Frame"; expected 
"Union[Series[Any], DataFrame[Any]]"

##########
File path: python/pyspark/pandas/indexing.py
##########
@@ -514,7 +514,7 @@ def __getitem__(self, key) -> Union["Series", "DataFrame"]:
         except AnalysisException:
             raise KeyError(
                 "[{}] don't exist in columns".format(
-                    [col._jc.toString() for col in data_spark_columns]
+                    [col._jc.toString() for col in data_spark_columns]  # 
type: ignore

Review comment:
      `error: "Column" not callable`, since we tried to access the variable `_jc`

##########
File path: python/pyspark/pandas/indexing.py
##########
@@ -608,7 +608,9 @@ def __setitem__(self, key, value):
             if cond is None:
                 cond = F.lit(True)
             if limit is not None:
-                cond = cond & (self._internal.spark_frame[self._sequence_col] 
< F.lit(limit))
+                cond = cond & (
+                    self._internal.
+                    spark_frame[self._sequence_col] < F.lit(limit))  # type: 
ignore

Review comment:
       Same here

##########
File path: python/pyspark/pandas/indexing.py
##########
@@ -1138,7 +1146,8 @@ def _select_rows_else(
         )
 
     def _get_from_multiindex_column(
-        self, key, missing_keys, labels=None, recursed=0
+        self, key: Optional[Tuple], missing_keys: Optional[List[Tuple]],
+            labels: Optional[List[Tuple]] = None, recursed: int = 0

Review comment:
       Updated

##########
File path: python/pyspark/pandas/indexing.py
##########
@@ -1246,7 +1255,8 @@ def _select_cols_by_iterable(
                     % (len(cast(Sized, cols_sel)), 
len(self._internal.column_labels))
                 )
             if isinstance(cols_sel, pd.Series):
-                if not 
cols_sel.index.sort_values().equals(self._psdf.columns.sort_values()):
+                if not cols_sel.index.sort_values().equals(
+                        self._psdf.columns.sort_values()):  # type: ignore

Review comment:
       Updated

##########
File path: python/pyspark/pandas/indexing.py
##########
@@ -608,7 +608,9 @@ def __setitem__(self, key, value):
             if cond is None:
                 cond = F.lit(True)
             if limit is not None:
-                cond = cond & (self._internal.spark_frame[self._sequence_col] 
< F.lit(limit))
+                cond = cond & (
+                    self._internal.
+                    spark_frame[self._sequence_col] < F.lit(limit))  # type: 
ignore

Review comment:
       `error: "LocIndexerLike" has no attribute "_sequence_col"`

##########
File path: python/pyspark/pandas/indexing.py
##########
@@ -1146,9 +1146,13 @@ def _select_rows_else(
         )
 
     def _get_from_multiindex_column(
-        self, key: Optional[Tuple], missing_keys: Optional[List[Tuple]],
-            labels: Optional[List[Tuple]] = None, recursed: int = 0
-    ) -> Tuple[List[Tuple], Optional[List[spark.Column]], Any, bool, 
Optional[Tuple]]:
+        self,
+        key: Optional[Tuple],
+        missing_keys: Optional[List[Tuple]],
+        labels: Optional[List[Tuple]] = None,
+        recursed: int = 0
+    ) -> Tuple[List[Tuple], Optional[List[spark.Column]], Any, bool,
+               Optional[Tuple]]:

Review comment:
      Sure. Thanks for pointing that out.
  I will pay more attention to coding style next time.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to