ueshin commented on a change in pull request #34197:
URL: https://github.com/apache/spark/pull/34197#discussion_r728529860
##########
File path: python/pyspark/sql/group.py
##########
@@ -17,16 +17,23 @@
import sys
-from pyspark.sql.column import Column, _to_seq
+from typing import Callable, List, Optional, Type, TYPE_CHECKING, overload, Dict, Union, cast, Tuple
+
+if TYPE_CHECKING:
+ from pyspark.sql._typing import LiteralType
+
+from pyspark.sql.column import Column, _to_seq # type: ignore[attr-defined]
+from pyspark.sql.context import SQLContext
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.pandas.group_ops import PandasGroupedOpsMixin
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
+from py4j.java_gateway import JavaObject # type: ignore[import]
__all__ = ["GroupedData"]
-def dfapi(f):
- def _api(self):
+def dfapi(f: Callable) -> Callable:
+ def _api(self: Type["GroupedData"]) -> DataFrame:
Review comment:
`self: "GroupedData"`?
##########
File path: python/pyspark/sql/group.py
##########
@@ -17,16 +17,23 @@
import sys
-from pyspark.sql.column import Column, _to_seq
+from typing import Callable, List, Optional, Type, TYPE_CHECKING, overload, Dict, Union, cast, Tuple
+
+if TYPE_CHECKING:
+ from pyspark.sql._typing import LiteralType
Review comment:
Shall we put this at the end of the import block?
##########
File path: python/pyspark/sql/group.py
##########
@@ -35,10 +42,13 @@ def _api(self):
return _api
-def df_varargs_api(f):
- def _api(self, *cols):
+def df_varargs_api(f: Callable) -> Callable:
+ def _api(self: Type["GroupedData"], *cols: Column) -> DataFrame:
Review comment:
Also `*cols: str`?
##########
File path: python/pyspark/sql/group.py
##########
@@ -17,16 +17,23 @@
import sys
-from pyspark.sql.column import Column, _to_seq
+from typing import Callable, List, Optional, Type, TYPE_CHECKING, overload, Dict, Union, cast, Tuple
+
+if TYPE_CHECKING:
+ from pyspark.sql._typing import LiteralType
+
+from pyspark.sql.column import Column, _to_seq # type: ignore[attr-defined]
+from pyspark.sql.context import SQLContext
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.pandas.group_ops import PandasGroupedOpsMixin
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
+from py4j.java_gateway import JavaObject # type: ignore[import]
Review comment:
Shall we put this line between builtin libraries and pyspark?
##########
File path: python/pyspark/sql/group.py
##########
@@ -35,10 +42,13 @@ def _api(self):
return _api
-def df_varargs_api(f):
- def _api(self, *cols):
+def df_varargs_api(f: Callable) -> Callable:
+ def _api(self: Type["GroupedData"], *cols: Column) -> DataFrame:
Review comment:
ditto.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]