This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 2a97e89  [SPARK-37620][FOLLOW-UP][PYTHON] Drop obsolete type: ignores
2a97e89 is described below

commit 2a97e897346b0668c7c0b3056d14aec83d3d4b1a
Author: zero323 <[email protected]>
AuthorDate: Sun Dec 19 17:09:00 2021 +0900

    [SPARK-37620][FOLLOW-UP][PYTHON] Drop obsolete type: ignores
    
    ### What changes were proposed in this pull request?
    
    This PR drops a bunch of `type: ignores` identified as obsolete during the review of SPARK-37620.
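    
    For illustration, here is a minimal, self-contained sketch (hypothetical names, not PySpark's actual classes) of how such an ignore goes stale: once the attribute mypy used to complain about gains a type annotation, the suppression no longer suppresses anything.
    
    ```python
    from typing import Optional
    
    class JVMView:
        # Stand-in for the py4j gateway object reached via `sc._jvm`.
        def toSeq(self, cols: list) -> list:
            return list(cols)
    
    class Context:
        # While `_jvm` was untyped, call sites needed
        # `# type: ignore[attr-defined]`; with this annotation,
        # those ignores become unused and can be dropped.
        _jvm: Optional[JVMView] = None
    
    def to_seq(ctx: Context, cols: list) -> list:
        assert ctx._jvm is not None  # narrows Optional[JVMView] to JVMView
        return ctx._jvm.toSeq(cols)  # no ignore needed here anymore
    ```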
    
    ### Why are the changes needed?
    
    They are no longer necessary.
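    
    As a side note, mypy can surface such leftovers on its own when run with `--warn-unused-ignores` (or `warn_unused_ignores = True` in the config file), which reports every `# type: ignore` comment that no longer suppresses an error.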
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    Existing tests.
    
    Closes #34944 from zero323/SPARK-37620-FOLLOW-UP.
    
    Authored-by: zero323 <[email protected]>
    Signed-off-by: Hyukjin Kwon <[email protected]>
---
 python/pyspark/sql/column.py            |  2 +-
 python/pyspark/sql/context.py           | 11 +++--------
 python/pyspark/sql/observation.py       |  2 +-
 python/pyspark/sql/pandas/conversion.py |  4 +---
 python/pyspark/sql/readwriter.py        | 12 ++----------
 python/pyspark/sql/streaming.py         |  6 ++++--
 python/pyspark/util.py                  | 14 +++++---------
 7 files changed, 17 insertions(+), 34 deletions(-)

diff --git a/python/pyspark/sql/column.py b/python/pyspark/sql/column.py
index 121e26e..dce0cc6 100644
--- a/python/pyspark/sql/column.py
+++ b/python/pyspark/sql/column.py
@@ -85,7 +85,7 @@ def _to_seq(
     if converter:
         cols = [converter(c) for c in cols]
     assert sc._jvm is not None
-    return sc._jvm.PythonUtils.toSeq(cols)  # type: ignore[attr-defined]
+    return sc._jvm.PythonUtils.toSeq(cols)
 
 
 def _to_list(
diff --git a/python/pyspark/sql/context.py b/python/pyspark/sql/context.py
index 6bd8436..6ab70ee 100644
--- a/python/pyspark/sql/context.py
+++ b/python/pyspark/sql/context.py
@@ -171,10 +171,7 @@ class SQLContext:
         ):
             assert sc._jvm is not None
             jsqlContext = (
-                sc._jvm.SparkSession.builder()  # type: ignore[attr-defined]
-                .sparkContext(sc._jsc.sc())  # type: ignore[attr-defined]
-                .getOrCreate()
-                .sqlContext()
+                sc._jvm.SparkSession.builder().sparkContext(sc._jsc.sc()).getOrCreate().sqlContext()
             )
             sparkSession = SparkSession(sc, jsqlContext.sparkSession())
             cls(sc, sparkSession, jsqlContext)
@@ -734,11 +731,9 @@ class HiveContext(SQLContext):
         you may end up launching multiple derby instances and encounter with incredibly
         confusing error messages.
         """
-        jsc = sparkContext._jsc.sc()  # type: ignore[attr-defined]
+        jsc = sparkContext._jsc.sc()
         assert sparkContext._jvm is not None
-        jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(  # type: ignore[attr-defined]
-            jsc, False
-        )
+        jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
         return cls(sparkContext, jtestHive)
 
     def refreshTable(self, tableName: str) -> None:
diff --git a/python/pyspark/sql/observation.py b/python/pyspark/sql/observation.py
index 0e48408..e5d426a 100644
--- a/python/pyspark/sql/observation.py
+++ b/python/pyspark/sql/observation.py
@@ -102,7 +102,7 @@ class Observation:
         assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
         assert self._jo is None, "an Observation can be used with a DataFrame only once"
 
-        self._jvm = df._sc._jvm  # type: ignore[attr-defined]
+        self._jvm = df._sc._jvm
         assert self._jvm is not None
         cls = self._jvm.org.apache.spark.sql.Observation
         self._jo = cls(self._name) if self._name is not None else cls()
diff --git a/python/pyspark/sql/pandas/conversion.py b/python/pyspark/sql/pandas/conversion.py
index 4dcbb29..4b6e07e 100644
--- a/python/pyspark/sql/pandas/conversion.py
+++ b/python/pyspark/sql/pandas/conversion.py
@@ -614,9 +614,7 @@ class SparkConversionMixin:
             return self._jvm.ArrowRDDServer(jsqlContext)
 
         # Create Spark DataFrame from Arrow stream file, using one batch per partition
-        jrdd = self._sc._serialize_to_jvm(  # type: ignore[attr-defined]
-            arrow_data, ser, reader_func, create_RDD_server
-        )
+        jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
         assert self._jvm is not None
         jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
         df = DataFrame(jdf, self._wrapped)
diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index ab54ad9..df4a089 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -181,11 +181,7 @@ class DataFrameReader(OptionUtils):
             if type(path) != list:
                 path = [path]  # type: ignore[list-item]
             assert self._spark._sc._jvm is not None
-            return self._df(
-                self._jreader.load(
-                    self._spark._sc._jvm.PythonUtils.toSeq(path)  # type: ignore[attr-defined]
-                )
-            )
+            return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path)))
         else:
             return self._df(self._jreader.load())
 
@@ -546,11 +542,7 @@ class DataFrameReader(OptionUtils):
             path = [path]
         if type(path) == list:
             assert self._spark._sc._jvm is not None
-            return self._df(
-                self._jreader.csv(
-                    self._spark._sc._jvm.PythonUtils.toSeq(path)  # type: ignore[attr-defined]
-                )
-            )
+            return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path)))
         elif isinstance(path, RDD):
 
             def func(iterator):
diff --git a/python/pyspark/sql/streaming.py b/python/pyspark/sql/streaming.py
index 0965093..b4d3056 100644
--- a/python/pyspark/sql/streaming.py
+++ b/python/pyspark/sql/streaming.py
@@ -1272,8 +1272,10 @@ class DataStreamWriter:
         serializer = AutoBatchedSerializer(CPickleSerializer())
         wrapped_func = _wrap_function(self._spark._sc, func, serializer, serializer)
         assert self._spark._sc._jvm is not None
-        jForeachWriter = self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter(  # type: ignore[attr-defined]
-            wrapped_func, self._df._jdf.schema()
+        jForeachWriter = (
+            self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter(
+                wrapped_func, self._df._jdf.schema()
+            )
         )
         self._jwrite.foreach(jForeachWriter)
         return self
diff --git a/python/pyspark/util.py b/python/pyspark/util.py
index bccd678..de44ab6 100644
--- a/python/pyspark/util.py
+++ b/python/pyspark/util.py
@@ -320,7 +320,7 @@ def inheritable_thread_target(f: Callable) -> Callable:
     """
     from pyspark import SparkContext
 
-    if isinstance(SparkContext._gateway, ClientServer):  # type: ignore[attr-defined]
+    if isinstance(SparkContext._gateway, ClientServer):
         # Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
 
         # NOTICE the internal difference vs `InheritableThread`. `InheritableThread`
@@ -334,9 +334,7 @@ def inheritable_thread_target(f: Callable) -> Callable:
             try:
                 # Set local properties in child thread.
                 assert SparkContext._active_spark_context is not None
-                SparkContext._active_spark_context._jsc.sc().setLocalProperties(  # type: ignore[attr-defined]
-                    properties
-                )
+                SparkContext._active_spark_context._jsc.sc().setLocalProperties(properties)
                 return f(*args, **kwargs)
             finally:
                 InheritableThread._clean_py4j_conn_for_current_thread()
@@ -372,15 +370,13 @@ class InheritableThread(threading.Thread):
     def __init__(self, target: Callable, *args: Any, **kwargs: Any):
         from pyspark import SparkContext
 
-        if isinstance(SparkContext._gateway, ClientServer):  # type: ignore[attr-defined]
+        if isinstance(SparkContext._gateway, ClientServer):
             # Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
             def copy_local_properties(*a: Any, **k: Any) -> Any:
                # self._props is set before starting the thread to match the behavior with JVM.
                 assert hasattr(self, "_props")
                 assert SparkContext._active_spark_context is not None
-                SparkContext._active_spark_context._jsc.sc().setLocalProperties(  # type: ignore[attr-defined]
-                    self._props
-                )
+                SparkContext._active_spark_context._jsc.sc().setLocalProperties(self._props)
                 try:
                     return target(*a, **k)
                 finally:
@@ -397,7 +393,7 @@ class InheritableThread(threading.Thread):
     def start(self) -> None:
         from pyspark import SparkContext
 
-        if isinstance(SparkContext._gateway, ClientServer):  # type: ignore[attr-defined]
+        if isinstance(SparkContext._gateway, ClientServer):
             # Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
 
            # Local property copy should happen in Thread.start to mimic JVM's behavior.
