This is an automated email from the ASF dual-hosted git repository. gurwls223 pushed a commit to branch branch-3.2 in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-3.2 by this push: new 9cf1db3 [SPARK-35684][INFRA][PYTHON] Bump up mypy version in GitHub Actions 9cf1db3 is described below commit 9cf1db33c732d5b79d88d307fa278b2540e1437f Author: Hyukjin Kwon <gurwls...@apache.org> AuthorDate: Wed Jul 7 13:26:28 2021 +0900 [SPARK-35684][INFRA][PYTHON] Bump up mypy version in GitHub Actions ### What changes were proposed in this pull request? This PR proposes to bump up the mypy version to 0.910 which is the latest. ### Why are the changes needed? To catch the type hint mistakes better in PySpark. ### Does this PR introduce _any_ user-facing change? No ### How was this patch tested? GitHub Actions should test it out. Closes #33223 from HyukjinKwon/SPARK-35684. Authored-by: Hyukjin Kwon <gurwls...@apache.org> Signed-off-by: Hyukjin Kwon <gurwls...@apache.org> (cherry picked from commit 16c195ccfb6caa4d3489ef17dfadc11bf19a6b9c) Signed-off-by: Hyukjin Kwon <gurwls...@apache.org> --- .github/workflows/build_and_test.yml | 4 +--- python/mypy.ini | 3 +++ python/pyspark/pandas/frame.py | 4 ++-- python/pyspark/pandas/indexes/base.py | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 3e5031b..74fc6ba 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -365,9 +365,7 @@ jobs: # See also https://github.com/sphinx-doc/sphinx/issues/7551. # Jinja2 3.0.0+ causes error when building with Sphinx. # See also https://issues.apache.org/jira/browse/SPARK-35375. - # TODO(SPARK-35684, SPARK-35683): Bump up the mypy version. This is blocked by - # Index.difference logic issue. 
- python3.6 -m pip install flake8 pydata_sphinx_theme 'mypy==0.812' numpydoc 'jinja2<3.0.0' 'black==21.5b2' + python3.6 -m pip install flake8 pydata_sphinx_theme 'mypy==0.910' numpydoc 'jinja2<3.0.0' 'black==21.5b2' - name: Install R linter dependencies and SparkR run: | apt-get install -y libcurl4-openssl-dev libgit2-dev libssl-dev libxml2-dev diff --git a/python/mypy.ini b/python/mypy.ini index a780266..189e2bb 100644 --- a/python/mypy.ini +++ b/python/mypy.ini @@ -153,3 +153,6 @@ ignore_missing_imports = True [mypy-sklearn.*] ignore_missing_imports = True + +[mypy-tabulate.*] +ignore_missing_imports = True diff --git a/python/pyspark/pandas/frame.py b/python/pyspark/pandas/frame.py index c500ee7..6189e17 100644 --- a/python/pyspark/pandas/frame.py +++ b/python/pyspark/pandas/frame.py @@ -351,7 +351,7 @@ def _create_tuple_for_frame_type(params: Any) -> object: from pyspark.pandas.typedef import NameTypeHolder if isinstance(params, zip): # type: ignore - params = [slice(name, tpe) for name, tpe in params] + params = [slice(name, tpe) for name, tpe in params] # type: ignore if isinstance(params, slice): params = (params,) @@ -8802,7 +8802,7 @@ defaultdict(<class 'list'>, {'col..., 'col...})] [scol.alias(index_column) for scol, index_column in zip(scols, index_columns)] ) else: - psser = ps.Series(list(index)) + psser = ps.Series(list(index)) # type: ps.Series labels = psser._internal.spark_frame.select(psser.spark.column.alias(index_columns[0])) index_names = self._internal.index_names diff --git a/python/pyspark/pandas/indexes/base.py b/python/pyspark/pandas/indexes/base.py index c06caf0..33b817b 100644 --- a/python/pyspark/pandas/indexes/base.py +++ b/python/pyspark/pandas/indexes/base.py @@ -2014,7 +2014,7 @@ class Index(IndexOpsMixin): [isinstance(item, tuple) for item in other] ) if is_other_list_of_tuples: - other = MultiIndex.from_tuples(other) + other = MultiIndex.from_tuples(other) # type: ignore elif isinstance(other, Series): other = Index(other) else: 
--------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org For additional commands, e-mail: commits-help@spark.apache.org