This is an automated email from the ASF dual-hosted git repository.
zero323 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new add939f [SPARK-37409][PYTHON][ML] Inline hints for pyspark.ml.pipeline
add939f is described below
commit add939f2c852f49f18fd770382f375e00b76e679
Author: zero323 <[email protected]>
AuthorDate: Tue Feb 8 01:51:12 2022 +0100
[SPARK-37409][PYTHON][ML] Inline hints for pyspark.ml.pipeline
### What changes were proposed in this pull request?
This PR migrates the `pyspark.ml.pipeline` type annotations from the stub file to
inline type hints.
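To illustrate the pattern (a minimal, hedged sketch — the `Box` class and `B` TypeVar below are invented for illustration and are not part of pyspark): annotations that previously lived only in a companion `.pyi` stub are written directly in the `.py` module, and methods that return `self` use a TypeVar bound to the class so subclasses keep their precise type, as `_resetUid` now does.
    from typing import TypeVar
    B = TypeVar("B", bound="Box")
    class Box:
        # Before: the hint lived only in box.pyi as
        #     def set_value(self, value: int) -> "Box": ...
        # After: the hint is inline, and the self-typed TypeVar means a
        # subclass call like SubBox().set_value(1) is inferred as SubBox.
        def set_value(self: B, value: int) -> B:
            self._value = value
            return self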
### Why are the changes needed?
Part of the ongoing migration of type hints from stub files to inline annotations.
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
Existing tests.
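As an additional illustration (not part of the test suite), the inline generics let a type checker infer result types from the public API without the stub file; a small usage sketch, assuming a local Spark session (the DataFrame contents are arbitrary):
    from pyspark.ml import Pipeline, PipelineModel
    from pyspark.ml.feature import Tokenizer
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([("a b c",)], ["text"])
    # Pipeline now subclasses Estimator["PipelineModel"], so fit() is typed
    # to return PipelineModel and this annotation checks cleanly under mypy.
    pipeline = Pipeline(stages=[Tokenizer(inputCol="text", outputCol="words")])
    model: PipelineModel = pipeline.fit(df)
    words = model.transform(df)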
Closes #35408 from zero323/SPARK-37409.
Authored-by: zero323 <[email protected]>
Signed-off-by: zero323 <[email protected]>
---
python/pyspark/ml/param/__init__.py | 2 +-
python/pyspark/ml/pipeline.py | 131 ++++++++++++++++++++++--------------
python/pyspark/ml/pipeline.pyi | 98 ---------------------------
python/pyspark/ml/util.py | 7 +-
4 files changed, 84 insertions(+), 154 deletions(-)
diff --git a/python/pyspark/ml/param/__init__.py b/python/pyspark/ml/param/__init__.py
index fd5ed63..ee2c289 100644
--- a/python/pyspark/ml/param/__init__.py
+++ b/python/pyspark/ml/param/__init__.py
@@ -569,7 +569,7 @@ class Params(Identifiable, metaclass=ABCMeta):
to._set(**{param.name: paramMap[param]})
return to
- def _resetUid(self, newUid: Any) -> "Params":
+ def _resetUid(self: "P", newUid: Any) -> "P":
"""
Changes the uid of this instance. This updates both
the stored uid and the parent uid of params and param maps.
diff --git a/python/pyspark/ml/pipeline.py b/python/pyspark/ml/pipeline.py
index 57ca47e..1da86ec 100644
--- a/python/pyspark/ml/pipeline.py
+++ b/python/pyspark/ml/pipeline.py
@@ -16,6 +16,8 @@
#
import os
+from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast, TYPE_CHECKING
+
from pyspark import keyword_only, since, SparkContext
from pyspark.ml.base import Estimator, Model, Transformer
from pyspark.ml.param import Param, Params
@@ -28,14 +30,20 @@ from pyspark.ml.util import (
DefaultParamsWriter,
MLWriter,
MLReader,
+ JavaMLReadable,
JavaMLWritable,
)
from pyspark.ml.wrapper import JavaParams
from pyspark.ml.common import inherit_doc
+from pyspark.sql.dataframe import DataFrame
+
+if TYPE_CHECKING:
+ from pyspark.ml._typing import ParamMap, PipelineStage
+ from py4j.java_gateway import JavaObject # type: ignore[import]
@inherit_doc
-class Pipeline(Estimator, MLReadable, MLWritable):
+class Pipeline(Estimator["PipelineModel"], MLReadable["Pipeline"], MLWritable):
"""
A simple pipeline, which acts as an estimator. A Pipeline consists
of a sequence of stages, each of which is either an
@@ -56,10 +64,14 @@ class Pipeline(Estimator, MLReadable, MLWritable):
.. versionadded:: 1.3.0
"""
- stages = Param(Params._dummy(), "stages", "a list of pipeline stages")
+ stages: Param[List["PipelineStage"]] = Param(
+ Params._dummy(), "stages", "a list of pipeline stages"
+ )
+
+ _input_kwargs: Dict[str, Any]
@keyword_only
- def __init__(self, *, stages=None):
+ def __init__(self, *, stages: Optional[List["PipelineStage"]] = None):
"""
__init__(self, \\*, stages=None)
"""
@@ -67,7 +79,7 @@ class Pipeline(Estimator, MLReadable, MLWritable):
kwargs = self._input_kwargs
self.setParams(**kwargs)
- def setStages(self, value):
+ def setStages(self, value: List["PipelineStage"]) -> "Pipeline":
"""
Set pipeline stages.
@@ -87,7 +99,7 @@ class Pipeline(Estimator, MLReadable, MLWritable):
return self._set(stages=value)
@since("1.3.0")
- def getStages(self):
+ def getStages(self) -> List["PipelineStage"]:
"""
Get pipeline stages.
"""
@@ -95,7 +107,7 @@ class Pipeline(Estimator, MLReadable, MLWritable):
@keyword_only
@since("1.3.0")
- def setParams(self, *, stages=None):
+ def setParams(self, *, stages: Optional[List["PipelineStage"]] = None) -> "Pipeline":
"""
setParams(self, \\*, stages=None)
Sets params for Pipeline.
@@ -103,7 +115,7 @@ class Pipeline(Estimator, MLReadable, MLWritable):
kwargs = self._input_kwargs
return self._set(**kwargs)
- def _fit(self, dataset):
+ def _fit(self, dataset: DataFrame) -> "PipelineModel":
stages = self.getStages()
for stage in stages:
if not (isinstance(stage, Estimator) or isinstance(stage, Transformer)):
@@ -112,7 +124,7 @@ class Pipeline(Estimator, MLReadable, MLWritable):
for i, stage in enumerate(stages):
if isinstance(stage, Estimator):
indexOfLastEstimator = i
- transformers = []
+ transformers: List[Transformer] = []
for i, stage in enumerate(stages):
if i <= indexOfLastEstimator:
if isinstance(stage, Transformer):
@@ -124,10 +136,10 @@ class Pipeline(Estimator, MLReadable, MLWritable):
if i < indexOfLastEstimator:
dataset = model.transform(dataset)
else:
- transformers.append(stage)
+ transformers.append(cast(Transformer, stage))
return PipelineModel(transformers)
- def copy(self, extra=None):
+ def copy(self, extra: Optional["ParamMap"] = None) -> "Pipeline":
"""
Creates a copy of this instance.
@@ -150,21 +162,21 @@ class Pipeline(Estimator, MLReadable, MLWritable):
return that.setStages(stages)
@since("2.0.0")
- def write(self):
+ def write(self) -> MLWriter:
"""Returns an MLWriter instance for this ML instance."""
allStagesAreJava = PipelineSharedReadWrite.checkStagesForJava(self.getStages())
if allStagesAreJava:
- return JavaMLWriter(self)
+ return JavaMLWriter(self) # type: ignore[arg-type]
return PipelineWriter(self)
@classmethod
@since("2.0.0")
- def read(cls):
+ def read(cls) -> "PipelineReader":
"""Returns an MLReader instance for this class."""
return PipelineReader(cls)
@classmethod
- def _from_java(cls, java_stage):
+ def _from_java(cls, java_stage: "JavaObject") -> "Pipeline":
"""
Given a Java Pipeline, create and return a Python wrapper of it.
Used for ML persistence.
@@ -172,12 +184,14 @@ class Pipeline(Estimator, MLReadable, MLWritable):
# Create a new instance of this stage.
py_stage = cls()
# Load information from java_stage to the instance.
- py_stages = [JavaParams._from_java(s) for s in java_stage.getStages()]
+ py_stages: List["PipelineStage"] = [
+ JavaParams._from_java(s) for s in java_stage.getStages()
+ ]
py_stage.setStages(py_stages)
py_stage._resetUid(java_stage.uid())
return py_stage
- def _to_java(self):
+ def _to_java(self) -> "JavaObject":
"""
Transfer this instance to a Java Pipeline. Used for ML persistence.
@@ -188,10 +202,12 @@ class Pipeline(Estimator, MLReadable, MLWritable):
"""
gateway = SparkContext._gateway
+ assert gateway is not None and SparkContext._jvm is not None
+
cls = SparkContext._jvm.org.apache.spark.ml.PipelineStage
java_stages = gateway.new_array(cls, len(self.getStages()))
for idx, stage in enumerate(self.getStages()):
- java_stages[idx] = stage._to_java()
+ java_stages[idx] = cast(JavaParams, stage)._to_java()
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.Pipeline", self.uid)
_java_obj.setStages(java_stages)
@@ -205,30 +221,30 @@ class PipelineWriter(MLWriter):
(Private) Specialization of :py:class:`MLWriter` for :py:class:`Pipeline` types
"""
- def __init__(self, instance):
+ def __init__(self, instance: Pipeline):
super(PipelineWriter, self).__init__()
self.instance = instance
- def saveImpl(self, path):
+ def saveImpl(self, path: str) -> None:
stages = self.instance.getStages()
PipelineSharedReadWrite.validateStages(stages)
PipelineSharedReadWrite.saveImpl(self.instance, stages, self.sc, path)
@inherit_doc
-class PipelineReader(MLReader):
+class PipelineReader(MLReader[Pipeline]):
"""
(Private) Specialization of :py:class:`MLReader` for :py:class:`Pipeline` types
"""
- def __init__(self, cls):
+ def __init__(self, cls: Type[Pipeline]):
super(PipelineReader, self).__init__()
self.cls = cls
- def load(self, path):
+ def load(self, path: str) -> Pipeline:
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
if "language" not in metadata["paramMap"] or
metadata["paramMap"]["language"] != "Python":
- return JavaMLReader(self.cls).load(path)
+ return JavaMLReader(cast(Type["JavaMLReadable[Pipeline]"], self.cls)).load(path)
else:
uid, stages = PipelineSharedReadWrite.load(metadata, self.sc, path)
return Pipeline(stages=stages)._resetUid(uid)
@@ -240,53 +256,55 @@ class PipelineModelWriter(MLWriter):
(Private) Specialization of :py:class:`MLWriter` for :py:class:`PipelineModel` types
"""
- def __init__(self, instance):
+ def __init__(self, instance: "PipelineModel"):
super(PipelineModelWriter, self).__init__()
self.instance = instance
- def saveImpl(self, path):
+ def saveImpl(self, path: str) -> None:
stages = self.instance.stages
- PipelineSharedReadWrite.validateStages(stages)
- PipelineSharedReadWrite.saveImpl(self.instance, stages, self.sc, path)
+ PipelineSharedReadWrite.validateStages(cast(List["PipelineStage"], stages))
+ PipelineSharedReadWrite.saveImpl(
+ self.instance, cast(List["PipelineStage"], stages), self.sc, path
+ )
@inherit_doc
-class PipelineModelReader(MLReader):
+class PipelineModelReader(MLReader["PipelineModel"]):
"""
(Private) Specialization of :py:class:`MLReader` for :py:class:`PipelineModel` types
"""
- def __init__(self, cls):
+ def __init__(self, cls: Type["PipelineModel"]):
super(PipelineModelReader, self).__init__()
self.cls = cls
- def load(self, path):
+ def load(self, path: str) -> "PipelineModel":
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
if "language" not in metadata["paramMap"] or
metadata["paramMap"]["language"] != "Python":
- return JavaMLReader(self.cls).load(path)
+ return JavaMLReader(cast(Type["JavaMLReadable[PipelineModel]"], self.cls)).load(path)
else:
uid, stages = PipelineSharedReadWrite.load(metadata, self.sc, path)
- return PipelineModel(stages=stages)._resetUid(uid)
+ return PipelineModel(stages=cast(List[Transformer], stages))._resetUid(uid)
@inherit_doc
-class PipelineModel(Model, MLReadable, MLWritable):
+class PipelineModel(Model, MLReadable["PipelineModel"], MLWritable):
"""
Represents a compiled pipeline with transformers and fitted models.
.. versionadded:: 1.3.0
"""
- def __init__(self, stages):
+ def __init__(self, stages: List[Transformer]):
super(PipelineModel, self).__init__()
self.stages = stages
- def _transform(self, dataset):
+ def _transform(self, dataset: DataFrame) -> DataFrame:
for t in self.stages:
dataset = t.transform(dataset)
return dataset
- def copy(self, extra=None):
+ def copy(self, extra: Optional["ParamMap"] = None) -> "PipelineModel":
"""
Creates a copy of this instance.
@@ -301,33 +319,35 @@ class PipelineModel(Model, MLReadable, MLWritable):
return PipelineModel(stages)
@since("2.0.0")
- def write(self):
+ def write(self) -> MLWriter:
"""Returns an MLWriter instance for this ML instance."""
- allStagesAreJava = PipelineSharedReadWrite.checkStagesForJava(self.stages)
+ allStagesAreJava = PipelineSharedReadWrite.checkStagesForJava(
+ cast(List["PipelineStage"], self.stages)
+ )
if allStagesAreJava:
- return JavaMLWriter(self)
+ return JavaMLWriter(self) # type: ignore[arg-type]
return PipelineModelWriter(self)
@classmethod
@since("2.0.0")
- def read(cls):
+ def read(cls) -> PipelineModelReader:
"""Returns an MLReader instance for this class."""
return PipelineModelReader(cls)
@classmethod
- def _from_java(cls, java_stage):
+ def _from_java(cls, java_stage: "JavaObject") -> "PipelineModel":
"""
Given a Java PipelineModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
# Load information from java_stage to the instance.
- py_stages = [JavaParams._from_java(s) for s in java_stage.stages()]
- py_stages: List[Transformer] = [JavaParams._from_java(s) for s in java_stage.stages()]
# Create a new instance of this stage.
py_stage = cls(py_stages)
py_stage._resetUid(java_stage.uid())
return py_stage
- def _to_java(self):
+ def _to_java(self) -> "JavaObject":
"""
Transfer this instance to a Java PipelineModel. Used for ML persistence.
@@ -335,10 +355,12 @@ class PipelineModel(Model, MLReadable, MLWritable):
"""
gateway = SparkContext._gateway
+ assert gateway is not None and SparkContext._jvm is not None
+
cls = SparkContext._jvm.org.apache.spark.ml.Transformer
java_stages = gateway.new_array(cls, len(self.stages))
for idx, stage in enumerate(self.stages):
- java_stages[idx] = stage._to_java()
+ java_stages[idx] = cast(JavaParams, stage)._to_java()
_java_obj = JavaParams._new_java_obj(
"org.apache.spark.ml.PipelineModel", self.uid, java_stages
@@ -357,11 +379,11 @@ class PipelineSharedReadWrite:
"""
@staticmethod
- def checkStagesForJava(stages):
+ def checkStagesForJava(stages: List["PipelineStage"]) -> bool:
return all(isinstance(stage, JavaMLWritable) for stage in stages)
@staticmethod
- def validateStages(stages):
+ def validateStages(stages: List["PipelineStage"]) -> None:
"""
Check that all stages are Writable
"""
@@ -375,7 +397,12 @@ class PipelineSharedReadWrite:
)
@staticmethod
- def saveImpl(instance, stages, sc, path):
+ def saveImpl(
+ instance: Union[Pipeline, PipelineModel],
+ stages: List["PipelineStage"],
+ sc: SparkContext,
+ path: str,
+ ) -> None:
"""
Save metadata and stages for a :py:class:`Pipeline` or :py:class:`PipelineModel`
- save metadata to path/metadata
@@ -386,12 +413,14 @@ class PipelineSharedReadWrite:
DefaultParamsWriter.saveMetadata(instance, path, sc, paramMap=jsonParams)
stagesDir = os.path.join(path, "stages")
for index, stage in enumerate(stages):
- stage.write().save(
+ cast(MLWritable, stage).write().save(
PipelineSharedReadWrite.getStagePath(stage.uid, index, len(stages), stagesDir)
)
@staticmethod
- def load(metadata, sc, path):
+ def load(
+ metadata: Dict[str, Any], sc: SparkContext, path: str
+ ) -> Tuple[str, List["PipelineStage"]]:
"""
Load metadata and stages for a :py:class:`Pipeline` or :py:class:`PipelineModel`
@@ -407,12 +436,12 @@ class PipelineSharedReadWrite:
stagePath = PipelineSharedReadWrite.getStagePath(
stageUid, index, len(stageUids), stagesDir
)
- stage = DefaultParamsReader.loadParamsInstance(stagePath, sc)
+ stage: "PipelineStage" = DefaultParamsReader.loadParamsInstance(stagePath, sc)
stages.append(stage)
return (metadata["uid"], stages)
@staticmethod
- def getStagePath(stageUid, stageIdx, numStages, stagesDir):
+ def getStagePath(stageUid: str, stageIdx: int, numStages: int, stagesDir: str) -> str:
"""
Get path for saving the given stage.
"""
diff --git a/python/pyspark/ml/pipeline.pyi b/python/pyspark/ml/pipeline.pyi
deleted file mode 100644
index 7b38900..0000000
--- a/python/pyspark/ml/pipeline.pyi
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-from typing import Any, Dict, List, Optional, Tuple, Type, Union
-
-from pyspark.ml._typing import PipelineStage
-from pyspark.context import SparkContext
-from pyspark.ml.base import Estimator, Model, Transformer
-from pyspark.ml.param import Param
-from pyspark.ml.util import ( # noqa: F401
- DefaultParamsReader as DefaultParamsReader,
- DefaultParamsWriter as DefaultParamsWriter,
- JavaMLReader as JavaMLReader,
- JavaMLWritable as JavaMLWritable,
- JavaMLWriter as JavaMLWriter,
- MLReadable as MLReadable,
- MLReader as MLReader,
- MLWritable as MLWritable,
- MLWriter as MLWriter,
-)
-from pyspark.sql.dataframe import DataFrame
-
-class Pipeline(Estimator[PipelineModel], MLReadable[Pipeline], MLWritable):
- stages: List[PipelineStage]
- def __init__(self, *, stages: Optional[List[PipelineStage]] = ...) -> None: ...
- def _fit(self, dataset: DataFrame) -> PipelineModel: ...
- def setStages(self, stages: List[PipelineStage]) -> Pipeline: ...
- def getStages(self) -> List[PipelineStage]: ...
- def setParams(self, *, stages: Optional[List[PipelineStage]] = ...) -> Pipeline: ...
- def copy(self, extra: Optional[Dict[Param, str]] = ...) -> Pipeline: ...
- def write(self) -> JavaMLWriter: ...
- def save(self, path: str) -> None: ...
- @classmethod
- def read(cls) -> PipelineReader: ...
-
-class PipelineWriter(MLWriter):
- instance: Pipeline
- def __init__(self, instance: Pipeline) -> None: ...
- def saveImpl(self, path: str) -> None: ...
-
-class PipelineReader(MLReader[Pipeline]):
- cls: Type[Pipeline]
- def __init__(self, cls: Type[Pipeline]) -> None: ...
- def load(self, path: str) -> Pipeline: ...
-
-class PipelineModelWriter(MLWriter):
- instance: PipelineModel
- def __init__(self, instance: PipelineModel) -> None: ...
- def saveImpl(self, path: str) -> None: ...
-
-class PipelineModelReader(MLReader[PipelineModel]):
- cls: Type[PipelineModel]
- def __init__(self, cls: Type[PipelineModel]) -> None: ...
- def load(self, path: str) -> PipelineModel: ...
-
-class PipelineModel(Model, MLReadable[PipelineModel], MLWritable):
- stages: List[PipelineStage]
- def __init__(self, stages: List[Transformer]) -> None: ...
- def _transform(self, dataset: DataFrame) -> DataFrame: ...
- def copy(self, extra: Optional[Dict[Param, Any]] = ...) -> PipelineModel: ...
- def write(self) -> JavaMLWriter: ...
- def save(self, path: str) -> None: ...
- @classmethod
- def read(cls) -> PipelineModelReader: ...
-
-class PipelineSharedReadWrite:
- @staticmethod
- def checkStagesForJava(stages: List[PipelineStage]) -> bool: ...
- @staticmethod
- def validateStages(stages: List[PipelineStage]) -> None: ...
- @staticmethod
- def saveImpl(
- instance: Union[Pipeline, PipelineModel],
- stages: List[PipelineStage],
- sc: SparkContext,
- path: str,
- ) -> None: ...
- @staticmethod
- def load(
- metadata: Dict[str, Any], sc: SparkContext, path: str
- ) -> Tuple[str, List[PipelineStage]]: ...
- @staticmethod
- def getStagePath(stageUid: str, stageIdx: int, numStages: int, stagesDir: str) -> str: ...
diff --git a/python/pyspark/ml/util.py b/python/pyspark/ml/util.py
index 1dacffc..019420b 100644
--- a/python/pyspark/ml/util.py
+++ b/python/pyspark/ml/util.py
@@ -32,7 +32,6 @@ if TYPE_CHECKING:
from py4j.java_gateway import JavaGateway, JavaObject
from pyspark.ml._typing import PipelineStage
- from pyspark.ml.param import Param
from pyspark.ml.base import Params
from pyspark.ml.wrapper import JavaWrapper
@@ -429,7 +428,7 @@ class DefaultParamsWriter(MLWriter):
path: str,
sc: SparkContext,
extraMetadata: Optional[Dict[str, Any]] = None,
- paramMap: Optional[Dict[str, "Param"]] = None,
+ paramMap: Optional[Dict[str, Any]] = None,
) -> None:
"""
Saves metadata + Params to: path + "/metadata"
@@ -460,7 +459,7 @@ class DefaultParamsWriter(MLWriter):
instance: "Params",
sc: SparkContext,
extraMetadata: Optional[Dict[str, Any]] = None,
- paramMap: Optional[Dict[str, "Param"]] = None,
+ paramMap: Optional[Dict[str, Any]] = None,
) -> str:
"""
Helper for :py:meth:`DefaultParamsWriter.saveMetadata` which extracts the JSON to save.
@@ -696,7 +695,7 @@ class MetaAlgorithmReadWrite:
if isinstance(pyInstance, Pipeline):
pySubStages = pyInstance.getStages()
elif isinstance(pyInstance, PipelineModel):
- pySubStages = pyInstance.stages
+ pySubStages = cast(List["PipelineStage"], pyInstance.stages)
elif isinstance(pyInstance, _ValidatorParams):
raise ValueError("PySpark does not support nested validator.")
elif isinstance(pyInstance, OneVsRest):
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]