ryanthompson591 commented on code in PR #22088: URL: https://github.com/apache/beam/pull/22088#discussion_r911326977
########## sdks/python/apache_beam/examples/inference/sklearn_japanese_housing_regression.py: ########## @@ -0,0 +1,161 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""A pipeline that uses RunInference API on a regression about housing prices. + +This example uses the japanese housing data from kaggle. + +Since the data has missing fields, this example illustrates how to split +data and assign it to appropriate models. The predictions are then recombined. + +In order to set this example up, you will need two things. +1. Build models (or use ours) and reference those via the model directory. +2. Download the data from kaggle and host it. 
+""" + +import argparse +from typing import Iterable + +import apache_beam as beam +from apache_beam.io.filesystems import FileSystems +from apache_beam.ml.inference.base import RunInference +from apache_beam.ml.inference.sklearn_inference import ModelFileType +from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerPandas +from apache_beam.options.pipeline_options import PipelineOptions +from apache_beam.options.pipeline_options import SetupOptions +import pandas + +MODELS = [{ + 'name': 'all_features', + 'required_features': [ + 'Area', + 'Year', + 'MinTimeToNearestStation', + 'MaxTimeToNearestStation', + 'TotalFloorArea', + 'Frontage', + 'Breadth', + 'BuildingYear' + ] +}, + { + 'name': 'floor_area', + 'required_features': ['Area', 'Year', 'TotalFloorArea'] + }, + { + 'name': 'stations', + 'required_features': [ + 'Area', + 'Year', + 'MinTimeToNearestStation', + 'MaxTimeToNearestStation' + ] + }, { + 'name': 'no_features', 'required_features': ['Area', 'Year'] + }] + + +def sort_by_features(dataframe, max_size): + """ Partitions the dataframe by what data it has available.""" + for i, model in enumerate(MODELS): + required_features = dataframe[model['required_features']] + if not required_features.isnull().any().any(): + return i + return -1 + + +def load_dataframe(file_name: str) -> Iterable[pandas.DataFrame]: Review Comment: I can make it a DoFn, however, we do support lightweight functions. See 4.2.1.3. Lightweight DoFns and other abstractions in the same docs you referenced. ########## sdks/python/apache_beam/examples/inference/sklearn_japanese_housing_regression.py: ########## @@ -0,0 +1,161 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""A pipeline that uses RunInference API on a regression about housing prices. + +This example uses the japanese housing data from kaggle. Review Comment: Done ########## sdks/python/apache_beam/examples/inference/sklearn_japanese_housing_regression.py: ########## @@ -0,0 +1,164 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""A pipeline that uses RunInference API on a regression about housing prices. + +This example uses the japanese housing data from kaggle. 
+https://www.kaggle.com/datasets/nishiodens/japan-real-estate-transaction-prices + +Since the data has missing fields, this example illustrates how to split +data and assign it to the models that are trained on different subsets of +features. The predictions are then recombined. + +In order to set this example up, you will need two things. +1. Build models (or use ours) and reference those via the model directory. +2. Download the data from kaggle and host it. +""" + +import argparse +from typing import Iterable + +import apache_beam as beam +from apache_beam.io.filesystems import FileSystems +from apache_beam.ml.inference.base import RunInference +from apache_beam.ml.inference.sklearn_inference import ModelFileType +from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerPandas +from apache_beam.options.pipeline_options import PipelineOptions +from apache_beam.options.pipeline_options import SetupOptions +import pandas + +MODELS = [{ + 'name': 'all_features', + 'required_features': [ + 'Area', + 'Year', + 'MinTimeToNearestStation', + 'MaxTimeToNearestStation', + 'TotalFloorArea', + 'Frontage', + 'Breadth', + 'BuildingYear' + ] +}, + { + 'name': 'floor_area', + 'required_features': ['Area', 'Year', 'TotalFloorArea'] + }, + { + 'name': 'stations', + 'required_features': [ + 'Area', + 'Year', + 'MinTimeToNearestStation', + 'MaxTimeToNearestStation' + ] + }, { + 'name': 'no_features', 'required_features': ['Area', 'Year'] + }] + + +def sort_by_features(dataframe, max_size): + """ Partitions the dataframe by what data it has available.""" + for i, model in enumerate(MODELS): + required_features = dataframe[model['required_features']] + if not required_features.isnull().any().any(): + return i + return -1 + + +class LoadDataframe(beam.DoFn): + def process(self, file_name: str) -> Iterable[pandas.DataFrame]: + """ Loads data files as a pandas dataframe.""" + file = FileSystems.open(file_name, 'rb') + dataframe = pandas.read_csv(file) + for i in 
range(dataframe.shape[0]): + yield dataframe.iloc[[i]] + + +def report_predictions(prediction_result): + true_result = prediction_result.example['TradePrice'].values[0] + inference = prediction_result.inference + return 'True Price %.1f, Predicted Price %f' % (true_result, inference) + + +def parse_known_args(argv): + """Parses args for the workflow.""" + parser = argparse.ArgumentParser() + parser.add_argument( + '--input', + dest='input', + required=True, + help='A metadata file with all models, with references to models and ' + 'information about all the files and data.') + parser.add_argument( + '--model_path', + dest='model_path', Review Comment: Hrm, I prefer model_path. I think even though there are many models each model will have the same path. I was trying to think of why, so if you had a path containing libraries, you would probably call it lib_path or library_path rather than libraries_path. ########## sdks/python/apache_beam/examples/inference/sklearn_japanese_housing_regression.py: ########## @@ -0,0 +1,164 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""A pipeline that uses RunInference API on a regression about housing prices. + +This example uses the japanese housing data from kaggle. 
+https://www.kaggle.com/datasets/nishiodens/japan-real-estate-transaction-prices + +Since the data has missing fields, this example illustrates how to split +data and assign it to the models that are trained on different subsets of +features. The predictions are then recombined. + +In order to set this example up, you will need two things. +1. Build models (or use ours) and reference those via the model directory. Review Comment: I added details to the README.md file. This was set up to be a performance test and less of an integration test. Anand and I plan to automate this into a test in a separate PR focusing specifically on that. Also, to respond to the comment that we can have users download them, they don't even need to download them, since they can just point to our publicly available models. Right now I think those models might not be publicly available since we are working on making that directory publicly available. ########## sdks/python/apache_beam/examples/inference/sklearn_japanese_housing_regression.py: ########## @@ -0,0 +1,164 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""A pipeline that uses RunInference API on a regression about housing prices. + +This example uses the japanese housing data from kaggle. 
+https://www.kaggle.com/datasets/nishiodens/japan-real-estate-transaction-prices + +Since the data has missing fields, this example illustrates how to split +data and assign it to the models that are trained on different subsets of +features. The predictions are then recombined. + +In order to set this example up, you will need two things. +1. Build models (or use ours) and reference those via the model directory. +2. Download the data from kaggle and host it. +""" + +import argparse +from typing import Iterable + +import apache_beam as beam +from apache_beam.io.filesystems import FileSystems +from apache_beam.ml.inference.base import RunInference +from apache_beam.ml.inference.sklearn_inference import ModelFileType +from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerPandas +from apache_beam.options.pipeline_options import PipelineOptions +from apache_beam.options.pipeline_options import SetupOptions +import pandas + +MODELS = [{ + 'name': 'all_features', + 'required_features': [ + 'Area', + 'Year', + 'MinTimeToNearestStation', + 'MaxTimeToNearestStation', + 'TotalFloorArea', + 'Frontage', + 'Breadth', + 'BuildingYear' + ] +}, + { + 'name': 'floor_area', + 'required_features': ['Area', 'Year', 'TotalFloorArea'] + }, + { + 'name': 'stations', + 'required_features': [ + 'Area', + 'Year', + 'MinTimeToNearestStation', + 'MaxTimeToNearestStation' + ] + }, { + 'name': 'no_features', 'required_features': ['Area', 'Year'] + }] + + +def sort_by_features(dataframe, max_size): + """ Partitions the dataframe by what data it has available.""" + for i, model in enumerate(MODELS): + required_features = dataframe[model['required_features']] + if not required_features.isnull().any().any(): + return i + return -1 + + +class LoadDataframe(beam.DoFn): + def process(self, file_name: str) -> Iterable[pandas.DataFrame]: + """ Loads data files as a pandas dataframe.""" + file = FileSystems.open(file_name, 'rb') + dataframe = pandas.read_csv(file) + for i in 
range(dataframe.shape[0]): + yield dataframe.iloc[[i]] + + +def report_predictions(prediction_result): + true_result = prediction_result.example['TradePrice'].values[0] + inference = prediction_result.inference + return 'True Price %.1f, Predicted Price %f' % (true_result, inference) + + +def parse_known_args(argv): + """Parses args for the workflow.""" + parser = argparse.ArgumentParser() + parser.add_argument( + '--input', Review Comment: This could be a list of files, I thought input was simpler, but it did make me realize that I needed to update the comment. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected]
