AnandInguva commented on code in PR #26632:
URL: https://github.com/apache/beam/pull/26632#discussion_r1244461264
##########
sdks/python/apache_beam/ml/inference/huggingface_inference.py:
##########
@@ -0,0 +1,462 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pytype: skip-file
+
+import logging
+import sys
+from collections import defaultdict
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import Iterable
+from typing import Optional
+from typing import Sequence
+from typing import Union
+
+import tensorflow as tf
+import torch
+from apache_beam.ml.inference import utils
+from apache_beam.ml.inference.base import ModelHandler
+from apache_beam.ml.inference.base import PredictionResult
+from apache_beam.ml.inference.pytorch_inference import _convert_to_device
+from transformers import AutoModel
+from transformers import TFAutoModel
+
+__all__ = [
+    'HuggingFaceModelHandlerTensor',
+    'HuggingFaceModelHandlerKeyedTensor',
+]
+
+TensorInferenceFn = Callable[[
+    Sequence[Union[torch.Tensor, tf.Tensor]],
+    Union[AutoModel, TFAutoModel],
+    torch.device,
+    Optional[Dict[str, Any]],
+    Optional[str]
+],
+    Iterable[PredictionResult]]
+
+KeyedTensorInferenceFn = Callable[[
+    Sequence[Dict[str, Union[torch.Tensor, tf.Tensor]]],
+    Union[AutoModel, TFAutoModel],
+    torch.device,
+    Optional[Dict[str, Any]],
+    Optional[str]
+],
+    Iterable[PredictionResult]]
+
+
+def _validate_constructor_args(model_uri, model_class):
+  message = (
+      "Please provide both model class and model uri to load the model."
+      " Got params as model_uri={model_uri} and "
+      "model_class={model_class}.")
+  if not model_uri and not model_class:
+    raise RuntimeError(
+        message.format(model_uri=model_uri, model_class=model_class))
+  elif not model_uri:
+    raise RuntimeError(
+        message.format(model_uri=model_uri, model_class=model_class))
+  elif not model_class:
+    raise RuntimeError(
+        message.format(model_uri=model_uri, model_class=model_class))
+
+
+def _run_inference_torch_keyed_tensor(
+    batch: Sequence[Dict[str, Union[tf.Tensor, torch.Tensor]]],
+    model: AutoModel,
+    device,
+    inference_args: Dict[str, Any],
+    model_id: Optional[str] = None) -> Iterable[PredictionResult]:
+  key_to_tensor_list = defaultdict(list)
+  # torch.no_grad() mitigates GPU memory issues
+  # https://github.com/apache/beam/issues/22811
+  with torch.no_grad():
+    for example in batch:
+      for key, tensor in example.items():
+        key_to_tensor_list[key].append(tensor)
+    key_to_batched_tensors = {}
+    for key in key_to_tensor_list:
+      batched_tensors = torch.stack(key_to_tensor_list[key])
+      batched_tensors = _convert_to_device(batched_tensors, device)
+      key_to_batched_tensors[key] = batched_tensors
+    return utils._convert_to_result(
+        batch, model(**key_to_batched_tensors, **inference_args))
+
+
+def _run_inference_tensorflow_keyed_tensor(
+    batch: Sequence[Dict[str, Union[tf.Tensor, torch.Tensor]]],
+    model: TFAutoModel,
+    device,
+    inference_args: Dict[str, Any],
+    model_id: Optional[str] = None) -> Iterable[PredictionResult]:
+  key_to_tensor_list = defaultdict(list)
+  for example in batch:
+    for key, tensor in example.items():
+      key_to_tensor_list[key].append(tensor)
+  key_to_batched_tensors = {}
+  for key in key_to_tensor_list:
+    # Batch TF tensors with tf.stack; device placement is handled by TF.
+    batched_tensors = tf.stack(key_to_tensor_list[key], axis=0)
+    key_to_batched_tensors[key] = batched_tensors
+  return utils._convert_to_result(
+      batch, model(**key_to_batched_tensors, **inference_args))
+
+
+class HuggingFaceModelHandlerKeyedTensor(ModelHandler[Dict[str,

Review Comment:
   Can we follow a pattern where we create a class `HuggingFaceModelHandler` that holds the methods common to `HuggingFaceModelHandlerTensor`/`HuggingFaceModelHandlerKeyedTensor`, such as `load_model`, so that we don't have to make every edit twice? For the types, we can use generics on `HuggingFaceModelHandler`. `XGBoostModelHandler` was implemented this way: https://github.com/apache/beam/blob/5f1eae622932bc3362731bcc8cf464bf678877d4/sdks/python/apache_beam/ml/inference/xgboost_inference.py#L185
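   For reference, a minimal sketch of the suggested pattern, modeled on the linked `xgboost_inference.py`. All names and signatures below are illustrative rather than the final API, and it assumes the module-level `_validate_constructor_args` from the diff above:

```python
from abc import ABC
from typing import Any, Dict, Optional, TypeVar, Union

import tensorflow as tf
import torch
from apache_beam.ml.inference.base import ModelHandler, PredictionResult
from transformers import AutoModel, TFAutoModel

ExampleT = TypeVar('ExampleT')
PredictionT = TypeVar('PredictionT')
ModelT = TypeVar('ModelT')


class HuggingFaceModelHandler(ModelHandler[ExampleT, PredictionT, ModelT], ABC):
  """Holds the logic shared by the Tensor and KeyedTensor handlers."""
  def __init__(
      self,
      model_uri: str,
      model_class,  # e.g. AutoModel or TFAutoModel
      load_model_args: Optional[Dict[str, Any]] = None):
    self._model_uri = model_uri
    self._model_class = model_class
    self._model_config_args = load_model_args if load_model_args else {}
    # Reuses the module-level validator from the diff above.
    _validate_constructor_args(
        model_uri=self._model_uri, model_class=self._model_class)

  def load_model(self):
    """Loads the model in one place instead of once per subclass."""
    return self._model_class.from_pretrained(
        self._model_uri, **self._model_config_args)


class HuggingFaceModelHandlerKeyedTensor(
    HuggingFaceModelHandler[Dict[str, Union[tf.Tensor, torch.Tensor]],
                            PredictionResult,
                            Union[AutoModel, TFAutoModel]]):
  def run_inference(self, batch, model, inference_args=None):
    ...  # dispatch to the keyed-tensor inference fns above


class HuggingFaceModelHandlerTensor(
    HuggingFaceModelHandler[Union[tf.Tensor, torch.Tensor],
                            PredictionResult,
                            Union[AutoModel, TFAutoModel]]):
  def run_inference(self, batch, model, inference_args=None):
    ...  # dispatch to the plain-tensor inference fns
```

   With this shape, a fix to model loading lands in one place, and the generic parameters keep the per-handler example types distinct.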
+ "Got params as model_uri={model_uri} and " + "model_class={model_class}.") + if not model_uri and not model_class: + raise RuntimeError( + message.format(model_uri=model_uri, model_class=model_class)) + elif not model_uri: + raise RuntimeError( + message.format(model_uri=model_uri, model_class=model_class)) + elif not model_class: + raise RuntimeError( + message.format(model_uri=model_uri, model_class=model_class)) + + +def _run_inference_torch_keyed_tensor( + batch: Sequence[Dict[str, Union[tf.Tensor, torch.Tensor]]], + model: AutoModel, + device, + inference_args: Dict[str, Any], + model_id: Optional[str] = None) -> Iterable[PredictionResult]: + key_to_tensor_list = defaultdict(list) + # torch.no_grad() mitigates GPU memory issues + # https://github.com/apache/beam/issues/22811 + with torch.no_grad(): + for example in batch: + for key, tensor in example.items(): + key_to_tensor_list[key].append(tensor) + key_to_batched_tensors = {} + for key in key_to_tensor_list: + batched_tensors = torch.stack(key_to_tensor_list[key]) + batched_tensors = _convert_to_device(batched_tensors, device) + key_to_batched_tensors[key] = batched_tensors + return utils._convert_to_result( + batch, model(**key_to_batched_tensors, **inference_args)) + + +def _run_inference_tensorflow_keyed_tensor( + batch: Sequence[Dict[str, Union[tf.Tensor, torch.Tensor]]], + model: TFAutoModel, + device, + inference_args: Dict[str, Any], + model_id: Optional[str] = None) -> Iterable[PredictionResult]: + key_to_tensor_list = defaultdict(list) + for example in batch: + for key, tensor in example.items(): + key_to_tensor_list[key].append(tensor) + key_to_batched_tensors = {} + for key in key_to_tensor_list: + batched_tensors = torch.stack(key_to_tensor_list[key]) + batched_tensors = key_to_tensor_list[key] + key_to_batched_tensors[key] = batched_tensors + return utils._convert_to_result( + batch, model(**key_to_batched_tensors, **inference_args)) + + +class HuggingFaceModelHandlerKeyedTensor(ModelHandler[Dict[str, Review Comment: Can we follow a pattern where we create a class `HuggingFaceModelHandler`, which would have common method across `HuggingFaceModelHandlerTensor/KeyedTensor` such as `load_model` so that we wouldn't edit the twice? For the types we can use generic types for `HuggingFaceModelHandler` XGBoostModelHandler was implemented this way https://github.com/apache/beam/blob/5f1eae622932bc3362731bcc8cf464bf678877d4/sdks/python/apache_beam/ml/inference/xgboost_inference.py#L185 ########## sdks/python/apache_beam/examples/inference/huggingface_language_modeling.py: ########## @@ -0,0 +1,180 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +""""A pipeline that uses RunInference to perform Language Modeling with +model from Hugging Face. 
+
+This pipeline takes sentences from a custom text file, converts the last word
+of the sentence into a <mask> token, and then uses AutoModelForMaskedLM from
+Hugging Face to predict the best word for the masked token given all the
+words already in the sentence. The pipeline then writes the prediction to an
+output file, which users can compare against the original sentence.
+"""
+
+import argparse
+import logging
+from typing import Dict
+from typing import Iterable
+from typing import Iterator
+from typing import Tuple
+
+import apache_beam as beam
+import torch
+from apache_beam.ml.inference.base import KeyedModelHandler
+from apache_beam.ml.inference.base import PredictionResult
+from apache_beam.ml.inference.base import RunInference
+from apache_beam.ml.inference.huggingface_inference import HuggingFaceModelHandlerKeyedTensor
+from apache_beam.options.pipeline_options import PipelineOptions
+from apache_beam.options.pipeline_options import SetupOptions
+from apache_beam.runners.runner import PipelineResult
+from transformers import AutoModelForMaskedLM
+from transformers import AutoTokenizer
+
+
+def add_mask_to_last_word(text: str) -> Tuple[str, str]:
+  text_list = text.split()
+  return text, ' '.join(text_list[:-2] + ['<mask>', text_list[-1]])
+
+
+def tokenize_sentence(
+    text_and_mask: Tuple[str, str],
+    tokenizer: AutoTokenizer) -> Tuple[str, Dict[str, torch.Tensor]]:
+  text, masked_text = text_and_mask
+  tokenized_sentence = tokenizer.encode_plus(masked_text, return_tensors="pt")
+
+  # Workaround to manually remove batch dim until we have the feature to
+  # add optional batching flag.
+  # TODO(https://github.com/apache/beam/issues/21863): Remove once optional
+  # batching flag added.
+  return text, {
+      k: torch.squeeze(v)
+      for k, v in dict(tokenized_sentence).items()
+  }
+
+
+def filter_empty_lines(text: str) -> Iterator[str]:
+  if len(text.strip()) > 0:
+    yield text
+
+
+class PostProcessor(beam.DoFn):
+  """Processes the PredictionResult to get the predicted word.
+
+  The logits are the output of the model. After applying a softmax
+  activation function to the logits, we get probability distributions for
+  each of the words in the model's vocabulary. We can get the word with the
+  highest probability of being a candidate replacement word by taking the
+  argmax.
+  """
+  def __init__(self, tokenizer: AutoTokenizer):
+    super().__init__()
+    self.tokenizer = tokenizer
+
+  def process(self, element: Tuple[str, PredictionResult]) -> Iterable[str]:
+    text, prediction_result = element
+    inputs = prediction_result.example
+    logits = prediction_result.inference['logits']
+    mask_token_index = torch.where(
+        inputs["input_ids"] == self.tokenizer.mask_token_id)[0]
+    predicted_token_id = logits[mask_token_index].argmax(axis=-1)
+    decoded_word = self.tokenizer.decode(predicted_token_id)
+    yield text + ';' + decoded_word
+
+
+def parse_known_args(argv):
+  """Parses args for the workflow."""
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      '--input',
+      dest='input',
+      help='Path to the text file containing sentences.')
+  parser.add_argument(
+      '--output',
+      dest='output',
+      required=True,
+      help='Path of file in which to save the output predictions.')
+  parser.add_argument(
+      '--model_name',
+      dest='model_name',
+      required=True,
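   If Python 3.11 is meant to be covered, the env header would change as below (a one-line sketch, assuming the rest of the stanza stays the same):

   ```suggestion
   [testenv:py{38,39,310,311}-transformers-{428,429,430}]
   ```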
+      help='BERT uncased model. This can be the base or the large model.')
+  parser.add_argument(
+      '--model_class',
+      dest='model_class',
+      default=AutoModelForMaskedLM,
+      help='Class of the model from Hugging Face.')
+  return parser.parse_known_args(argv)
+
+
+def run(
+    argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:
+  """
+  Args:
+    argv: Command line arguments defined for this example.
+    model_class: Reference to the class definition of the model.
+    model_name: Name of the pretrained model to be loaded.
+    save_main_session: Used for internal testing.
+    test_pipeline: Used for internal testing.
+  """
+  known_args, pipeline_args = parse_known_args(argv)
+  pipeline_options = PipelineOptions(pipeline_args)
+  pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
+
+  pipeline = test_pipeline
+  if not test_pipeline:
+    pipeline = beam.Pipeline(options=pipeline_options)
+
+  tokenizer = AutoTokenizer.from_pretrained(known_args.model_name)
+
+  model_handler = HuggingFaceModelHandlerKeyedTensor(
+      model_uri=known_args.model_name,
+      model_class=known_args.model_class,
+      max_batch_size=1)
+  if not known_args.input:
+    text = (
+        pipeline | 'CreateSentences' >> beam.Create([
+            'The capital of France is Paris .',
+            'It is raining cats and dogs .',
+            'Today is Monday and tomorrow is Tuesday .',
+            'There are 5 coconuts on this palm tree .',
+            'The strongest person in the world is not famous .',
+            'The secret ingredient to his wonderful life was gratitude .',
+            'The biggest animal in the world is the whale .',
+        ]))
+  else:
+    text = (
+        pipeline | 'ReadSentences' >> beam.io.ReadFromText(known_args.input))
+  text_and_tokenized_text_tuple = (
+      text
+      | 'FilterEmptyLines' >> beam.ParDo(filter_empty_lines)
+      | 'AddMask' >> beam.Map(add_mask_to_last_word)
+      | 'TokenizeSentence' >>
+      beam.Map(lambda x: tokenize_sentence(x, tokenizer)))
+  output = (
+      text_and_tokenized_text_tuple
+      | 'RunInference' >> RunInference(KeyedModelHandler(model_handler))
+      | 'ProcessOutput' >> beam.ParDo(PostProcessor(tokenizer=tokenizer)))
+  output | "WriteOutput" >> beam.io.WriteToText(  # pylint: disable=expression-not-assigned

Review Comment:
   ```suggestion
   _ = output | "WriteOutput" >> beam.io.WriteToText(  # pylint: disable=expression-not-assigned
   ```
   We can remove the pylint warning by assigning the expression to `_`.
##########
sdks/python/tox.ini:
##########
@@ -400,3 +401,21 @@ commands =
 # Run all XGBoost unit tests
 # Allow exit code 5 (no tests run) so that we can run this command safely on arbitrary subdirectories.
 /bin/sh -c 'pytest -o junit_suite_name={envname} --junitxml=pytest_{envname}.xml -n 6 -m uses_xgboost {posargs}; ret=$?; [ $ret = 5 ] && exit 0 || exit $ret'
+
+[testenv:py{38,39,310}-transformers-{428,429,430}]

Review Comment:
   311? (Should `py311` be included in this env list as well?)

--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
