damccorm commented on code in PR #23554:
URL: https://github.com/apache/beam/pull/23554#discussion_r995829097


##########
sdks/python/apache_beam/examples/inference/runinference_metrics/main.py:
##########
@@ -0,0 +1,128 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the pipeline for loading a ML model, and exploring
+the different RunInference metrics."""
+import argparse
+import logging
+import sys
+
+import apache_beam as beam
+import config as cfg
+from apache_beam.ml.inference import RunInference
+from apache_beam.ml.inference.base import KeyedModelHandler
+from apache_beam.ml.inference.pytorch_inference import PytorchModelHandlerKeyedTensor
+from pipeline.options import get_pipeline_options
+from pipeline.transformations import CustomPytorchModelHandlerKeyedTensor
+from pipeline.transformations import HuggingFaceStripBatchingWrapper
+from pipeline.transformations import PostProcessor
+from pipeline.transformations import Tokenize
+from transformers import DistilBertConfig
+
+
+def parse_arguments(argv):
+  """
+    Parses the arguments passed on the command line and
+    returns them as a namespace object.
+    Args:
+      argv: The arguments passed to the command line.
+    Returns:
+      The parsed arguments.
+    """
+  parser = argparse.ArgumentParser(description="benchmark-runinference")
+
+  parser.add_argument(
+      "-m",
+      "--mode",
+      help="Mode to run pipeline in.",
+      choices=["local", "cloud"],
+      default="local",
+  )
+  parser.add_argument(
+      "-p",
+      "--project",
+      help="GCP project to run pipeline on.",
+      default=cfg.PROJECT_ID,
+  )
+  parser.add_argument(
+      "-d",
+      "--device",
+      help="Device to run the dataflow job on",
+      choices=["CPU", "GPU"],
+      default="CPU",
+  )
+
+  args, _ = parser.parse_known_args(args=argv)
+  return args
+
+
+def run():
+  """
+    Runs the pipeline that loads a transformer-based text classification model
+    and does inference on a list of sentences.
+    At the end of the pipeline, different metrics, such as latency and
+    throughput, are printed.
+    """
+  args = parse_arguments(sys.argv)
+
+  inputs = [
+      "This is the worst food I have ever eaten",
+      "In my soul and in my heart, I’m convinced I’m wrong!",
+      "Be with me always—take any form—drive me mad!"\
+      "only do not leave me in this abyss, where I cannot find you!",
+      "Do I want to live? Would you like to live with your soul in the grave?",
+      "Honest people don’t hide their deeds.",
+      "Nelly, I am Heathcliff!  He’s always,"\
+      "always in my mind: not as a pleasure,"\
+      "any more than I am always a pleasure to myself, but as my own being.",
+  ] * 1000
+
+  pipeline_options = get_pipeline_options(
+      job_name=cfg.JOB_NAME,
+      num_workers=cfg.NUM_WORKERS,
+      project=args.project,
+      mode=args.mode,
+      device=args.device,
+  )
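+  # On GPU, use the stock Beam handler; on CPU, use the custom handler
+  # defined in pipeline/transformations.py.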
+  model_handler_class = (
+      PytorchModelHandlerKeyedTensor
+      if args.device == "GPU" else CustomPytorchModelHandlerKeyedTensor)
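+  # Map the GPU flag to a torch-style "cuda:0" device string; otherwise keep "CPU".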
+  device = "cuda:0" if args.device == "GPU" else args.device
+  model_handler = model_handler_class(
+      state_dict_path=cfg.MODEL_STATE_DICT_PATH,
+      model_class=HuggingFaceStripBatchingWrapper,
+      model_params={
+          "config": DistilBertConfig.from_pretrained(cfg.MODEL_CONFIG_PATH)
+      },
+      device=device,
+  )
+
+  with beam.Pipeline(options=pipeline_options) as pipeline:
+    _ = (
+        pipeline
+        | "Create inputs" >> beam.Create(inputs)
+        | "Tokenize" >> beam.ParDo(Tokenize(cfg.TOKENIZER_NAME))
+        | "Inference" >>
+        RunInference(model_handler=KeyedModelHandler(model_handler))
+        | "Decode Predictions" >> beam.ParDo(PostProcessor()))
+  metrics = pipeline.result.metrics().query(beam.metrics.MetricsFilter())
+  print("\n\n\n\n")

Review Comment:
   ```suggestion
   ```



##########
website/www/site/content/en/documentation/ml/runinference-metrics.md:
##########
@@ -0,0 +1,103 @@
+---
+title: "RunInference Metrics"
+---
+<!--
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+# RunInference Metrics Example
+
+The main purpose of this example is to demonstrate and explain the different metrics that are available when using [RunInference](https://beam.apache.org/documentation/transforms/python/elementwise/runinference/) to do inference with a machine learning model. We use a pipeline that reads a list of sentences, tokenizes the text, and uses the Transformer-based model `distilbert-base-uncased-finetuned-sst-2-english` with `RunInference` to classify the texts into two classes.
+
+We showcase different RunInference metrics when the pipeline is executed with the Dataflow runner on CPU and GPU. The full example code can be found [here](https://github.com/apache/beam/tree/master/sdks/python/apache_beam/examples/inference/runinference_metrics/).
+
+
+The file structure for the entire pipeline is:
+
+    runinference_metrics/
+    ├── pipeline/
+    │   ├── __init__.py
+    │   ├── options.py
+    │   └── transformations.py
+    ├── __init__.py
+    ├── config.py
+    ├── main.py
+    └── setup.py
+
+`pipeline/transformations.py` contains the code for the `beam.DoFn`s and additional functions that are used in the pipeline (a minimal sketch of the `Tokenize` DoFn appears after this list).
+
+`pipeline/options.py` contains the pipeline options used to configure the Dataflow pipeline.
+
+`config.py` defines variables, such as the GCP `PROJECT_ID` and `NUM_WORKERS`, that are used multiple times.
+
+`setup.py` defines the packages/requirements needed for the pipeline to run.
+
+`main.py` contains the pipeline code and some additional functions used for running the pipeline.
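+
+As an illustration, here is a minimal sketch of what the `Tokenize` DoFn in `pipeline/transformations.py` might look like (the tokenizer class, padding, and tensor handling in the actual file may differ):
+
+{{< highlight >}}
+import apache_beam as beam
+import torch
+from transformers import AutoTokenizer
+
+
+class Tokenize(beam.DoFn):
+  def __init__(self, tokenizer_name: str):
+    self._tokenizer_name = tokenizer_name
+    self._tokenizer = None
+
+  def setup(self):
+    # Load the tokenizer once per DoFn instance instead of once per element.
+    self._tokenizer = AutoTokenizer.from_pretrained(self._tokenizer_name)
+
+  def process(self, sentence: str):
+    # Tokenize into PyTorch tensors and drop the batch dimension;
+    # RunInference batches elements itself.
+    tokens = self._tokenizer(
+        sentence, return_tensors="pt", padding="max_length", max_length=128)
+    tokens = {key: torch.squeeze(val) for key, val in tokens.items()}
+    # Emit (key, value) pairs so KeyedModelHandler can carry the original
+    # sentence alongside the prediction.
+    yield sentence, tokens
+{{< /highlight >}}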
+
+
+### How to Run the Pipeline
+First, make sure you have installed the required packages. You also need access to a Google Cloud project, with the GCP variables, such as `PROJECT_ID`, `REGION`, and others, correctly configured in `config.py`. To use Dataflow with GPUs, follow the setup instructions at https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/dataflow/gpu-examples/pytorch-minimal.
+
+
+1. Dataflow with CPU: `python main.py --mode cloud --device CPU`
+2. Dataflow with GPU: `python main.py --mode cloud --device GPU`
+
+The pipeline can be broken down into a few simple steps:
+1. Create a list of texts to use as input, using `beam.Create`
+2. Tokenize the texts
+3. Run inference using `RunInference`
+4. Postprocess the output of `RunInference`
+
+The code snippet for the pipeline is:
+
+{{< highlight >}}
+  with beam.Pipeline(options=pipeline_options) as pipeline:
+    _ = (
+        pipeline
+        | "Create inputs" >> beam.Create(inputs)
+        | "Tokenize" >> beam.ParDo(Tokenize(cfg.TOKENIZER_NAME))
+        | "Inference" >>
+        RunInference(model_handler=KeyedModelHandler(model_handler))
+        | "Decode Predictions" >> beam.ParDo(PostProcessor()))
+{{< /highlight >}}
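+
+Note: the model handler is wrapped in `KeyedModelHandler` because `Tokenize` emits `(sentence, tokens)` pairs. `KeyedModelHandler` runs inference on the values while keeping each key attached to its prediction, so that `PostProcessor` can pair every prediction with its original sentence.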
+
+
+## RunInference Metrics
+
+As mentioned above, we benchmarked the performance of RunInference using Dataflow on both CPU and GPU. These metrics can be seen in the GCP UI, and they can also be printed using
+{{< highlight >}}
+metrics = pipeline.result.metrics().query(beam.metrics.MetricsFilter())
+{{< /highlight >}}
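+
+The query returns a dictionary of counters, distributions, and gauges. As a minimal sketch (assuming the `MetricResult` attribute names of the Beam Python SDK, which may differ across versions), the results could be printed like this:
+
+{{< highlight >}}
+metrics = pipeline.result.metrics().query(beam.metrics.MetricsFilter())
+for counter in metrics["counters"]:
+  # Each MetricResult carries its key plus committed and attempted values.
+  print(f"{counter.key.metric.name}: {counter.committed}")
+for dist in metrics["distributions"]:
+  print(
+      f"{dist.key.metric.name}: mean={dist.committed.mean}, "
+      f"count={dist.committed.count}")
+{{< /highlight >}}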
+
+
+A snapshot of the different metrics from the GCP UI when using Dataflow on GPU:
+
+![runinference-GPU-metrics.png](https://drive.google.com/uc?id=1YIwrFXa3XNxzQWAgm_MiEXaSFymcACmV)

Review Comment:
   > Should we add the png locally?
   
   +1, let's check this in to the repo. That's less brittle (and less likely to be an IP issue, since it's technically not Apache-licensed in its current location).


