yeandy commented on code in PR #23497:
URL: https://github.com/apache/beam/pull/23497#discussion_r989141802


##########
sdks/python/apache_beam/examples/inference/anomaly_detection/anomaly_detection_pipeline/main.py:
##########
@@ -0,0 +1,144 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the pipeline for doing anomaly detection."""
+import argparse
+import sys
+
+import apache_beam as beam
+import config as cfg
+from apache_beam.io.gcp.pubsub import ReadFromPubSub
+from apache_beam.ml.inference.base import KeyedModelHandler
+from apache_beam.ml.inference.base import RunInference
+from apache_beam.ml.inference.pytorch_inference import 
PytorchModelHandlerKeyedTensor
+from apache_beam.ml.inference.sklearn_inference import ModelFileType
+from pipeline.options import get_pipeline_options
+from pipeline.transformations import CustomSklearnModelHandlerNumpy
+from pipeline.transformations import Decode
+from pipeline.transformations import DecodePrediction
+from pipeline.transformations import ModelWrapper
+from pipeline.transformations import NormalizeEmbedding
+from pipeline.transformations import TriggerEmailAlert
+from pipeline.transformations import tokenize_sentence
+from transformers import AutoConfig
+
+
+def parse_arguments(argv):
+  """
+    It parses the arguments passed to the command line and returns them as an 
object

Review Comment:
   ```suggestion
       Parses the arguments passed to the command line and returns them as an 
object
   ```



##########
sdks/python/apache_beam/examples/inference/anomaly_detection/write_data_to_pubsub_pipeline/pipeline/utils.py:
##########
@@ -0,0 +1,67 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the transformations and utility functions for
+the pipeline."""
+import uuid
+
+import numpy as np
+
+import apache_beam as beam
+from apache_beam.io.gcp.pubsub import PubsubMessage
+from datasets import load_dataset
+
+
+def get_dataset(categories: list, split: str = "train"):
+  """
+    It takes a list of categories and a split (train/test/dev) and returns the

Review Comment:
   ```suggestion
       Takes a list of categories and a split (train/test/dev) and returns the
   ```



##########
sdks/python/apache_beam/examples/inference/anomaly_detection/anomaly_detection_pipeline/pipeline/transformations.py:
##########
@@ -0,0 +1,192 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the transformations and utility functions for
+the anomaly_detection pipeline."""
+import json
+
+import numpy as np
+
+import apache_beam as beam
+import config as cfg
+import hdbscan
+import torch
+import yagmail
+from apache_beam.ml.inference.base import PredictionResult
+from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy
+from apache_beam.ml.inference.sklearn_inference import _validate_inference_args
+from transformers import AutoTokenizer
+from transformers import DistilBertModel
+
+# [START tokenization]
+Tokenizer = AutoTokenizer.from_pretrained(cfg.TOKENIZER_NAME)
+
+
+def tokenize_sentence(input_dict):
+  """
+    It takes a dictionary with a text and an id, tokenizes the text, and
+    returns a tuple of the text and id and the tokenized text
+
+    Args:
+      input_dict: a dictionary with the text and id of the sentence
+
+    Returns:
+      A tuple of the text and id, and a dictionary of the tokens.
+    """
+  text, uid = input_dict["text"], input_dict["id"]
+  tokens = Tokenizer([text], padding=True, truncation=True, 
return_tensors="pt")
+  tokens = {key: torch.squeeze(val) for key, val in tokens.items()}
+  return (text, uid), tokens
+
+
+# [END tokenization]
+
+
+# [START DistilBertModelWrapper]
+class ModelWrapper(DistilBertModel):
+  """Wrapper to DistilBertModel to get embeddings when calling
+    forward function."""
+  def forward(self, **kwargs):
+    output = super().forward(**kwargs)
+    sentence_embedding = (
+        self.mean_pooling(output,
+                          kwargs["attention_mask"]).detach().cpu().numpy())
+    return sentence_embedding
+
+  # Mean Pooling - Take attention mask into account for correct averaging
+  def mean_pooling(self, model_output, attention_mask):
+    """
+        The function calculates the mean of token embeddings
+
+        Args:
+          model_output: The output of the model.
+          attention_mask: This is a tensor that contains 1s for all input 
tokens and
+          0s for all padding tokens.
+
+        Returns:
+          The mean of the token embeddings.
+        """
+    token_embeddings = model_output[
+        0]  # First element of model_output contains all token embeddings
+    input_mask_expanded = (
+        attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float())
+    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
+        input_mask_expanded.sum(1), min=1e-9)
+
+
+# [END DistilBertModelWrapper]
+
+
+# [START CustomSklearnModelHandlerNumpy]
+class CustomSklearnModelHandlerNumpy(SklearnModelHandlerNumpy):
+  # Can be removed once: https://github.com/apache/beam/issues/21863 is fixed
+  def batch_elements_kwargs(self):
+    """Limit batch size to 1 for inference"""
+    return {"max_batch_size": 1}
+
+  def run_inference(self, batch, model, inference_args=None):
+    """Runs inferences on a batch of numpy arrays.
+
+        Args:
+          batch: A sequence of examples as numpy arrays. They should
+            be single examples.
+          model: A numpy model or pipeline. Must implement predict(X).
+            Where the parameter X is a numpy array.
+          inference_args: Any additional arguments for an inference.
+
+        Returns:
+          An Iterable of type PredictionResult.
+        """
+    _validate_inference_args(inference_args)
+    vectorized_batch = np.vstack(batch)
+    predictions = hdbscan.approximate_predict(model, vectorized_batch)
+    return [PredictionResult(x, y) for x, y in zip(batch, predictions)]
+
+
+# [END CustomSklearnModelHandlerNumpy]
+
+
+class NormalizeEmbedding(beam.DoFn):
+  """A DoFn for normalization of text embedding."""
+  def process(self, element, *args, **kwargs):
+    """
+        For each element in the input PCollection, normalize the embedding 
vector, and
+        yield a new element with the normalized embedding added
+        Args:
+          element: The element to be processed.
+        """
+    (text, uid), prediction = element
+    embedding = prediction.inference
+    l2_norm = np.linalg.norm(embedding)
+    yield {"text": text, "id": uid, "embedding": embedding / l2_norm}
+
+
+class Decode(beam.DoFn):
+  """A DoFn for decoding PubSub message into a dictionary."""
+  def process(self, element, *args, **kwargs):
+    """
+        For each element in the input PCollection, retrieve the id and decode 
the bytes into string
+
+        Args:
+          element: The element that is being processed.
+        """
+    yield {
+        "text": element.data.decode("utf-8"),
+        "id": element.attributes["id"],
+    }
+
+
+class DecodePrediction(beam.DoFn):
+  """A DoFn for decoding the prediction from RunInference."""
+  def process(self, element):
+    """
+    The `process` function takes the output of RunInference and returns a 
dictionary
+    with the text, uid, and cluster.**
+
+    Args:
+      element: The input element to be processed.
+    """
+    (text, uid), prediction = element
+    cluster = prediction.inference.item()
+    bq_dict = {"text": text, "id": uid, "cluster": cluster}
+    yield bq_dict
+
+
+class TriggerEmailAlert(beam.DoFn):
+  """A DoFn for sending email using yagmail."""
+  def setup(self):
+    """
+    It opens the cred.json file and initialized the yag SMTP client.

Review Comment:
   ```suggestion
       Opens the cred.json file and initializes the yag SMTP client.
   ```



##########
sdks/python/apache_beam/examples/inference/anomaly_detection/anomaly_detection_pipeline/pipeline/transformations.py:
##########
@@ -0,0 +1,192 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the transformations and utility functions for
+the anomaly_detection pipeline."""
+import json
+
+import numpy as np
+
+import apache_beam as beam
+import config as cfg
+import hdbscan
+import torch
+import yagmail
+from apache_beam.ml.inference.base import PredictionResult
+from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy
+from apache_beam.ml.inference.sklearn_inference import _validate_inference_args
+from transformers import AutoTokenizer
+from transformers import DistilBertModel
+
+# [START tokenization]
+Tokenizer = AutoTokenizer.from_pretrained(cfg.TOKENIZER_NAME)
+
+
+def tokenize_sentence(input_dict):
+  """
+    It takes a dictionary with a text and an id, tokenizes the text, and
+    returns a tuple of the text and id and the tokenized text
+
+    Args:
+      input_dict: a dictionary with the text and id of the sentence
+
+    Returns:
+      A tuple of the text and id, and a dictionary of the tokens.
+    """
+  text, uid = input_dict["text"], input_dict["id"]
+  tokens = Tokenizer([text], padding=True, truncation=True, 
return_tensors="pt")
+  tokens = {key: torch.squeeze(val) for key, val in tokens.items()}
+  return (text, uid), tokens
+
+
+# [END tokenization]
+
+
+# [START DistilBertModelWrapper]
+class ModelWrapper(DistilBertModel):
+  """Wrapper to DistilBertModel to get embeddings when calling
+    forward function."""
+  def forward(self, **kwargs):
+    output = super().forward(**kwargs)
+    sentence_embedding = (
+        self.mean_pooling(output,
+                          kwargs["attention_mask"]).detach().cpu().numpy())
+    return sentence_embedding
+
+  # Mean Pooling - Take attention mask into account for correct averaging
+  def mean_pooling(self, model_output, attention_mask):
+    """
+        The function calculates the mean of token embeddings
+
+        Args:
+          model_output: The output of the model.
+          attention_mask: This is a tensor that contains 1s for all input 
tokens and
+          0s for all padding tokens.
+
+        Returns:
+          The mean of the token embeddings.
+        """
+    token_embeddings = model_output[
+        0]  # First element of model_output contains all token embeddings
+    input_mask_expanded = (
+        attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float())
+    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
+        input_mask_expanded.sum(1), min=1e-9)
+
+
+# [END DistilBertModelWrapper]
+
+
+# [START CustomSklearnModelHandlerNumpy]
+class CustomSklearnModelHandlerNumpy(SklearnModelHandlerNumpy):
+  # Can be removed once: https://github.com/apache/beam/issues/21863 is fixed
+  def batch_elements_kwargs(self):
+    """Limit batch size to 1 for inference"""
+    return {"max_batch_size": 1}
+
+  def run_inference(self, batch, model, inference_args=None):
+    """Runs inferences on a batch of numpy arrays.
+
+        Args:
+          batch: A sequence of examples as numpy arrays. They should
+            be single examples.
+          model: A numpy model or pipeline. Must implement predict(X).
+            Where the parameter X is a numpy array.
+          inference_args: Any additional arguments for an inference.
+
+        Returns:
+          An Iterable of type PredictionResult.
+        """
+    _validate_inference_args(inference_args)
+    vectorized_batch = np.vstack(batch)
+    predictions = hdbscan.approximate_predict(model, vectorized_batch)
+    return [PredictionResult(x, y) for x, y in zip(batch, predictions)]
+
+
+# [END CustomSklearnModelHandlerNumpy]
+
+
+class NormalizeEmbedding(beam.DoFn):
+  """A DoFn for normalization of text embedding."""
+  def process(self, element, *args, **kwargs):
+    """
+        For each element in the input PCollection, normalize the embedding 
vector, and
+        yield a new element with the normalized embedding added
+        Args:

Review Comment:
   ```suggestion
           yield a new element with the normalized embedding added
   
           Args:
   ```
   And also for other doc string locations



##########
sdks/python/apache_beam/examples/inference/anomaly_detection/anomaly_detection_pipeline/main.py:
##########
@@ -0,0 +1,144 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the pipeline for doing anomaly detection."""
+import argparse
+import sys
+
+import apache_beam as beam
+import config as cfg
+from apache_beam.io.gcp.pubsub import ReadFromPubSub
+from apache_beam.ml.inference.base import KeyedModelHandler
+from apache_beam.ml.inference.base import RunInference
+from apache_beam.ml.inference.pytorch_inference import 
PytorchModelHandlerKeyedTensor
+from apache_beam.ml.inference.sklearn_inference import ModelFileType
+from pipeline.options import get_pipeline_options
+from pipeline.transformations import CustomSklearnModelHandlerNumpy
+from pipeline.transformations import Decode
+from pipeline.transformations import DecodePrediction
+from pipeline.transformations import ModelWrapper
+from pipeline.transformations import NormalizeEmbedding
+from pipeline.transformations import TriggerEmailAlert
+from pipeline.transformations import tokenize_sentence
+from transformers import AutoConfig
+
+
+def parse_arguments(argv):
+  """
+    It parses the arguments passed to the command line and returns them as an 
object
+
+    Args:
+      argv: The arguments passed to the command line.
+
+    Returns:
+      The arguments that are being passed in.
+    """
+  parser = argparse.ArgumentParser(description="online-clustering")
+
+  parser.add_argument(
+      "-m",
+      "--mode",
+      help="Mode to run pipeline in.",
+      choices=["local", "cloud"],
+      default="local",
+  )
+  parser.add_argument(
+      "-p",
+      "--project",
+      help="GCP project to run pipeline on.",
+      default=cfg.PROJECT_ID,
+  )
+
+  args, _ = parser.parse_known_args(args=argv)
+  return args
+
+
+# [START PytorchNoBatchModelHandler]
+# Can be removed once: https://github.com/apache/beam/issues/21863 is fixed
+class PytorchNoBatchModelHandler(PytorchModelHandlerKeyedTensor):
+  """Wrapper to PytorchModelHandler to limit batch size to 1.
+    The tokenized strings generated from BertTokenizer may have different
+    lengths, which doesn't work with torch.stack() in current RunInference
+    implementation since stack() requires tensors to be the same size.
+    Restricting max_batch_size to 1 means there is only 1 example per `batch`
+    in the run_inference() call.
+    """
+  def batch_elements_kwargs(self):
+    return {"max_batch_size": 1}
+
+
+# [END PytorchNoBatchModelHandler]
+
+
+def run():
+  """
+    It runs the pipeline.  It read from PubSub, decode the message,
+    tokenize the text, get the embedding, normalize the embedding,
+    does anomaly dectection using HDBSCAN trained model, and then
+    writes to BQ, and sends an email alert if anomaly detected.

Review Comment:
   ```suggestion
       Runs the pipeline.  It reads from PubSub, decodes the message,
       tokenizes the text, gets the embedding, normalizes the embedding,
       does anomaly detection using HDBSCAN trained model, and then
       writes to BQ, sending an email alert if anomaly detected.
   ```



##########
sdks/python/apache_beam/examples/inference/anomaly_detection/write_data_to_pubsub_pipeline/main.py:
##########
@@ -0,0 +1,89 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the pipeline for writing twitter messages to PubSub."""
+import argparse
+import sys
+
+import apache_beam as beam
+import config as cfg
+from apache_beam.io.gcp.pubsub import WriteToPubSub
+from pipeline.options import get_pipeline_options
+from pipeline.utils import AssignUniqueID
+from pipeline.utils import ConvertToPubSubMessage
+from pipeline.utils import get_dataset
+
+
+def parse_arguments(argv):
+  """
+    It parses the arguments passed to the command line and returns them as an 
object
+
+    Args:
+      argv: The arguments passed to the command line.
+
+    Returns:
+      The arguments that are being passed in.
+    """
+  parser = argparse.ArgumentParser(description="write-to-pubsub")
+
+  parser.add_argument(
+      "-m",
+      "--mode",
+      help="Mode to run pipeline in.",
+      choices=["local", "cloud"],
+      default="local",
+  )
+  parser.add_argument(
+      "-p",
+      "--project",
+      help="GCP project to run pipeline on.",
+      default=cfg.PROJECT_ID,
+  )
+
+  args, _ = parser.parse_known_args(args=argv)
+  return args
+
+
+def run():
+  """
+    It runs the pipeline. It load the training data,
+    assign a unique ID to each document, convert it to a PubSub message, and
+    write it to PubSub

Review Comment:
   ```suggestion
       Runs the pipeline. It loads the training data,
       assigns a unique ID to each document, converts it to a PubSub message, 
and
       writes it to PubSub
   ```



##########
sdks/python/apache_beam/examples/inference/anomaly_detection/anomaly_detection_pipeline/pipeline/transformations.py:
##########
@@ -0,0 +1,192 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the transformations and utility functions for
+the anomaly_detection pipeline."""
+import json
+
+import numpy as np
+
+import apache_beam as beam
+import config as cfg
+import hdbscan
+import torch
+import yagmail
+from apache_beam.ml.inference.base import PredictionResult
+from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy
+from apache_beam.ml.inference.sklearn_inference import _validate_inference_args
+from transformers import AutoTokenizer
+from transformers import DistilBertModel
+
+# [START tokenization]
+Tokenizer = AutoTokenizer.from_pretrained(cfg.TOKENIZER_NAME)
+
+
+def tokenize_sentence(input_dict):
+  """
+    It takes a dictionary with a text and an id, tokenizes the text, and
+    returns a tuple of the text and id and the tokenized text
+
+    Args:
+      input_dict: a dictionary with the text and id of the sentence
+
+    Returns:
+      A tuple of the text and id, and a dictionary of the tokens.
+    """
+  text, uid = input_dict["text"], input_dict["id"]
+  tokens = Tokenizer([text], padding=True, truncation=True, 
return_tensors="pt")
+  tokens = {key: torch.squeeze(val) for key, val in tokens.items()}
+  return (text, uid), tokens
+
+
+# [END tokenization]
+
+
+# [START DistilBertModelWrapper]
+class ModelWrapper(DistilBertModel):
+  """Wrapper to DistilBertModel to get embeddings when calling
+    forward function."""
+  def forward(self, **kwargs):
+    output = super().forward(**kwargs)
+    sentence_embedding = (
+        self.mean_pooling(output,
+                          kwargs["attention_mask"]).detach().cpu().numpy())
+    return sentence_embedding
+
+  # Mean Pooling - Take attention mask into account for correct averaging
+  def mean_pooling(self, model_output, attention_mask):
+    """
+        The function calculates the mean of token embeddings

Review Comment:
   ```suggestion
           Calculates the mean of token embeddings
   ```



##########
sdks/python/apache_beam/examples/inference/anomaly_detection/anomaly_detection_pipeline/pipeline/options.py:
##########
@@ -0,0 +1,63 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the pipeline options to configure the Dataflow 
pipeline."""
+from datetime import datetime
+
+import config as cfg
+from apache_beam.options.pipeline_options import PipelineOptions
+
+
+def get_pipeline_options(
+    project: str,
+    job_name: str,
+    mode: str,
+    num_workers: int = cfg.NUM_WORKERS,
+    streaming: bool = True,
+) -> PipelineOptions:
+  """Function to retrieve the pipeline options.
+    Args:
+        project: GCP project to run on
+        mode: Indicator to run local, cloud or template
+        num_workers: Number of Workers for running the job parallely
+        max_num_workers: Maximum number of workers running the job parallely
+    Returns:
+        Dataflow pipeline options
+    """
+  job_name = f'{job_name}-{datetime.now().strftime("%Y%m%d%H%M%S")}'
+
+  staging_bucket = f"gs://{cfg.PROJECT_ID}-ml-examples"
+
+  # For a list of available options, check:
+  # 
https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-dataflow-pipeline-options
+  dataflow_options = {
+      "runner": "DirectRunner" if mode == "local" else "DataflowRunner",
+      "job_name": job_name,
+      "project": project,
+      "region": cfg.REGION,
+      "staging_location": f"{staging_bucket}/dflow-staging",
+      "temp_location": f"{staging_bucket}/dflow-temp",
+      # "save_main_session": False,

Review Comment:
   Is this intended to be commented out?



##########
sdks/python/apache_beam/examples/inference/anomaly_detection/anomaly_detection_pipeline/pipeline/transformations.py:
##########
@@ -0,0 +1,192 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the transformations and utility functions for
+the anomaly_detection pipeline."""
+import json
+
+import numpy as np
+
+import apache_beam as beam
+import config as cfg
+import hdbscan
+import torch
+import yagmail
+from apache_beam.ml.inference.base import PredictionResult
+from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy
+from apache_beam.ml.inference.sklearn_inference import _validate_inference_args
+from transformers import AutoTokenizer
+from transformers import DistilBertModel
+
+# [START tokenization]
+Tokenizer = AutoTokenizer.from_pretrained(cfg.TOKENIZER_NAME)
+
+
+def tokenize_sentence(input_dict):
+  """
+    It takes a dictionary with a text and an id, tokenizes the text, and
+    returns a tuple of the text and id and the tokenized text
+
+    Args:
+      input_dict: a dictionary with the text and id of the sentence
+
+    Returns:
+      A tuple of the text and id, and a dictionary of the tokens.
+    """
+  text, uid = input_dict["text"], input_dict["id"]
+  tokens = Tokenizer([text], padding=True, truncation=True, 
return_tensors="pt")
+  tokens = {key: torch.squeeze(val) for key, val in tokens.items()}
+  return (text, uid), tokens
+
+
+# [END tokenization]
+
+
+# [START DistilBertModelWrapper]
+class ModelWrapper(DistilBertModel):
+  """Wrapper to DistilBertModel to get embeddings when calling
+    forward function."""
+  def forward(self, **kwargs):
+    output = super().forward(**kwargs)
+    sentence_embedding = (
+        self.mean_pooling(output,
+                          kwargs["attention_mask"]).detach().cpu().numpy())
+    return sentence_embedding
+
+  # Mean Pooling - Take attention mask into account for correct averaging
+  def mean_pooling(self, model_output, attention_mask):
+    """
+        The function calculates the mean of token embeddings
+
+        Args:
+          model_output: The output of the model.
+          attention_mask: This is a tensor that contains 1s for all input 
tokens and
+          0s for all padding tokens.
+
+        Returns:
+          The mean of the token embeddings.
+        """
+    token_embeddings = model_output[
+        0]  # First element of model_output contains all token embeddings
+    input_mask_expanded = (
+        attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float())
+    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
+        input_mask_expanded.sum(1), min=1e-9)
+
+
+# [END DistilBertModelWrapper]
+
+
+# [START CustomSklearnModelHandlerNumpy]
+class CustomSklearnModelHandlerNumpy(SklearnModelHandlerNumpy):
+  # Can be removed once: https://github.com/apache/beam/issues/21863 is fixed
+  def batch_elements_kwargs(self):
+    """Limit batch size to 1 for inference"""
+    return {"max_batch_size": 1}
+
+  def run_inference(self, batch, model, inference_args=None):

Review Comment:
   Can we add a comment about also removing this function once 
https://github.com/apache/beam/issues/22572 is fixed?



##########
sdks/python/apache_beam/examples/inference/anomaly_detection/anomaly_detection_pipeline/pipeline/transformations.py:
##########
@@ -0,0 +1,192 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the transformations and utility functions for
+the anomaly_detection pipeline."""
+import json
+
+import numpy as np
+
+import apache_beam as beam
+import config as cfg
+import hdbscan
+import torch
+import yagmail
+from apache_beam.ml.inference.base import PredictionResult
+from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy
+from apache_beam.ml.inference.sklearn_inference import _validate_inference_args
+from transformers import AutoTokenizer
+from transformers import DistilBertModel
+
+# [START tokenization]
+Tokenizer = AutoTokenizer.from_pretrained(cfg.TOKENIZER_NAME)
+
+
+def tokenize_sentence(input_dict):
+  """
+    It takes a dictionary with a text and an id, tokenizes the text, and

Review Comment:
   ```suggestion
       Takes a dictionary with a text and an id, tokenizes the text, and
   ```



##########
sdks/python/apache_beam/examples/inference/anomaly_detection/anomaly_detection_pipeline/pipeline/transformations.py:
##########
@@ -0,0 +1,192 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""This file contains the transformations and utility functions for
+the anomaly_detection pipeline."""
+import json
+
+import numpy as np
+
+import apache_beam as beam
+import config as cfg
+import hdbscan
+import torch
+import yagmail
+from apache_beam.ml.inference.base import PredictionResult
+from apache_beam.ml.inference.sklearn_inference import SklearnModelHandlerNumpy
+from apache_beam.ml.inference.sklearn_inference import _validate_inference_args
+from transformers import AutoTokenizer
+from transformers import DistilBertModel
+
+# [START tokenization]
+Tokenizer = AutoTokenizer.from_pretrained(cfg.TOKENIZER_NAME)
+
+
+def tokenize_sentence(input_dict):
+  """
+    It takes a dictionary with a text and an id, tokenizes the text, and
+    returns a tuple of the text and id and the tokenized text
+
+    Args:
+      input_dict: a dictionary with the text and id of the sentence
+
+    Returns:
+      A tuple of the text and id, and a dictionary of the tokens.
+    """
+  text, uid = input_dict["text"], input_dict["id"]
+  tokens = Tokenizer([text], padding=True, truncation=True, 
return_tensors="pt")
+  tokens = {key: torch.squeeze(val) for key, val in tokens.items()}
+  return (text, uid), tokens
+
+
+# [END tokenization]
+
+
+# [START DistilBertModelWrapper]
+class ModelWrapper(DistilBertModel):
+  """Wrapper to DistilBertModel to get embeddings when calling
+    forward function."""
+  def forward(self, **kwargs):
+    output = super().forward(**kwargs)
+    sentence_embedding = (
+        self.mean_pooling(output,
+                          kwargs["attention_mask"]).detach().cpu().numpy())
+    return sentence_embedding
+
+  # Mean Pooling - Take attention mask into account for correct averaging
+  def mean_pooling(self, model_output, attention_mask):
+    """
+        The function calculates the mean of token embeddings
+
+        Args:
+          model_output: The output of the model.
+          attention_mask: This is a tensor that contains 1s for all input 
tokens and
+          0s for all padding tokens.
+
+        Returns:
+          The mean of the token embeddings.
+        """
+    token_embeddings = model_output[
+        0]  # First element of model_output contains all token embeddings
+    input_mask_expanded = (
+        attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float())
+    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
+        input_mask_expanded.sum(1), min=1e-9)
+
+
+# [END DistilBertModelWrapper]
+
+
+# [START CustomSklearnModelHandlerNumpy]
+class CustomSklearnModelHandlerNumpy(SklearnModelHandlerNumpy):
+  # Can be removed once: https://github.com/apache/beam/issues/21863 is fixed
+  def batch_elements_kwargs(self):
+    """Limit batch size to 1 for inference"""
+    return {"max_batch_size": 1}
+
+  def run_inference(self, batch, model, inference_args=None):
+    """Runs inferences on a batch of numpy arrays.
+
+        Args:
+          batch: A sequence of examples as numpy arrays. They should
+            be single examples.
+          model: A numpy model or pipeline. Must implement predict(X).
+            Where the parameter X is a numpy array.
+          inference_args: Any additional arguments for an inference.
+
+        Returns:
+          An Iterable of type PredictionResult.
+        """
+    _validate_inference_args(inference_args)
+    vectorized_batch = np.vstack(batch)
+    predictions = hdbscan.approximate_predict(model, vectorized_batch)
+    return [PredictionResult(x, y) for x, y in zip(batch, predictions)]
+
+
+# [END CustomSklearnModelHandlerNumpy]
+
+
+class NormalizeEmbedding(beam.DoFn):
+  """A DoFn for normalization of text embedding."""
+  def process(self, element, *args, **kwargs):
+    """
+        For each element in the input PCollection, normalize the embedding 
vector, and
+        yield a new element with the normalized embedding added
+        Args:
+          element: The element to be processed.
+        """
+    (text, uid), prediction = element
+    embedding = prediction.inference
+    l2_norm = np.linalg.norm(embedding)
+    yield {"text": text, "id": uid, "embedding": embedding / l2_norm}
+
+
+class Decode(beam.DoFn):
+  """A DoFn for decoding PubSub message into a dictionary."""
+  def process(self, element, *args, **kwargs):
+    """
+        For each element in the input PCollection, retrieve the id and decode 
the bytes into string
+
+        Args:
+          element: The element that is being processed.
+        """
+    yield {
+        "text": element.data.decode("utf-8"),
+        "id": element.attributes["id"],
+    }
+
+
+class DecodePrediction(beam.DoFn):
+  """A DoFn for decoding the prediction from RunInference."""
+  def process(self, element):
+    """
+    The `process` function takes the output of RunInference and returns a 
dictionary
+    with the text, uid, and cluster.**
+
+    Args:
+      element: The input element to be processed.
+    """
+    (text, uid), prediction = element
+    cluster = prediction.inference.item()
+    bq_dict = {"text": text, "id": uid, "cluster": cluster}
+    yield bq_dict
+
+
+class TriggerEmailAlert(beam.DoFn):
+  """A DoFn for sending email using yagmail."""
+  def setup(self):
+    """
+    Opens the cred.json file and initializes the yagmail SMTP client.
+    """
+    # NOTE(review): cred.json is loaded from the worker's current working
+    # directory — confirm it is shipped alongside the pipeline code.
+    with open("./cred.json") as json_file:
+      cred = json.load(json_file)
+      self.yag_smtp_client = yagmail.SMTP(**cred)
+
+  def process(self, element):
+    """
+        It takes a tuple of (text, id) and a prediction, and if the prediction 
is -1, it sends an email to

Review Comment:
   ```suggestion
           Takes a tuple of (text, id) and a prediction, and if the prediction 
is -1, it sends an email to
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to