AnandInguva commented on code in PR #31514:
URL: https://github.com/apache/beam/pull/31514#discussion_r1629714482


##########
sdks/python/apache_beam/ml/transforms/embeddings/vertex_ai.py:
##########
@@ -157,3 +160,95 @@ def get_ptransform_for_processing(self, **kwargs) -> 
beam.PTransform:
     return RunInference(
         model_handler=_TextEmbeddingHandler(self),
         inference_args=self.inference_args)
+
+
+class _VertexAIImageEmbeddingHandler(ModelHandler):
+  def __init__(
+      self,
+      model_name: str,
+      dimension: Optional[int] = None,
+      project: Optional[str] = None,
+      location: Optional[str] = None,
+      credentials: Optional[Credentials] = None,
+  ):
+    vertexai.init(project=project, location=location, credentials=credentials)
+    self.model_name = model_name
+    self.dimension = dimension
+
+  def run_inference(
+      self,
+      batch: Sequence[Image],
+      model: MultiModalEmbeddingModel,
+      inference_args: Optional[Dict[str, Any]] = None,
+  ) -> Iterable:
+    embeddings = []
+    # Maximum request size for muli-model embedding models is 1.
+    for img in batch:
+      try:
+        embedding_response = model.get_embeddings(
+            image=img, dimension=self.dimension)
+        embeddings.append(embedding_response.image_embedding)
+      except Exception as e:
+        print(e)
+    return embeddings
+
+  def load_model(self):
+    model = MultiModalEmbeddingModel.from_pretrained(self.model_name)
+    return model
+
+  def __repr__(self):
+    # ModelHandler is internal to the user and is not exposed.
+    # Hence we need to override the __repr__ method to expose
+    # the name of the class.
+    return 'VertexAIImageEmbeddings'
+
+
+class VertexAIImageEmbeddings(EmbeddingsManager):
+  def __init__(
+      self,
+      model_name: str,
+      columns: List[str],
+      dimension: Optional[int],
+      project: Optional[str] = None,
+      location: Optional[str] = None,
+      credentials: Optional[Credentials] = None,
+      **kwargs):
+    """
+    Embedding Config for Vertex AI Image Embedding models following
+    
https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-multimodal-embeddings
 # pylint: disable=line-too-long
+    Image Embeddings are generated for a batch of images using the Vertex AI 
API.
+    Embeddings are returned in a list for each image in the batch. This
+    transform makes remote calls to the Vertex AI service and may incur costs
+    for use.
+
+    Args:
+      model_name: The name of the Vertex AI Multi-Modal Embedding model.

Review Comment:
   The docstring for the `dimension` parameter is missing.



##########
sdks/python/apache_beam/ml/transforms/embeddings/vertex_ai.py:
##########
@@ -157,3 +160,95 @@ def get_ptransform_for_processing(self, **kwargs) -> 
beam.PTransform:
     return RunInference(
         model_handler=_TextEmbeddingHandler(self),
         inference_args=self.inference_args)
+
+
+class _VertexAIImageEmbeddingHandler(ModelHandler):
+  def __init__(
+      self,
+      model_name: str,
+      dimension: Optional[int] = None,
+      project: Optional[str] = None,
+      location: Optional[str] = None,
+      credentials: Optional[Credentials] = None,
+  ):
+    vertexai.init(project=project, location=location, credentials=credentials)
+    self.model_name = model_name
+    self.dimension = dimension
+
+  def run_inference(
+      self,
+      batch: Sequence[Image],
+      model: MultiModalEmbeddingModel,
+      inference_args: Optional[Dict[str, Any]] = None,
+  ) -> Iterable:
+    embeddings = []
+    # Maximum request size for muli-model embedding models is 1.
+    for img in batch:
+      try:
+        embedding_response = model.get_embeddings(
+            image=img, dimension=self.dimension)
+        embeddings.append(embedding_response.image_embedding)
+      except Exception as e:
+        print(e)

Review Comment:
   Printing the error might be a problem if there are too many elements for the Log
Explorer to handle, which could result in some logs being dropped.



##########
sdks/python/apache_beam/ml/transforms/embeddings/vertex_ai_test.py:
##########
@@ -245,5 +248,45 @@ def test_mltransform_to_ptransform_with_vertex(self):
           ptransform_list[i]._model_handler._underlying.model_name, model_name)
 
 
[email protected](
+    VertexAIImageEmbeddings is None, 'Vertex AI Python SDK is not installed.')
+class VertexAIImageEmbeddingsTest(unittest.TestCase):

Review Comment:
   Maybe create a directory such as `test_resources` and put the
image there instead of fetching it from GCS?



##########
sdks/python/apache_beam/ml/transforms/embeddings/vertex_ai.py:
##########
@@ -157,3 +160,95 @@ def get_ptransform_for_processing(self, **kwargs) -> 
beam.PTransform:
     return RunInference(
         model_handler=_TextEmbeddingHandler(self),
         inference_args=self.inference_args)
+
+
+class _VertexAIImageEmbeddingHandler(ModelHandler):
+  def __init__(
+      self,
+      model_name: str,
+      dimension: Optional[int] = None,
+      project: Optional[str] = None,
+      location: Optional[str] = None,
+      credentials: Optional[Credentials] = None,
+  ):
+    vertexai.init(project=project, location=location, credentials=credentials)
+    self.model_name = model_name
+    self.dimension = dimension
+
+  def run_inference(
+      self,
+      batch: Sequence[Image],
+      model: MultiModalEmbeddingModel,
+      inference_args: Optional[Dict[str, Any]] = None,
+  ) -> Iterable:
+    embeddings = []
+    # Maximum request size for muli-model embedding models is 1.
+    for img in batch:
+      try:
+        embedding_response = model.get_embeddings(
+            image=img, dimension=self.dimension)
+        embeddings.append(embedding_response.image_embedding)
+      except Exception as e:
+        print(e)

Review Comment:
   Why don't we raise an exception here instead of printing it?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to