claudevdm commented on code in PR #36654:
URL: https://github.com/apache/beam/pull/36654#discussion_r2475503688


##########
sdks/python/apache_beam/ml/rag/ingestion/spanner.py:
##########
@@ -0,0 +1,660 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Cloud Spanner vector store writer for RAG pipelines.
+
+This module provides a writer for storing embeddings and associated metadata
+in Google Cloud Spanner. It supports flexible schema configuration with the
+ability to flatten metadata fields into dedicated columns.
+
+Example usage:
+
+    Default schema (id, embedding, content, metadata):
+    >>> config = SpannerVectorWriterConfig(
+    ...     project_id="my-project",
+    ...     instance_id="my-instance",
+    ...     database_id="my-db",
+    ...     table_name="embeddings"
+    ... )
+
+    Flattened metadata fields:
+    >>> specs = (
+    ...     SpannerColumnSpecsBuilder()
+    ...     .with_id_spec()
+    ...     .with_embedding_spec()
+    ...     .with_content_spec()
+    ...     .add_metadata_field("source", str)
+    ...     .add_metadata_field("page_number", int, default=0)
+    ...     .with_metadata_spec()
+    ...     .build()
+    ... )
+    >>> config = SpannerVectorWriterConfig(
+    ...     project_id="my-project",
+    ...     instance_id="my-instance",
+    ...     database_id="my-db",
+    ...     table_name="embeddings",
+    ...     column_specs=specs
+    ... )
+
+Spanner schema example:
+
+    CREATE TABLE embeddings (
+        id STRING(1024) NOT NULL,
+        embedding ARRAY<FLOAT32>(vector_length=>768),
+        content STRING(MAX),
+        source STRING(MAX),
+        page_number INT64,
+        metadata JSON
+    ) PRIMARY KEY (id)
+"""
+
+import functools
+import json
+from dataclasses import dataclass
+from typing import Any
+from typing import Callable
+from typing import List
+from typing import Literal
+from typing import NamedTuple
+from typing import Optional
+from typing import Type
+
+import apache_beam as beam
+from apache_beam.coders import registry
+from apache_beam.coders.row_coder import RowCoder
+from apache_beam.io.gcp import spanner
+from apache_beam.ml.rag.ingestion.base import VectorDatabaseWriteConfig
+from apache_beam.ml.rag.types import Chunk
+
+
+@dataclass
+class SpannerColumnSpec:
+  """Column specification for Spanner vector writes.
+  
+  Defines how to extract and format values from Chunks for insertion into
+  Spanner table columns. Each spec maps to one column in the target table.
+  
+  Attributes:
+      column_name: Name of the Spanner table column
+      python_type: Python type for the NamedTuple field (required for RowCoder)
+      value_fn: Function to extract value from a Chunk
+  
+  Examples:
+      String column:
+      >>> SpannerColumnSpec(
+      ...     column_name="id",
+      ...     python_type=str,
+      ...     value_fn=lambda chunk: chunk.id
+      ... )
+      
+      Array column with conversion:
+      >>> SpannerColumnSpec(
+      ...     column_name="embedding",
+      ...     python_type=List[float],
+      ...     value_fn=lambda chunk: chunk.embedding.dense_embedding
+      ... )
+  """
+  column_name: str
+  python_type: Type
+  value_fn: Callable[[Chunk], Any]
+
+
+def _extract_and_convert(extract_fn, convert_fn, chunk):
+  if convert_fn:
+    return convert_fn(extract_fn(chunk))
+  return extract_fn(chunk)
+
+
+class SpannerColumnSpecsBuilder:
+  """Builder for creating Spanner column specifications.
+  
+  Provides a fluent API for defining table schemas and how to populate them
+  from Chunk objects. Supports standard Chunk fields (id, embedding, content,
+  metadata) and flattening metadata fields into dedicated columns.
+  
+  Example:
+      >>> specs = (
+      ...     SpannerColumnSpecsBuilder()
+      ...     .with_id_spec()
+      ...     .with_embedding_spec()
+      ...     .with_content_spec()
+      ...     .add_metadata_field("source", str)
+      ...     .with_metadata_spec()
+      ...     .build()
+      ... )
+  """
+  def __init__(self):
+    self._specs: List[SpannerColumnSpec] = []
+
+  @staticmethod
+  def with_defaults() -> 'SpannerColumnSpecsBuilder':
+    """Create builder with default schema.
+    
+    Default schema includes:
+    - id (STRING): Chunk ID
+    - embedding (ARRAY<FLOAT32>): Dense embedding vector
+    - content (STRING): Chunk content text
+    - metadata (JSON): Full metadata as JSON
+    
+    Returns:
+        Builder with default column specifications
+    """
+    return (
+        SpannerColumnSpecsBuilder().with_id_spec().with_embedding_spec().
+        with_content_spec().with_metadata_spec())
+
+  def with_id_spec(
+      self,
+      column_name: str = "id",
+      python_type: Type = str,
+      extract_fn: Optional[Callable[[Chunk], Any]] = lambda chunk: chunk.id,
+      convert_fn: Optional[Callable[[Any], Any]] = None

Review Comment:
   I separated them out in case someone just wants to change the type —
`convert_fn` makes it simple to cast an int to a str or something like that,
while `extract_fn` is for when the id comes from somewhere other than
`chunk.id`.
   
   Don't have a strong preference though; I can unify them.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to