This is an automated email from the ASF dual-hosted git repository.

skrawcz pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/hamilton.git

commit a9cfd66efce413886635cde39e701bc4f18d6310
Author: Josh Markovic <[email protected]>
AuthorDate: Thu Jan 8 15:45:58 2026 +0000

    Enforce Ruff B905
---
 contrib/docs/compile_docs.py                                          | 2 +-
 .../hamilton/contrib/user/skrawcz/customize_embeddings/__init__.py    | 2 +-
 contrib/hamilton/contrib/user/skrawcz/fine_tuning/__init__.py         | 2 +-
 examples/LLM_Workflows/modular_llm_stack/lancedb_module.py            | 4 +++-
 examples/LLM_Workflows/modular_llm_stack/marqo_module.py              | 2 +-
 examples/LLM_Workflows/modular_llm_stack/pinecone_module.py           | 2 +-
 examples/LLM_Workflows/modular_llm_stack/qdrant_module.py             | 2 +-
 examples/LLM_Workflows/modular_llm_stack/weaviate_module.py           | 2 +-
 .../LLM_Workflows/retrieval_augmented_generation/backend/ingestion.py | 4 +++-
 examples/due_date_probabilities/probability_estimation.py             | 3 ++-
 hamilton/async_driver.py                                              | 2 +-
 hamilton/experimental/decorators/parameterize_frame.py                | 4 ++--
 hamilton/function_modifiers/expanders.py                              | 4 +++-
 hamilton/plugins/h_kedro.py                                           | 4 ++--
 hamilton/plugins/h_openlineage.py                                     | 1 +
 scripts/update_blogs_in_learning_resources.py                         | 2 +-
 tests/resources/test_driver_serde_mapper.py                           | 2 +-
 ui/backend/server/trackingserver_run_tracking/api.py                  | 2 +-
 18 files changed, 27 insertions(+), 19 deletions(-)

diff --git a/contrib/docs/compile_docs.py b/contrib/docs/compile_docs.py
index b5503ec6..dd492d89 100644
--- a/contrib/docs/compile_docs.py
+++ b/contrib/docs/compile_docs.py
@@ -344,7 +344,7 @@ def _create_commit_file(df_path, single_df):
     os.makedirs(commit_path, exist_ok=True)
     with open(os.path.join(commit_path, "commit.txt"), "w") as f:
         for commit, ts in zip(
-            single_df["__init__.py"]["commit"], single_df["__init__.py"]["timestamp"]
+            single_df["__init__.py"]["commit"], single_df["__init__.py"]["timestamp"], strict=False
         ):
             f.write(f"[commit::{commit}][ts::{ts}]\n")
 
diff --git 
a/contrib/hamilton/contrib/user/skrawcz/customize_embeddings/__init__.py 
b/contrib/hamilton/contrib/user/skrawcz/customize_embeddings/__init__.py
index a038698d..fed60ce6 100644
--- a/contrib/hamilton/contrib/user/skrawcz/customize_embeddings/__init__.py
+++ b/contrib/hamilton/contrib/user/skrawcz/customize_embeddings/__init__.py
@@ -409,7 +409,7 @@ def _accuracy_and_se(
         threshold = threshold_thousandths / 1000
         total = 0
         correct = 0
-        for cs, ls in zip(cosine_similarity, labeled_similarity):
+        for cs, ls in zip(cosine_similarity, labeled_similarity, strict=False):
             total += 1
             if cs > threshold:
                 prediction = 1
diff --git a/contrib/hamilton/contrib/user/skrawcz/fine_tuning/__init__.py 
b/contrib/hamilton/contrib/user/skrawcz/fine_tuning/__init__.py
index 406a2a3a..606520b5 100644
--- a/contrib/hamilton/contrib/user/skrawcz/fine_tuning/__init__.py
+++ b/contrib/hamilton/contrib/user/skrawcz/fine_tuning/__init__.py
@@ -657,7 +657,7 @@ def hold_out_set_predictions(
             )
             predictions.append(prediction)
             questions.append(tokenizer.decode(sample["input_ids"], skip_special_tokens=True))
-    return list(zip(questions, predictions))
+    return list(zip(questions, predictions, strict=False))
 
 
 if __name__ == "__main__":
diff --git a/examples/LLM_Workflows/modular_llm_stack/lancedb_module.py 
b/examples/LLM_Workflows/modular_llm_stack/lancedb_module.py
index 573db8b6..befb74f7 100644
--- a/examples/LLM_Workflows/modular_llm_stack/lancedb_module.py
+++ b/examples/LLM_Workflows/modular_llm_stack/lancedb_module.py
@@ -68,7 +68,9 @@ def data_objects(
     assert len(ids) == len(titles) == len(text_contents) == len(embeddings)
     return [
         dict(squad_id=id_, title=title, context=context, vector=embedding, **metadata)
-        for id_, title, context, embedding in zip(ids, titles, text_contents, embeddings)
+        for id_, title, context, embedding in zip(
+            ids, titles, text_contents, embeddings, strict=False
+        )
     ]
 
 
diff --git a/examples/LLM_Workflows/modular_llm_stack/marqo_module.py 
b/examples/LLM_Workflows/modular_llm_stack/marqo_module.py
index e186f781..2c1bea0c 100644
--- a/examples/LLM_Workflows/modular_llm_stack/marqo_module.py
+++ b/examples/LLM_Workflows/modular_llm_stack/marqo_module.py
@@ -58,7 +58,7 @@ def data_objects(
     assert len(ids) == len(titles) == len(text_contents)
     return [
         dict(_id=id, title=title, Description=text_content)
-        for id, title, text_content in zip(ids, titles, text_contents)
+        for id, title, text_content in zip(ids, titles, text_contents, strict=False)
         if id is not None and title is not None or text_content is not None
     ]
 
diff --git a/examples/LLM_Workflows/modular_llm_stack/pinecone_module.py 
b/examples/LLM_Workflows/modular_llm_stack/pinecone_module.py
index 38f4d3bf..29f8bacb 100644
--- a/examples/LLM_Workflows/modular_llm_stack/pinecone_module.py
+++ b/examples/LLM_Workflows/modular_llm_stack/pinecone_module.py
@@ -62,7 +62,7 @@ def data_objects(
     assert len(ids) == len(titles) == len(embeddings)
     properties = [dict(title=title, **metadata) for title in titles]
     embeddings = [x.tolist() for x in embeddings]
-    return list(zip(ids, embeddings, properties))
+    return list(zip(ids, embeddings, properties, strict=False))
 
 
 def push_to_vector_db(
diff --git a/examples/LLM_Workflows/modular_llm_stack/qdrant_module.py 
b/examples/LLM_Workflows/modular_llm_stack/qdrant_module.py
index 9856cb17..a05fc26f 100644
--- a/examples/LLM_Workflows/modular_llm_stack/qdrant_module.py
+++ b/examples/LLM_Workflows/modular_llm_stack/qdrant_module.py
@@ -59,7 +59,7 @@ def data_objects(
     ids = list(range(len(ids)))
     payloads = [
         dict(id=_id, text_content=text_content, title=title, **metadata)
-        for _id, title, text_content in zip(ids, titles, text_contents)
+        for _id, title, text_content in zip(ids, titles, text_contents, strict=False)
     ]
     embeddings = [x.tolist() for x in embeddings]
     return dict(ids=ids, vectors=embeddings, payload=payloads)
diff --git a/examples/LLM_Workflows/modular_llm_stack/weaviate_module.py 
b/examples/LLM_Workflows/modular_llm_stack/weaviate_module.py
index 3a723588..ed31145e 100644
--- a/examples/LLM_Workflows/modular_llm_stack/weaviate_module.py
+++ b/examples/LLM_Workflows/modular_llm_stack/weaviate_module.py
@@ -86,7 +86,7 @@ def data_objects(
     assert len(ids) == len(titles) == len(text_contents)
     return [
         dict(squad_id=id_, title=title, context=context, **metadata)
-        for id_, title, context in zip(ids, titles, text_contents)
+        for id_, title, context in zip(ids, titles, text_contents, strict=False)
     ]
 
 
diff --git 
a/examples/LLM_Workflows/retrieval_augmented_generation/backend/ingestion.py 
b/examples/LLM_Workflows/retrieval_augmented_generation/backend/ingestion.py
index 14bfedd1..209debc2 100644
--- a/examples/LLM_Workflows/retrieval_augmented_generation/backend/ingestion.py
+++ b/examples/LLM_Workflows/retrieval_augmented_generation/backend/ingestion.py
@@ -212,7 +212,9 @@ def store_documents(
                 uuid=document_uuid,
             )
 
-            chunk_iterator = zip(pdf_obj["chunked_text"], pdf_obj["chunked_embeddings"])
+            chunk_iterator = zip(
+                pdf_obj["chunked_text"], pdf_obj["chunked_embeddings"], strict=False
+            )
             for chunk_idx, (chunk_text, chunk_embedding) in enumerate(chunk_iterator):
                 chunk_object = dict(content=chunk_text, chunk_index=chunk_idx)
                 chunk_uuid = generate_uuid5(chunk_object, "Chunk")
diff --git a/examples/due_date_probabilities/probability_estimation.py 
b/examples/due_date_probabilities/probability_estimation.py
index a3878750..8009cbb8 100644
--- a/examples/due_date_probabilities/probability_estimation.py
+++ b/examples/due_date_probabilities/probability_estimation.py
@@ -133,7 +133,8 @@ def raw_probabilities(raw_data: str) -> pd.DataFrame:
     days = [int(item.split(", ")[1].split()[0]) for item in raw_data]
     probability = [float(item.split()[5].replace("%", "")) / 100 for item in raw_data]
     probabilities_data = [
-        (week * 7 + day, probability) for week, day, probability in zip(weeks, days, probability)
+        (week * 7 + day, probability)
+        for week, day, probability in zip(weeks, days, probability, strict=False)
     ]
     probabilities_df = pd.DataFrame(probabilities_data)
     probabilities_df.columns = ["days", "probability"]
diff --git a/hamilton/async_driver.py b/hamilton/async_driver.py
index aa6287fb..95a53a52 100644
--- a/hamilton/async_driver.py
+++ b/hamilton/async_driver.py
@@ -38,7 +38,7 @@ async def await_dict_of_tasks(task_dict: Dict[str, 
typing.Awaitable]) -> Dict[st
     keys = sorted(task_dict.keys())
     coroutines = [task_dict[key] for key in keys]
     coroutines_gathered = await asyncio.gather(*coroutines)
-    return dict(zip(keys, coroutines_gathered))
+    return dict(zip(keys, coroutines_gathered, strict=False))
 
 
 async def process_value(val: Any) -> Any:
diff --git a/hamilton/experimental/decorators/parameterize_frame.py 
b/hamilton/experimental/decorators/parameterize_frame.py
index 30f9b331..10aee4a5 100644
--- a/hamilton/experimental/decorators/parameterize_frame.py
+++ b/hamilton/experimental/decorators/parameterize_frame.py
@@ -67,11 +67,11 @@ def _convert_params_from_df(parameterization: pd.DataFrame) 
-> List[Parameterize
     for _, column_set in parameterization.iterrows():
         parameterization = {
             arg: dep_type(col_value)
-            for arg, col_value, dep_type in zip(args, column_set, dep_types_converted)
+            for arg, col_value, dep_type in zip(args, column_set, dep_types_converted, strict=False)
             if dep_type is not None
         }
         extracted_columns = [
-            col for col, dep_type in zip(column_set, dep_types) if dep_type == "out"
+            col for col, dep_type in zip(column_set, dep_types, strict=False) if dep_type == "out"
         ]
         out.append(ParameterizedExtract(tuple(extracted_columns), parameterization))
     return out
diff --git a/hamilton/function_modifiers/expanders.py 
b/hamilton/function_modifiers/expanders.py
index 9e16a945..a0074d35 100644
--- a/hamilton/function_modifiers/expanders.py
+++ b/hamilton/function_modifiers/expanders.py
@@ -1065,7 +1065,9 @@ class unpack_fields(base.SingleNodeNodeTransformer):
 
         output_nodes = [node_.copy_with(callabl=tuple_generator)]
 
-        for idx, (field_name, field_type) in enumerate(zip(self.fields, self.field_types)):
+        for idx, (field_name, field_type) in enumerate(
+            zip(self.fields, self.field_types, strict=False)
+        ):

             def extractor(field_index: int = idx, **kwargs) -> field_type:  # type: ignore
                 # This extractor is constructed to avoid closure issues.
diff --git a/hamilton/plugins/h_kedro.py b/hamilton/plugins/h_kedro.py
index b5e61723..a20557b8 100644
--- a/hamilton/plugins/h_kedro.py
+++ b/hamilton/plugins/h_kedro.py
@@ -35,7 +35,7 @@ def expand_k_node(base_node: HNode, outputs: List[str]) -> 
List[HNode]:
     """
 
     def _convert_output_from_tuple_to_dict(node_result: Any, node_kwargs: Dict[str, Any]):
-        return {out: v for out, v in zip(outputs, node_result)}
+        return {out: v for out, v in zip(outputs, node_result, strict=False)}
 
     # NOTE isinstance(Any, type) is False for Python < 3.11
     extractor = extract_fields(fields={out: Any for out in outputs})
@@ -91,7 +91,7 @@ def k_node_to_h_nodes(node: KNode) -> List[HNode]:
 
     # remap the function parameters to the node `inputs` and clean Kedro `parameters` name
     new_params = {}
-    for param, k_input in zip(params, node.inputs):
+    for param, k_input in zip(params, node.inputs, strict=False):
         if k_input.startswith("params:"):
             k_input = k_input.partition("params:")[-1]
 
diff --git a/hamilton/plugins/h_openlineage.py 
b/hamilton/plugins/h_openlineage.py
index 2554d568..a186b29e 100644
--- a/hamilton/plugins/h_openlineage.py
+++ b/hamilton/plugins/h_openlineage.py
@@ -61,6 +61,7 @@ def extract_schema_facet(metadata):
             for k, v in zip(
                 metadata["dataframe_metadata"]["column_names"],
                 metadata["dataframe_metadata"]["datatypes"],
+                strict=False,
             )
         ]
         schema_facet = facet_v2.schema_dataset.SchemaDatasetFacet(
diff --git a/scripts/update_blogs_in_learning_resources.py 
b/scripts/update_blogs_in_learning_resources.py
index dd6601ca..3472d821 100644
--- a/scripts/update_blogs_in_learning_resources.py
+++ b/scripts/update_blogs_in_learning_resources.py
@@ -105,7 +105,7 @@ def fetch_articles(url, cutoff_date):
             None,
         ),
     ]
-    for _i, (anchor, time_el) in enumerate(zip(anchors, timeEls)):
+    for _i, (anchor, time_el) in enumerate(zip(anchors, timeEls, strict=False)):
         link = anchor["href"]
         text = anchor.get_text()
 
diff --git a/tests/resources/test_driver_serde_mapper.py 
b/tests/resources/test_driver_serde_mapper.py
index 7d71edf5..7b9f064d 100644
--- a/tests/resources/test_driver_serde_mapper.py
+++ b/tests/resources/test_driver_serde_mapper.py
@@ -27,7 +27,7 @@ def mapper(
 ) -> Parallelizable[dict]:
     if final_vars is None:
         final_vars = []
-    for dr, input_ in zip(drivers, inputs):
+    for dr, input_ in zip(drivers, inputs, strict=False):
         yield {
             "dr": dr,
             "final_vars": final_vars or dr.list_available_variables(),
diff --git a/ui/backend/server/trackingserver_run_tracking/api.py 
b/ui/backend/server/trackingserver_run_tracking/api.py
index c216c3a6..a95e9c96 100644
--- a/ui/backend/server/trackingserver_run_tracking/api.py
+++ b/ui/backend/server/trackingserver_run_tracking/api.py
@@ -426,7 +426,7 @@ async def get_latest_template_runs(
     return CatalogZoomResponse(
         node_runs=[
             NodeRunOutWithExtraData.from_orm(node_run, dag_template_id=dag_template_id)
-            for (node_run, dag_template_id) in zip(node_runs, all_dag_versions)
+            for (node_run, dag_template_id) in zip(node_runs, all_dag_versions, strict=False)
         ],
         node_templates=[
             NodeTemplateOut.from_orm(node_template) for node_template in node_templates

Reply via email to