tobixdev commented on code in PR #17986:
URL: https://github.com/apache/datafusion/pull/17986#discussion_r2441835924
##########
datafusion/common/src/metadata.rs:
##########
@@ -0,0 +1,363 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::{collections::BTreeMap, sync::Arc};
+
+use arrow::datatypes::{DataType, Field};
+use hashbrown::HashMap;
+
+use crate::{error::_plan_err, DataFusionError, ScalarValue};
+
+/// A [`ScalarValue`] with optional [`FieldMetadata`]
+#[derive(Debug, Clone)]
+pub struct Literal {
+    pub value: ScalarValue,
+    pub metadata: Option<FieldMetadata>,
+}
+
+impl Literal {
+    /// Create a new Literal from a scalar value with optional [`FieldMetadata`]
+    pub fn new(value: ScalarValue, metadata: Option<FieldMetadata>) -> Self {
+        Self { value, metadata }
+    }
+
+    /// Access the underlying [ScalarValue] storage
+    pub fn value(&self) -> &ScalarValue {
+        &self.value
+    }
+
+    /// Access the [FieldMetadata] attached to this value, if any
+    pub fn metadata(&self) -> Option<&FieldMetadata> {
+        self.metadata.as_ref()
+    }
+
+    /// Consume self and return components
+    pub fn into_inner(self) -> (ScalarValue, Option<FieldMetadata>) {
+        (self.value, self.metadata)
+    }
+
+    /// Cast this literal's storage type
+    ///
+    /// This operation assumes that if the underlying [ScalarValue] can be casted
+    /// to a given type that any extension type represented by the metadata is also
+    /// valid.
+    pub fn cast_storage_to(

Review Comment:
   Could you elaborate on why we need this function? It seems like it is used to cast the provided parameter instances to the type of the placeholder. Can't we for now assume that these types match and return an `Err` if this is not the case? (A sketch of that alternative follows at the end of this message.)
   
   It just seems like we are trying something here that supports logical types / extension types, but then ignore any restrictions they might have on converting to a particular physical type. Just some thoughts, I don't know if this is actually a problem.

##########
datafusion/expr/src/expr_schema.rs:
##########
@@ -921,6 +930,53 @@ mod tests {
         assert_eq!(meta, outer_ref.metadata(&schema).unwrap());
     }
 
+    #[test]
+    fn test_expr_placeholder() {
+        let schema = MockExprSchema::new();
+
+        let mut placeholder_meta = HashMap::new();
+        placeholder_meta.insert("bar".to_string(), "buzz".to_string());
+        let placeholder_meta = FieldMetadata::from(placeholder_meta);
+
+        let expr = Expr::Placeholder(Placeholder::new_with_metadata(
+            "".to_string(),
+            Some(
+                Field::new("", DataType::Utf8, true)
+                    .with_metadata(placeholder_meta.to_hashmap())
+                    .into(),
+            ),
+        ));
+
+        assert!(expr.nullable(&schema).unwrap());

Review Comment:
   I think this is tested below too. Maybe we can remove the first assertion.
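For illustration, here is a minimal sketch of the stricter alternative raised in the `cast_storage_to` comment above. The method name `require_storage_type` is hypothetical; `Literal`, `DataType`, `DataFusionError`, and `_plan_err!` are the items from the quoted file, so this would live alongside the existing impl:

```rust
impl Literal {
    /// Hypothetical stricter variant: instead of casting, require that the
    /// bound value's storage type already matches the placeholder's type.
    pub fn require_storage_type(
        &self,
        target_type: &DataType,
    ) -> Result<(), DataFusionError> {
        if self.value.data_type() != *target_type {
            // `_plan_err!` produces an `Err(DataFusionError::Plan(..))`
            return _plan_err!(
                "Expected parameter of type {target_type}, got {}",
                self.value.data_type()
            );
        }
        Ok(())
    }
}
```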
##########
datafusion/proto/src/logical_plan/mod.rs:
##########
@@ -888,9 +888,33 @@ impl AsLogicalPlan for LogicalPlanNode {
                     .iter()
                     .map(DataType::try_from)
                     .collect::<Result<_, _>>()?;
-                LogicalPlanBuilder::from(input)
-                    .prepare(prepare.name.clone(), data_types)?
-                    .build()
+                let fields: Vec<Field> = prepare
+                    .fields
+                    .iter()
+                    .map(Field::try_from)
+                    .collect::<Result<_, _>>()?;
+
+                // If the fields are empty this may have been generated by an

Review Comment:
   Could you also add a test that covers the handling of old serialized plans?

##########
datafusion/proto/tests/cases/roundtrip_logical_plan.rs:
##########
@@ -1071,6 +1071,37 @@ async fn roundtrip_logical_plan_with_view_scan() -> Result<()> {
     Ok(())
 }
 
+#[tokio::test]
+async fn roundtrip_logical_plan_prepared_statement_with_metadata() -> Result<()> {
+    let ctx = SessionContext::new();
+
+    let plan = ctx
+        .sql("SELECT $1")
+        .await
+        .unwrap()
+        .into_optimized_plan()
+        .unwrap();
+    let prepared = LogicalPlanBuilder::new(plan)
+        .prepare(
+            "".to_string(),
+            vec![Field::new("", DataType::Int32, true)
+                .with_metadata(
+                    [("some_key".to_string(), "some_value".to_string())].into(),
+                )
+                .into()],
+        )
+        .unwrap()
+        .plan()
+        .clone();
+
+    let bytes = logical_plan_to_bytes(&prepared)?;
+    let logical_round_trip = logical_plan_from_bytes(&bytes, &ctx)?;
+    assert_eq!(format!("{prepared}"), format!("{logical_round_trip}"));
+    // Also check exact equality in case metadata isn't reflected in the display string

Review Comment:
   Is there a reason why `assert_eq!(&prepared, &logical_round_trip);` is not enough, so that the test also asserts equality of the serialized version? Maybe we could also use insta for this test if asserting the serialized version is important (see the first sketch at the end of this message). 🤔

##########
datafusion/expr/src/expr.rs:
##########
@@ -2886,19 +2666,23 @@ impl HashNode for Expr {
     }
 }
 
-// Modifies expr if it is a placeholder with datatype of right
+// Modifies expr to match the DataType, metadata, and nullability of other if it is
+// a placeholder with previously unspecified type information (i.e., most placeholders)
 fn rewrite_placeholder(expr: &mut Expr, other: &Expr, schema: &DFSchema) -> Result<()> {
-    if let Expr::Placeholder(Placeholder { id: _, data_type }) = expr {
-        if data_type.is_none() {
-            let other_dt = other.get_type(schema);
-            match other_dt {
+    if let Expr::Placeholder(Placeholder { id: _, field }) = expr {
+        if field.is_none() {
+            let other_field = other.to_field(schema);
+            match other_field {
                 Err(e) => {
                     Err(e.context(format!(
                         "Can not find type of {other} needed to infer type of {expr}"
                     )))?;
                 }
-                Ok(dt) => {
-                    *data_type = Some(dt);
+                Ok((_, other_field)) => {

Review Comment:
   Rather than disallowing non-nullable placeholders, could we check, while replacing the placeholder, that the given field is in fact a non-nullable field (and maybe assert that the `ScalarValue` is non-null)? A sketch of such a check follows below.
   
   Context: for us, the non-nullability guarantee is often very important for certain optimizations.

##########
datafusion/sql/src/planner.rs:
##########
@@ -600,27 +602,54 @@ impl<'a, S: ContextProvider> SqlToRel<'a, S> {
             SQLDataType::Array(ArrayElemTypeDef::AngleBracket(inner_sql_type)) => {
                 // Arrays may be multi-dimensional.
                 let inner_data_type = self.convert_data_type(inner_sql_type)?;
-                Ok(DataType::new_list(inner_data_type, true))
+
+                // Lists are allowed to have an arbitrarily named field;

Review Comment:
   Should we create an issue for creating `DataType::new_list_from_field` in arrow-rs? (A sketch of what that helper could look like follows below.)
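On the roundtrip test comment above: if the rendered plan is what matters, the tail of the quoted test could be swapped for a snapshot. This is only a fragment of that test (it reuses `prepared` and `logical_round_trip` from the quoted code) and assumes `insta` as a dev-dependency:

```rust
// Structural equality, as suggested in the comment above:
assert_eq!(&prepared, &logical_round_trip);

// ...and, if the rendered plan is still worth pinning, a reviewable
// snapshot instead of the manual display-string comparison:
insta::assert_snapshot!(format!("{logical_round_trip}"));
```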
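On the `rewrite_placeholder` comment above, a minimal sketch of the suggested validation, assuming it runs wherever parameter values are substituted for placeholders. `check_non_null_binding` is a hypothetical helper, not code from this PR:

```rust
use arrow::datatypes::Field;
use datafusion_common::{plan_err, Result, ScalarValue};

/// Hypothetical helper: reject a NULL value bound to a placeholder whose
/// inferred field is non-nullable, instead of forcing all placeholders
/// to be nullable.
fn check_non_null_binding(field: &Field, value: &ScalarValue) -> Result<()> {
    if !field.is_nullable() && value.is_null() {
        return plan_err!(
            "Placeholder '{}' is non-nullable but was bound to a NULL value",
            field.name()
        );
    }
    Ok(())
}
```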
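And for the proposed arrow-rs issue, the requested helper could look roughly like this. `new_list_from_field` is a hypothetical name that does not exist in arrow-rs today; it is sketched as a free function here because inherent methods on `DataType` can only be added inside arrow-rs itself:

```rust
use std::sync::Arc;

use arrow::datatypes::{DataType, Field};

/// Hypothetical arrow-rs helper: build a List type from a caller-supplied
/// element field, preserving its name, nullability, and metadata (unlike
/// `DataType::new_list`, which always names the element field "item").
fn new_list_from_field(element: Field) -> DataType {
    DataType::List(Arc::new(element))
}

fn main() {
    let dt = new_list_from_field(Field::new("element", DataType::Int32, true));
    assert!(matches!(dt, DataType::List(_)));
}
```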
##########
datafusion/sql/tests/cases/params.rs:
##########
@@ -704,6 +732,147 @@ fn test_prepare_statement_to_plan_one_param() {
     );
 }
 
+#[test]
+fn test_update_infer_with_metadata() {

Review Comment:
   👍

##########
datafusion/common/src/metadata.rs:
##########
@@ -0,0 +1,363 @@
+    /// Cast this literal's storage type
+    ///
+    /// This operation assumes that if the underlying [ScalarValue] can be casted
+    /// to a given type that any extension type represented by the metadata is also
+    /// valid.
+    pub fn cast_storage_to(
+        &self,
+        target_type: &DataType,
+    ) -> Result<Self, DataFusionError> {
+        let new_value = self.value().cast_to(target_type)?;
+        Ok(Literal::new(new_value, self.metadata.clone()))
+    }
+}
+
+/// Assert equality of data types where one or both sides may have field metadata
+///
+/// This currently compares absent metadata (e.g., one side was a DataType) and
+/// empty metadata (e.g., one side was a field where the field had no metadata)
+/// as equal and uses byte-for-byte comparison for the keys and values of the
+/// fields, even though this is potentially too strict for some cases (e.g.,
+/// extension types where extension metadata is represented by JSON, or cases
+/// where field metadata is orthogonal to the interpretation of the data type).
+///
+/// Returns a planning error with suitably formatted type representations if
+/// actual and expected do not compare to equal.
+pub fn check_metadata_with_storage_equal(
+    actual: (
+        &DataType,
+        Option<&std::collections::HashMap<String, String>>,
+    ),
+    expected: (
+        &DataType,
+        Option<&std::collections::HashMap<String, String>>,
+    ),
+    what: &str,
+    context: &str,
+) -> Result<(), DataFusionError> {
+    if actual.0 != expected.0 {
+        return _plan_err!(
+            "Expected {what} of type {}, got {}{context}",
+            format_type_and_metadata(expected.0, expected.1),
+            format_type_and_metadata(actual.0, actual.1)
+        );
+    }
+
+    let metadata_equal = match (actual.1, expected.1) {
+        (None, None) => true,
+        (None, Some(expected_metadata)) => expected_metadata.is_empty(),
+        (Some(actual_metadata), None) => actual_metadata.is_empty(),
+        (Some(actual_metadata), Some(expected_metadata)) => {
+            actual_metadata == expected_metadata
+        }
+    };
+
+    if !metadata_equal {
+        return _plan_err!(
+            "Expected {what} of type {}, got {}{context}",
+            format_type_and_metadata(expected.0, expected.1),
+            format_type_and_metadata(actual.0, actual.1)
+        );
+    }
+
+    Ok(())
+}
+
+/// Given a data type represented by storage and optional metadata, generate
+/// a user-facing string
+///
+/// This function exists to reduce the number of Field debug strings that are
+/// used to communicate type information in error messages and plan explain
+/// renderings.
+pub fn format_type_and_metadata(
+    data_type: &DataType,
+    metadata: Option<&std::collections::HashMap<String, String>>,
+) -> String {

Review Comment:
   I think the best solution would be to delegate the formatting to a registered implementation of a logical type / extension type. This way we can really use type-specific formatting (see the rough sketch at the end of this message). But I think this is beyond the scope of this PR.

##########
datafusion/common/src/metadata.rs:
##########
@@ -0,0 +1,363 @@
+/// A [`ScalarValue`] with optional [`FieldMetadata`]
+#[derive(Debug, Clone)]
+pub struct Literal {
+    pub value: ScalarValue,
+    pub metadata: Option<FieldMetadata>,
+}
+
+impl Literal {
+    /// Create a new Literal from a scalar value with optional [`FieldMetadata`]
+    pub fn new(value: ScalarValue, metadata: Option<FieldMetadata>) -> Self {
+        Self { value, metadata }
+    }

Review Comment:
   My personal opinion is that `Literal` is OK and that it should be used in `Expr::Literal` and `ConstSimplifyResult::Simplified`. However, I believe this could be done in a follow-up PR with a tracking issue, so that others can provide input on it.
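A rough illustration of the delegation idea from the `format_type_and_metadata` comment above; every name here is hypothetical, and nothing like this exists in the PR:

```rust
use std::collections::HashMap;

use arrow::datatypes::DataType;

/// Hypothetical extension point: a registered logical / extension type could
/// own the user-facing rendering of its storage type plus metadata.
trait ExtensionTypeFormatter {
    /// Return `Some(rendered)` when this formatter recognizes the metadata,
    /// or `None` to fall back to the generic `format_type_and_metadata`.
    fn format_type(
        &self,
        data_type: &DataType,
        metadata: &HashMap<String, String>,
    ) -> Option<String>;
}

/// Example: render any type tagged with the canonical Arrow extension key
/// by its extension name instead of its storage type.
struct ExtensionNameFormatter;

impl ExtensionTypeFormatter for ExtensionNameFormatter {
    fn format_type(
        &self,
        _data_type: &DataType,
        metadata: &HashMap<String, String>,
    ) -> Option<String> {
        metadata.get("ARROW:extension:name").cloned()
    }
}
```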
##########
datafusion/common/src/metadata.rs:
##########
@@ -0,0 +1,363 @@
+pub fn check_metadata_with_storage_equal(
+    actual: (
+        &DataType,
+        Option<&std::collections::HashMap<String, String>>,
+    ),
+    expected: (
+        &DataType,
+        Option<&std::collections::HashMap<String, String>>,
+    ),
+    what: &str,
+    context: &str,
+) -> Result<(), DataFusionError> {

Review Comment:
   I agree, I think it's better than duplicating the logic. I think a good solution for this problem will require support for registering instances of logical types. Should we track that somewhere? (A usage sketch of the current function follows below.)
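For context, a small usage sketch of `check_metadata_with_storage_equal` as quoted above. The `datafusion_common::metadata` module path is assumed from the file location, and the extension metadata key is only an example:

```rust
use std::collections::HashMap;

use arrow::datatypes::DataType;
use datafusion_common::metadata::check_metadata_with_storage_equal;
use datafusion_common::Result;

fn validate_param() -> Result<()> {
    let meta: HashMap<String, String> =
        [("ARROW:extension:name".to_string(), "example.uuid".to_string())].into();

    // Both sides are (storage type, optional field metadata); absent and
    // empty metadata compare equal, anything else byte-for-byte.
    check_metadata_with_storage_equal(
        (&DataType::FixedSizeBinary(16), Some(&meta)),
        (&DataType::FixedSizeBinary(16), Some(&meta)),
        "value for placeholder $1", // `what`, named in the error message
        "",                         // `context`, appended to the error message
    )
}
```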
##########
datafusion/expr/src/expr.rs:
##########
@@ -2886,19 +2666,23 @@ impl HashNode for Expr {
-                Ok(dt) => {
-                    *data_type = Some(dt);
+                Ok((_, other_field)) => {

Review Comment:
   I see below you have a test checking, for `name == $1`, that `$1` is nullable. Is this handled here? My intuition would be that the nullability information should come from the parameter type inference, and that here we just accept that nullability.

##########
datafusion/sql/src/planner.rs:
##########
@@ -587,11 +589,11 @@ impl<'a, S: ContextProvider> SqlToRel<'a, S> {
         })
     }
 
-    pub(crate) fn convert_data_type(&self, sql_type: &SQLDataType) -> Result<DataType> {
+    pub(crate) fn convert_data_type(&self, sql_type: &SQLDataType) -> Result<FieldRef> {

Review Comment:
   Maybe this should be renamed to something like `convert_data_type_to_field`?

##########
datafusion/sql/tests/cases/params.rs:
##########
@@ -51,11 +51,39 @@ impl ParameterTest<'_> {
     }
 }
 
+pub struct ParameterTestWithMetadata<'a> {
+    pub sql: &'a str,
+    pub expected_types: Vec<(&'a str, Option<FieldRef>)>,
+    pub param_values: Vec<Literal>,
+}
+
+impl ParameterTestWithMetadata<'_> {
+    pub fn run(&self) -> String {
+        let plan = logical_plan(self.sql).unwrap();
+
+        let actual_types = plan.get_parameter_fields().unwrap();
+        let expected_types: HashMap<String, Option<FieldRef>> = self
+            .expected_types
+            .iter()
+            .map(|(k, v)| ((*k).to_string(), v.clone()))
+            .collect();
+
+        assert_eq!(actual_types, expected_types);
+
+        let plan_with_params = plan
+            .clone()
+            .with_param_values(ParamValues::List(self.param_values.clone()))
+            .unwrap();
+
+        format!("** Initial Plan:\n{plan}\n** Final Plan:\n{plan_with_params}")
+    }
+}
+
 fn generate_prepare_stmt_and_data_types(sql: &str) -> (LogicalPlan, String) {
     let plan = logical_plan(sql).unwrap();
     let data_types = match &plan {
-        LogicalPlan::Statement(Statement::Prepare(Prepare { data_types, .. })) => {
-            data_types.iter().join(", ").to_string()
+        LogicalPlan::Statement(Statement::Prepare(Prepare { fields, .. })) => {
+            fields.iter().map(|f| f.data_type()).join(", ").to_string()

Review Comment:
   Does it make sense to use `format_type_and_metadata` here?

##########
datafusion/common/src/metadata.rs:
##########
@@ -0,0 +1,363 @@
+    /// Cast this literal's storage type
+    ///
+    /// This operation assumes that if the underlying [ScalarValue] can be casted
+    /// to a given type that any extension type represented by the metadata is also
+    /// valid.
+    pub fn cast_storage_to(

Review Comment:
   Ok, I see the cast was there before, so this should be OK for now. Maybe in the future we can consider the allowed casts of a logical type.

##########
datafusion/expr/src/expr_schema.rs:
##########
@@ -433,18 +436,18 @@ impl ExprSchemable for Expr {
             ..
         }) => {
             let field = match &**expr {
-                Expr::Placeholder(Placeholder { data_type, .. }) => {
-                    match &data_type {
-                        None => schema
-                            .data_type_and_nullable(&Column::from_name(name))
-                            .map(|(d, n)| Field::new(&schema_name, d.clone(), n)),
-                        Some(dt) => Ok(Field::new(
-                            &schema_name,
-                            dt.clone(),
-                            expr.nullable(schema)?,
-                        )),
-                    }
-                }
+                Expr::Placeholder(Placeholder { field, .. }) => match &field {
+                    // TODO: This seems to be pulling metadata/types from the schema
+                    // based on the Alias destination name? Is this correct?
+                    None => schema
+                        .field_from_column(&Column::from_name(name))
+                        .map(|f| f.clone().with_name(&schema_name)),

Review Comment:
   For me, the special handling of `Alias(Placeholder(...))` in general seems a bit fishy. Have you tried removing the special case?

##########
datafusion/common/src/metadata.rs:
##########
@@ -0,0 +1,363 @@
+pub fn check_metadata_with_storage_equal(
+    actual: (
+        &DataType,
+        Option<&std::collections::HashMap<String, String>>,

Review Comment:
   Any reason why we're not using `FieldMetadata` here?

-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
