youngoli commented on a change in pull request #12471: URL: https://github.com/apache/beam/pull/12471#discussion_r466753786
########## File path: sdks/go/pkg/beam/core/runtime/graphx/schema/schema.go ########## @@ -0,0 +1,269 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package schema contains utility functions for relating Go types and Beam Schemas. +// +// Not all Go types can be converted to schemas. This is because Go is more expressive than +// Beam schemas. Just as not all Go types can be serialized, similarly, +// not all Beam Schemas will have a conversion to Go types, until the correct +// mechanism exists in the SDK to handle them. +// +// While efforts will be made to have conversions be reversible, this will not +// be possible in all instances. Eg. Go arrays as fields will be converted to +// Beam Arrays, but a Beam Array type will map by default to a Go slice. +package schema + +import ( + "fmt" + "reflect" + "strings" + + "github.com/apache/beam/sdks/go/pkg/beam/core/util/reflectx" + "github.com/apache/beam/sdks/go/pkg/beam/internal/errors" + pipepb "github.com/apache/beam/sdks/go/pkg/beam/model/pipeline_v1" +) + +// FromType returns a Beam Schema of the passed in type. +// Returns an error if the type cannot be converted to a Schema. 
+func FromType(ot reflect.Type) (*pipepb.Schema, error) { + t := ot // keep the original type for errors. + // The top level schema for a pointer to struct and the struct is the same. + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, errors.Errorf("cannot convert %v to schema. FromType only converts structs to schemas", ot) + } + return structToSchema(t) +} + +func structToSchema(t reflect.Type) (*pipepb.Schema, error) { + fields := make([]*pipepb.Field, 0, t.NumField()) + for i := 0; i < t.NumField(); i++ { + f, err := structFieldToField(t.Field(i)) + if err != nil { + return nil, errors.Wrapf(err, "cannot convert field %v to schema", t.Field(i).Name) + } + fields = append(fields, f) + } + return &pipepb.Schema{ + Fields: fields, + }, nil +} + +func structFieldToField(sf reflect.StructField) (*pipepb.Field, error) { + name := sf.Name + if tag := sf.Tag.Get("beam"); tag != "" { + name, _ = parseTag(tag) + } + ftype, err := reflectTypeToFieldType(sf.Type) + if err != nil { + return nil, err + } + return &pipepb.Field{ + Name: name, + Type: ftype, + }, nil +} + +func reflectTypeToFieldType(ot reflect.Type) (*pipepb.FieldType, error) { + var isPtr bool + t := ot + if t.Kind() == reflect.Ptr { + isPtr = true + t = t.Elem() + } + switch t.Kind() { + case reflect.Map: + kt, err := reflectTypeToFieldType(t.Key()) + if err != nil { + return nil, errors.Wrapf(err, "unable to convert key of %v to schema field", ot) + } + vt, err := reflectTypeToFieldType(t.Elem()) + if err != nil { + return nil, errors.Wrapf(err, "unable to convert value of %v to schema field", ot) + } + return &pipepb.FieldType{ + Nullable: isPtr, + TypeInfo: &pipepb.FieldType_MapType{ + MapType: &pipepb.MapType{ + KeyType: kt, + ValueType: vt, + }, + }, + }, nil + case reflect.Struct: + sch, err := structToSchema(t) + if err != nil { + return nil, errors.Wrapf(err, "unable to convert %v to schema field", ot) + } + return &pipepb.FieldType{ + Nullable: isPtr, 
+ TypeInfo: &pipepb.FieldType_RowType{ + RowType: &pipepb.RowType{ + Schema: sch, + }, + }, + }, nil + case reflect.Slice, reflect.Array: + // Special handling for []byte + if t == reflectx.ByteSlice { + return &pipepb.FieldType{ + Nullable: isPtr, + TypeInfo: &pipepb.FieldType_AtomicType{ + AtomicType: pipepb.AtomicType_BYTES, + }, + }, nil + } + vt, err := reflectTypeToFieldType(t.Elem()) + if err != nil { + return nil, errors.Wrapf(err, "unable to convert element type of %v to schema field", ot) + } + return &pipepb.FieldType{ + Nullable: isPtr, + TypeInfo: &pipepb.FieldType_ArrayType{ + ArrayType: &pipepb.ArrayType{ + ElementType: vt, + }, + }, + }, nil + case reflect.Interface, reflect.Chan, reflect.UnsafePointer, reflect.Complex128, reflect.Complex64: + return nil, errors.Errorf("unable to convert unsupported type %v to schema", ot) + default: // must be an atomic type Review comment: Reviewed the change, and the full error propagation is even better than my original suggestion, so two thumbs up for that. But I think you missed my suggestion to merge these unsupported type and `default` cases. I.E. combine them into one default case that tries using reflectTypeToAtomicTypeMap, and if that fails, then it's an unsupported type. Having a specific case for unsupported types seems brittle and doesn't serve much of a purpose other than having a slightly different error message than the one under `default`. If one of these types gets added, this is just one more spot that needs to be changed and will break if you forget. Obviously it's not so important it needs an immediate fix, but I'd say it's worth bundling into whatever schema PR is coming next. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [email protected]
