felipecrv commented on code in PR #3325:
URL: https://github.com/apache/arrow-adbc/pull/3325#discussion_r2298749033


##########
go/adbc/driver/databricks/connection.go:
##########
@@ -0,0 +1,284 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package databricks
+
+import (
+       "context"
+       "database/sql"
+       "fmt"
+
+       "github.com/apache/arrow-adbc/go/adbc"
+       "github.com/apache/arrow-adbc/go/adbc/driver/internal/driverbase"
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/memory"
+       _ "github.com/databricks/databricks-sql-go"
+)
+
+type connectionImpl struct {
+       driverbase.ConnectionImplBase
+
+       // Connection settings
+       catalog  string
+       dbSchema string
+
+       // Database connection
+       conn *sql.Conn
+
+       // Current autocommit state
+       autocommit bool
+}
+
+func (c *connectionImpl) Close() error {
+       if c.conn == nil {
+               return adbc.Error{Code: adbc.StatusInvalidState}
+       }
+       defer func() {
+               c.conn = nil
+       }()
+       return c.conn.Close()
+}
+
+func (c *connectionImpl) NewStatement() (adbc.Statement, error) {
+       return &statementImpl{
+               conn: c,
+       }, nil
+}
+
+// Autocommit interface implementation
+func (c *connectionImpl) GetAutocommit() bool {
+       return c.autocommit
+}
+
+func (c *connectionImpl) SetAutocommit(autocommit bool) error {
+       // Databricks SQL doesn't support explicit transaction control in the same way
+       // as traditional databases. Most operations are implicitly committed.
+       // We'll track the autocommit state but won't change the underlying connection behavior.
+       c.autocommit = autocommit
+       return nil
+}
+
+// CurrentNamespacer interface implementation
+func (c *connectionImpl) GetCurrentCatalog() (string, error) {
+       return c.catalog, nil
+}
+
+func (c *connectionImpl) GetCurrentDbSchema() (string, error) {
+       return c.dbSchema, nil
+}
+
+func (c *connectionImpl) SetCurrentCatalog(catalog string) error {
+       // Use the database to execute USE CATALOG
+       if c.conn != nil && catalog != "" {
+               _, err := c.conn.ExecContext(context.TODO(), "USE CATALOG %s", 
catalog)
+               if err != nil {
+                       return adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to set catalog: %v", 
err),
+                       }
+               }
+       }
+       c.catalog = catalog
+       return nil
+}
+
+func (c *connectionImpl) SetCurrentDbSchema(schema string) error {
+       // Use the database to execute USE SCHEMA
+       if c.conn != nil && schema != "" {
+               _, err := c.conn.ExecContext(context.TODO(), "USE SCHEMA %s", 
schema)
+               if err != nil {
+                       return adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to set schema: %v", 
err),
+                       }
+               }
+       }
+       c.dbSchema = schema
+       return nil
+}
+
+// TableTypeLister interface implementation
+func (c *connectionImpl) ListTableTypes(ctx context.Context) ([]string, error) {
+       // Databricks supports these table types
+       return []string{"TABLE", "VIEW", "EXTERNAL_TABLE", "MANAGED_TABLE"}, nil
+}
+
+func (c *connectionImpl) GetTableTypes(ctx context.Context) (array.RecordReader, error) {
+       // Databricks supports these table types
+       tableTypes := []string{"TABLE", "VIEW", "EXTERNAL_TABLE", "MANAGED_TABLE"}
+
+       // Create Arrow schema for table types
+       schema := arrow.NewSchema([]arrow.Field{
+               {Name: "table_type", Type: arrow.BinaryTypes.String},
+       }, nil)
+
+       // Create record batch
+       bldr := array.NewRecordBuilder(memory.DefaultAllocator, schema)
+       defer bldr.Release()
+
+       tableTypeBuilder := bldr.Field(0).(*array.StringBuilder)
+       for _, tableType := range tableTypes {
+               tableTypeBuilder.Append(tableType)
+       }
+
+       rec := bldr.NewRecord()
+       defer rec.Release()
+
+       reader, err := array.NewRecordReader(schema, []arrow.Record{rec})
+       if err != nil {
+               return nil, err
+       }
+       return reader, nil
+}
+
+// Transaction methods (Databricks has limited transaction support)
+func (c *connectionImpl) Commit(ctx context.Context) error {
+       // Databricks SQL doesn't support explicit transactions in the traditional sense.
+       // Most operations are auto-committed. We'll track state but not perform any operation.
+       return nil
+}
+
+func (c *connectionImpl) Rollback(ctx context.Context) error {
+       // Databricks SQL doesn't support explicit transactions in the traditional sense.
+       // Most operations are auto-committed. We'll track state but not perform any operation.

Review Comment:
   It doesn't look like any tracking is happening.
   
   ```suggestion
        // Most operations are auto-committed.
   ```



##########
go/adbc/driver/databricks/connection.go:
##########
@@ -0,0 +1,284 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package databricks
+
+import (
+       "context"
+       "database/sql"
+       "fmt"
+
+       "github.com/apache/arrow-adbc/go/adbc"
+       "github.com/apache/arrow-adbc/go/adbc/driver/internal/driverbase"
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/memory"
+       _ "github.com/databricks/databricks-sql-go"
+)
+
+type connectionImpl struct {
+       driverbase.ConnectionImplBase
+
+       // Connection settings
+       catalog  string
+       dbSchema string
+
+       // Database connection
+       conn *sql.Conn
+
+       // Current autocommit state
+       autocommit bool
+}
+
+func (c *connectionImpl) Close() error {
+       if c.conn == nil {
+               return adbc.Error{Code: adbc.StatusInvalidState}
+       }
+       defer func() {
+               c.conn = nil
+       }()
+       return c.conn.Close()
+}
+
+func (c *connectionImpl) NewStatement() (adbc.Statement, error) {
+       return &statementImpl{
+               conn: c,
+       }, nil
+}
+
+// Autocommit interface implementation
+func (c *connectionImpl) GetAutocommit() bool {
+       return c.autocommit
+}
+
+func (c *connectionImpl) SetAutocommit(autocommit bool) error {
+       // Databricks SQL doesn't support explicit transaction control in the same way
+       // as traditional databases. Most operations are implicitly committed.
+       // We'll track the autocommit state but won't change the underlying connection behavior.
+       c.autocommit = autocommit
+       return nil
+}
+
+// CurrentNamespacer interface implementation
+func (c *connectionImpl) GetCurrentCatalog() (string, error) {
+       return c.catalog, nil
+}
+
+func (c *connectionImpl) GetCurrentDbSchema() (string, error) {
+       return c.dbSchema, nil
+}
+
+func (c *connectionImpl) SetCurrentCatalog(catalog string) error {
+       // Use the database to execute USE CATALOG
+       if c.conn != nil && catalog != "" {
+               _, err := c.conn.ExecContext(context.TODO(), "USE CATALOG %s", 
catalog)

Review Comment:
   It's OK to assume `catalog` is properly escaped (a review of callers is necessary), but the `%s` should be properly quoted.
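
   A minimal sketch of what "properly quoted" could look like here, assuming Databricks follows Spark SQL's backtick-quoting rules for identifiers (`quoteIdentifier` is a hypothetical helper, not part of this PR; it needs the standard-library `strings` import):

   ```go
   // quoteIdentifier wraps an identifier in backticks, doubling any
   // embedded backticks, per Spark SQL identifier-quoting rules.
   func quoteIdentifier(name string) string {
           return "`" + strings.ReplaceAll(name, "`", "``") + "`"
   }

   // Usage:
   //   query := fmt.Sprintf("USE CATALOG %s", quoteIdentifier(catalog))
   //   _, err := c.conn.ExecContext(ctx, query)
   ```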



##########
go/adbc/driver/databricks/connection.go:
##########
@@ -0,0 +1,284 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package databricks
+
+import (
+       "context"
+       "database/sql"
+       "fmt"
+
+       "github.com/apache/arrow-adbc/go/adbc"
+       "github.com/apache/arrow-adbc/go/adbc/driver/internal/driverbase"
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/memory"
+       _ "github.com/databricks/databricks-sql-go"
+)
+
+type connectionImpl struct {
+       driverbase.ConnectionImplBase
+
+       // Connection settings
+       catalog  string
+       dbSchema string
+
+       // Database connection
+       conn *sql.Conn
+
+       // Current autocommit state
+       autocommit bool
+}
+
+func (c *connectionImpl) Close() error {
+       if c.conn == nil {
+               return adbc.Error{Code: adbc.StatusInvalidState}
+       }
+       defer func() {
+               c.conn = nil
+       }()
+       return c.conn.Close()
+}
+
+func (c *connectionImpl) NewStatement() (adbc.Statement, error) {
+       return &statementImpl{
+               conn: c,
+       }, nil
+}
+
+// Autocommit interface implementation
+func (c *connectionImpl) GetAutocommit() bool {
+       return c.autocommit
+}
+
+func (c *connectionImpl) SetAutocommit(autocommit bool) error {
+       // Databricks SQL doesn't support explicit transaction control in the same way
+       // as traditional databases. Most operations are implicitly committed.
+       // We'll track the autocommit state but won't change the underlying connection behavior.
+       c.autocommit = autocommit
+       return nil
+}
+
+// CurrentNamespacer interface implementation
+func (c *connectionImpl) GetCurrentCatalog() (string, error) {
+       return c.catalog, nil
+}
+
+func (c *connectionImpl) GetCurrentDbSchema() (string, error) {
+       return c.dbSchema, nil
+}
+
+func (c *connectionImpl) SetCurrentCatalog(catalog string) error {
+       // Use the database to execute USE CATALOG
+       if c.conn != nil && catalog != "" {
+               _, err := c.conn.ExecContext(context.TODO(), "USE CATALOG %s", 
catalog)
+               if err != nil {
+                       return adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to set catalog: %v", 
err),
+                       }
+               }
+       }
+       c.catalog = catalog
+       return nil
+}
+
+func (c *connectionImpl) SetCurrentDbSchema(schema string) error {
+       // Use the database to execute USE SCHEMA
+       if c.conn != nil && schema != "" {
+               _, err := c.conn.ExecContext(context.TODO(), "USE SCHEMA %s", 
schema)

Review Comment:
   Same comment about quoting here.



##########
go/adbc/driver/databricks/connection.go:
##########
@@ -0,0 +1,284 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package databricks
+
+import (
+       "context"
+       "database/sql"
+       "fmt"
+
+       "github.com/apache/arrow-adbc/go/adbc"
+       "github.com/apache/arrow-adbc/go/adbc/driver/internal/driverbase"
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/memory"
+       _ "github.com/databricks/databricks-sql-go"
+)
+
+type connectionImpl struct {
+       driverbase.ConnectionImplBase
+
+       // Connection settings
+       catalog  string
+       dbSchema string
+
+       // Database connection
+       conn *sql.Conn
+
+       // Current autocommit state
+       autocommit bool
+}
+
+func (c *connectionImpl) Close() error {
+       if c.conn == nil {
+               return adbc.Error{Code: adbc.StatusInvalidState}
+       }
+       defer func() {
+               c.conn = nil
+       }()
+       return c.conn.Close()
+}
+
+func (c *connectionImpl) NewStatement() (adbc.Statement, error) {
+       return &statementImpl{
+               conn: c,
+       }, nil
+}
+
+// Autocommit interface implementation
+func (c *connectionImpl) GetAutocommit() bool {
+       return c.autocommit
+}
+
+func (c *connectionImpl) SetAutocommit(autocommit bool) error {
+       // Databricks SQL doesn't support explicit transaction control in the same way
+       // as traditional databases. Most operations are implicitly committed.
+       // We'll track the autocommit state but won't change the underlying connection behavior.
+       c.autocommit = autocommit
+       return nil
+}
+
+// CurrentNamespacer interface implementation
+func (c *connectionImpl) GetCurrentCatalog() (string, error) {
+       return c.catalog, nil
+}
+
+func (c *connectionImpl) GetCurrentDbSchema() (string, error) {
+       return c.dbSchema, nil
+}
+
+func (c *connectionImpl) SetCurrentCatalog(catalog string) error {
+       // Use the database to execute USE CATALOG
+       if c.conn != nil && catalog != "" {
+               _, err := c.conn.ExecContext(context.TODO(), "USE CATALOG %s", 
catalog)
+               if err != nil {
+                       return adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to set catalog: %v", 
err),
+                       }
+               }
+       }
+       c.catalog = catalog
+       return nil
+}
+
+func (c *connectionImpl) SetCurrentDbSchema(schema string) error {
+       // Use the database to execute USE SCHEMA
+       if c.conn != nil && schema != "" {
+               _, err := c.conn.ExecContext(context.TODO(), "USE SCHEMA %s", 
schema)
+               if err != nil {
+                       return adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to set schema: %v", 
err),
+                       }
+               }
+       }
+       c.dbSchema = schema
+       return nil
+}
+
+// TableTypeLister interface implementation
+func (c *connectionImpl) ListTableTypes(ctx context.Context) ([]string, error) {
+       // Databricks supports these table types
+       return []string{"TABLE", "VIEW", "EXTERNAL_TABLE", "MANAGED_TABLE"}, nil
+}
+
+func (c *connectionImpl) GetTableTypes(ctx context.Context) (array.RecordReader, error) {
+       // Databricks supports these table types
+       tableTypes := []string{"TABLE", "VIEW", "EXTERNAL_TABLE", "MANAGED_TABLE"}
+
+       // Create Arrow schema for table types
+       schema := arrow.NewSchema([]arrow.Field{
+               {Name: "table_type", Type: arrow.BinaryTypes.String},
+       }, nil)
+
+       // Create record batch
+       bldr := array.NewRecordBuilder(memory.DefaultAllocator, schema)
+       defer bldr.Release()
+
+       tableTypeBuilder := bldr.Field(0).(*array.StringBuilder)
+       for _, tableType := range tableTypes {
+               tableTypeBuilder.Append(tableType)
+       }
+
+       rec := bldr.NewRecord()
+       defer rec.Release()
+
+       reader, err := array.NewRecordReader(schema, []arrow.Record{rec})
+       if err != nil {
+               return nil, err
+       }
+       return reader, nil
+}
+
+// Transaction methods (Databricks has limited transaction support)
+func (c *connectionImpl) Commit(ctx context.Context) error {
+       // Databricks SQL doesn't support explicit transactions in the traditional sense.
+       // Most operations are auto-committed. We'll track state but not perform any operation.

Review Comment:
   ```suggestion
        // Most operations are auto-committed.
   ```



##########
go/adbc/driver/databricks/connection.go:
##########
@@ -0,0 +1,284 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package databricks
+
+import (
+       "context"
+       "database/sql"
+       "fmt"
+
+       "github.com/apache/arrow-adbc/go/adbc"
+       "github.com/apache/arrow-adbc/go/adbc/driver/internal/driverbase"
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/memory"
+       _ "github.com/databricks/databricks-sql-go"
+)
+
+type connectionImpl struct {
+       driverbase.ConnectionImplBase
+
+       // Connection settings
+       catalog  string
+       dbSchema string
+
+       // Database connection
+       conn *sql.Conn
+
+       // Current autocommit state
+       autocommit bool
+}
+
+func (c *connectionImpl) Close() error {
+       if c.conn == nil {
+               return adbc.Error{Code: adbc.StatusInvalidState}
+       }
+       defer func() {
+               c.conn = nil
+       }()
+       return c.conn.Close()
+}
+
+func (c *connectionImpl) NewStatement() (adbc.Statement, error) {
+       return &statementImpl{
+               conn: c,
+       }, nil
+}
+
+// Autocommit interface implementation
+func (c *connectionImpl) GetAutocommit() bool {
+       return c.autocommit
+}
+
+func (c *connectionImpl) SetAutocommit(autocommit bool) error {
+       // Databricks SQL doesn't support explicit transaction control in the same way
+       // as traditional databases. Most operations are implicitly committed.
+       // We'll track the autocommit state but won't change the underlying connection behavior.
+       c.autocommit = autocommit
+       return nil
+}
+
+// CurrentNamespacer interface implementation
+func (c *connectionImpl) GetCurrentCatalog() (string, error) {
+       return c.catalog, nil
+}
+
+func (c *connectionImpl) GetCurrentDbSchema() (string, error) {
+       return c.dbSchema, nil
+}
+
+func (c *connectionImpl) SetCurrentCatalog(catalog string) error {
+       // Use the database to execute USE CATALOG
+       if c.conn != nil && catalog != "" {
+               _, err := c.conn.ExecContext(context.TODO(), "USE CATALOG %s", 
catalog)
+               if err != nil {
+                       return adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to set catalog: %v", 
err),
+                       }
+               }
+       }
+       c.catalog = catalog
+       return nil
+}
+
+func (c *connectionImpl) SetCurrentDbSchema(schema string) error {
+       // Use the database to execute USE SCHEMA
+       if c.conn != nil && schema != "" {
+               _, err := c.conn.ExecContext(context.TODO(), "USE SCHEMA %s", 
schema)
+               if err != nil {
+                       return adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to set schema: %v", 
err),
+                       }
+               }
+       }
+       c.dbSchema = schema
+       return nil
+}
+
+// TableTypeLister interface implementation
+func (c *connectionImpl) ListTableTypes(ctx context.Context) ([]string, error) {
+       // Databricks supports these table types
+       return []string{"TABLE", "VIEW", "EXTERNAL_TABLE", "MANAGED_TABLE"}, nil
+}
+
+func (c *connectionImpl) GetTableTypes(ctx context.Context) (array.RecordReader, error) {
+       // Databricks supports these table types
+       tableTypes := []string{"TABLE", "VIEW", "EXTERNAL_TABLE", "MANAGED_TABLE"}
+
+       // Create Arrow schema for table types
+       schema := arrow.NewSchema([]arrow.Field{
+               {Name: "table_type", Type: arrow.BinaryTypes.String},
+       }, nil)
+
+       // Create record batch
+       bldr := array.NewRecordBuilder(memory.DefaultAllocator, schema)
+       defer bldr.Release()
+
+       tableTypeBuilder := bldr.Field(0).(*array.StringBuilder)
+       for _, tableType := range tableTypes {
+               tableTypeBuilder.Append(tableType)
+       }
+
+       rec := bldr.NewRecord()
+       defer rec.Release()
+
+       reader, err := array.NewRecordReader(schema, []arrow.Record{rec})
+       if err != nil {
+               return nil, err
+       }
+       return reader, nil
+}
+
+// Transaction methods (Databricks has limited transaction support)
+func (c *connectionImpl) Commit(ctx context.Context) error {
+       // Databricks SQL doesn't support explicit transactions in the traditional sense.
+       // Most operations are auto-committed. We'll track state but not perform any operation.
+       return nil
+}
+
+func (c *connectionImpl) Rollback(ctx context.Context) error {
+       // Databricks SQL doesn't support explicit transactions in the traditional sense.
+       // Most operations are auto-committed. We'll track state but not perform any operation.
+       return nil
+}
+
+// DbObjectsEnumerator interface implementation
+func (c *connectionImpl) GetCatalogs(ctx context.Context, catalogFilter *string) ([]string, error) {
+       query := "SHOW CATALOGS"
+       if catalogFilter != nil {
+               query += fmt.Sprintf(" LIKE '%s'", *catalogFilter)
+       }
+
+       rows, err := c.conn.QueryContext(ctx, query)
+       if err != nil {
+               return nil, adbc.Error{
+                       Code: adbc.StatusInternal,
+                       Msg:  fmt.Sprintf("failed to query catalogs: %v", err),
+               }
+       }
+       defer func() { _ = rows.Close() }()
+
+       var catalogs []string
+       for rows.Next() {
+               var catalog string
+               if err := rows.Scan(&catalog); err != nil {
+                       return nil, adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to scan catalog: %v", 
err),
+                       }
+               }
+               catalogs = append(catalogs, catalog)
+       }
+
+       return catalogs, rows.Err()
+}
+
+func (c *connectionImpl) GetDBSchemasForCatalog(ctx context.Context, catalog string, schemaFilter *string) ([]string, error) {
+       query := fmt.Sprintf("SHOW SCHEMAS IN %s", catalog)
+       if schemaFilter != nil {
+               query += fmt.Sprintf(" LIKE '%s'", *schemaFilter)
+       }
+
+       rows, err := c.conn.QueryContext(ctx, query)
+       if err != nil {
+               return nil, adbc.Error{
+                       Code: adbc.StatusInternal,
+                       Msg:  fmt.Sprintf("failed to query schemas: %v", err),
+               }
+       }
+       defer func() { _ = rows.Close() }()
+
+       var schemas []string
+       for rows.Next() {
+               var schema string
+               if err := rows.Scan(&schema); err != nil {
+                       return nil, adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to scan schema: %v", 
err),
+                       }
+               }
+               schemas = append(schemas, schema)
+       }
+
+       return schemas, rows.Err()
+}
+
+func (c *connectionImpl) GetTablesForDBSchema(ctx context.Context, catalog string, schema string, tableFilter *string, columnFilter *string, includeColumns bool) ([]driverbase.TableInfo, error) {
+       query := fmt.Sprintf("SHOW TABLES IN %s.%s", catalog, schema)

Review Comment:
   quoting
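
   As with the `USE CATALOG` case above, a sketch reusing the hypothetical `quoteIdentifier` helper — quoting each namespace part separately rather than the dotted name as a whole:

   ```go
   // Each part of the two-level namespace is quoted on its own;
   // quoting "catalog.schema" as one identifier would be wrong.
   query := fmt.Sprintf("SHOW TABLES IN %s.%s",
           quoteIdentifier(catalog), quoteIdentifier(schema))
   ```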



##########
go/adbc/driver/databricks/connection.go:
##########
@@ -0,0 +1,284 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package databricks
+
+import (
+       "context"
+       "database/sql"
+       "fmt"
+
+       "github.com/apache/arrow-adbc/go/adbc"
+       "github.com/apache/arrow-adbc/go/adbc/driver/internal/driverbase"
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/memory"
+       _ "github.com/databricks/databricks-sql-go"
+)
+
+type connectionImpl struct {
+       driverbase.ConnectionImplBase
+
+       // Connection settings
+       catalog  string
+       dbSchema string
+
+       // Database connection
+       conn *sql.Conn
+
+       // Current autocommit state
+       autocommit bool

Review Comment:
   The SDK connection type doesn't handle this, huh?



##########
go/adbc/driver/databricks/ipc_reader_adapter.go:
##########
@@ -0,0 +1,183 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package databricks
+
+import (
+       "context"
+       "fmt"
+       "io"
+
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/ipc"
+       dbsqlrows "github.com/databricks/databricks-sql-go/rows"
+)
+
+// Check if the rows interface supports IPC streams
+type rowsWithIPCStream interface {
+       GetArrowIPCStreams(context.Context) (dbsqlrows.ArrowIPCStreamIterator, error)
+}
+
+// ipcReaderAdapter uses the new IPC stream interface for zero-copy Arrow access

Review Comment:
   There is a copy when the buffers are copied from the IPC payloads into the allocated buffers. After that, the *Arrow C Data Interface* is zero-copy: the user of the driver shares the pointers.
   
   ```suggestion
   // ipcReaderAdapter uses the new IPC stream interface for Arrow access
   ```



##########
go/adbc/driver/databricks/ipc_reader_adapter.go:
##########
@@ -0,0 +1,183 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package databricks
+
+import (
+       "context"
+       "fmt"
+       "io"
+
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/ipc"
+       dbsqlrows "github.com/databricks/databricks-sql-go/rows"
+)
+
+// Check if the rows interface supports IPC streams
+type rowsWithIPCStream interface {
+       GetArrowIPCStreams(context.Context) (dbsqlrows.ArrowIPCStreamIterator, error)
+}
+
+// ipcReaderAdapter uses the new IPC stream interface for zero-copy Arrow access
+type ipcReaderAdapter struct {
+       ipcIterator   dbsqlrows.ArrowIPCStreamIterator
+       currentReader *ipc.Reader
+       currentRecord arrow.Record
+       schema        *arrow.Schema
+       closed        bool
+       refCount      int64
+}
+
+// newIPCReaderAdapter creates a RecordReader using direct IPC stream access
+func newIPCReaderAdapter(ctx context.Context, rows dbsqlrows.Rows) (array.RecordReader, error) {
+       // Check if rows supports IPC streams
+       ipcRows, ok := rows.(rowsWithIPCStream)
+       if !ok {
+               return nil, fmt.Errorf("databricks rows do not support IPC 
stream access")
+       }
+
+       // Get IPC stream iterator
+       ipcIterator, err := ipcRows.GetArrowIPCStreams(ctx)
+       if err != nil {
+               return nil, fmt.Errorf("failed to get IPC streams: %w", err)
+       }
+
+       adapter := &ipcReaderAdapter{
+               refCount:    1,
+               ipcIterator: ipcIterator,
+       }
+
+       // Initialize the first reader
+       err = adapter.loadNextReader()
+       if err != nil && err != io.EOF {
+               return nil, fmt.Errorf("failed to initialize IPC reader: %w", 
err)
+       }
+
+       return adapter, nil
+}
+
+// loadNextReader loads the next IPC stream into a reader
+func (r *ipcReaderAdapter) loadNextReader() error {
+       // Release current reader if any
+       if r.currentReader != nil {
+               r.currentReader.Release()
+               r.currentReader = nil
+       }
+
+       // Get next IPC stream
+       if !r.ipcIterator.HasNext() {
+               return io.EOF
+       }
+
+       ipcStream, err := r.ipcIterator.Next()
+       if err != nil {
+               return err
+       }
+
+       // Create IPC reader from stream
+       reader, err := ipc.NewReader(ipcStream)
+       if err != nil {
+               return fmt.Errorf("failed to create IPC reader: %w", err)
+       }
+
+       r.currentReader = reader
+
+       // Cache schema from first reader
+       if r.schema == nil {
+               r.schema = reader.Schema()
+       }
+
+       return nil
+}
+
+// Implement array.RecordReader interface
+func (r *ipcReaderAdapter) Schema() *arrow.Schema {
+       return r.schema
+}
+
+func (r *ipcReaderAdapter) Next() bool {
+       if r.closed {
+               return false
+       }
+
+       // Release previous record
+       if r.currentRecord != nil {
+               r.currentRecord.Release()
+               r.currentRecord = nil
+       }
+
+       // Try to get next record from current reader
+       if r.currentReader != nil && r.currentReader.Next() {
+               r.currentRecord = r.currentReader.Record()
+               r.currentRecord.Retain()
+               return true
+       }
+
+       // Need to load next IPC stream
+       err := r.loadNextReader()
+       if err != nil {
+               return false
+       }
+
+       // Try again with new reader
+       if r.currentReader != nil && r.currentReader.Next() {
+               r.currentRecord = r.currentReader.Record()
+               r.currentRecord.Retain()
+               return true
+       }
+
+       return false
+}
+
+func (r *ipcReaderAdapter) Record() arrow.Record {
+       return r.currentRecord
+}
+
+func (r *ipcReaderAdapter) Release() {
+       r.refCount -= 1
+       if !r.closed && r.refCount <= 0 {
+               r.closed = true

Review Comment:
   `closed` seems to exist here as a way to compensate for bad `refCount` handling. It's OK to be defensive, but maybe assert `!r.closed` when `Release()` is called. The assertion will fail if it is released more times than it should be.
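
   A sketch of the suggested assertion, assuming a panic is an acceptable failure mode for an over-released reader:

   ```go
   func (r *ipcReaderAdapter) Release() {
           // Assert the reader has not already been fully released; this
           // surfaces refCount bugs instead of silently ignoring them.
           if r.closed {
                   panic("ipcReaderAdapter: Release called on closed reader")
           }
           r.refCount -= 1
           if r.refCount <= 0 {
                   r.closed = true
                   // ... release currentRecord, currentReader, ipcIterator
           }
   }
   ```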



##########
go/adbc/driver/databricks/ipc_reader_adapter.go:
##########
@@ -0,0 +1,183 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package databricks
+
+import (
+       "context"
+       "fmt"
+       "io"
+
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/ipc"
+       dbsqlrows "github.com/databricks/databricks-sql-go/rows"
+)
+
+// Check if the rows interface supports IPC streams
+type rowsWithIPCStream interface {
+       GetArrowIPCStreams(context.Context) (dbsqlrows.ArrowIPCStreamIterator, error)
+}
+
+// ipcReaderAdapter uses the new IPC stream interface for zero-copy Arrow access
+type ipcReaderAdapter struct {
+       ipcIterator   dbsqlrows.ArrowIPCStreamIterator
+       currentReader *ipc.Reader
+       currentRecord arrow.Record
+       schema        *arrow.Schema
+       closed        bool
+       refCount      int64
+}
+
+// newIPCReaderAdapter creates a RecordReader using direct IPC stream access
+func newIPCReaderAdapter(ctx context.Context, rows dbsqlrows.Rows) (array.RecordReader, error) {
+       // Check if rows supports IPC streams
+       ipcRows, ok := rows.(rowsWithIPCStream)
+       if !ok {
+               return nil, fmt.Errorf("databricks rows do not support IPC 
stream access")
+       }
+
+       // Get IPC stream iterator
+       ipcIterator, err := ipcRows.GetArrowIPCStreams(ctx)
+       if err != nil {
+               return nil, fmt.Errorf("failed to get IPC streams: %w", err)
+       }
+
+       adapter := &ipcReaderAdapter{
+               refCount:    1,
+               ipcIterator: ipcIterator,
+       }
+
+       // Initialize the first reader
+       err = adapter.loadNextReader()
+       if err != nil && err != io.EOF {
+               return nil, fmt.Errorf("failed to initialize IPC reader: %w", 
err)
+       }
+
+       return adapter, nil
+}
+
+// loadNextReader loads the next IPC stream into a reader
+func (r *ipcReaderAdapter) loadNextReader() error {
+       // Release current reader if any
+       if r.currentReader != nil {
+               r.currentReader.Release()
+               r.currentReader = nil
+       }
+
+       // Get next IPC stream
+       if !r.ipcIterator.HasNext() {
+               return io.EOF
+       }

Review Comment:
   If this happens on the first call we won't have a `schema`. Are we in a situation where it's impossible to know the schema when queries return empty result sets? Again? :(
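
   If the iterator can legitimately be empty, one possible guard — a sketch, assuming failing fast is preferable to handing back a reader with a nil schema — would go in `newIPCReaderAdapter` after the initial `loadNextReader` call:

   ```go
   // No IPC stream was returned, so the result schema is unknown.
   if adapter.schema == nil {
           return nil, fmt.Errorf("empty result: no IPC stream to derive a schema from")
   }
   ```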



##########
go/adbc/driver/databricks/ipc_reader_adapter.go:
##########
@@ -0,0 +1,183 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package databricks
+
+import (
+       "context"
+       "fmt"
+       "io"
+
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/ipc"
+       dbsqlrows "github.com/databricks/databricks-sql-go/rows"
+)
+
+// Check if the rows interface supports IPC streams
+type rowsWithIPCStream interface {
+       GetArrowIPCStreams(context.Context) (dbsqlrows.ArrowIPCStreamIterator, error)
+}
+
+// ipcReaderAdapter uses the new IPC stream interface for zero-copy Arrow access
+type ipcReaderAdapter struct {
+       ipcIterator   dbsqlrows.ArrowIPCStreamIterator
+       currentReader *ipc.Reader
+       currentRecord arrow.Record
+       schema        *arrow.Schema
+       closed        bool
+       refCount      int64
+}
+
+// newIPCReaderAdapter creates a RecordReader using direct IPC stream access
+func newIPCReaderAdapter(ctx context.Context, rows dbsqlrows.Rows) (array.RecordReader, error) {
+       // Check if rows supports IPC streams
+       ipcRows, ok := rows.(rowsWithIPCStream)
+       if !ok {
+               return nil, fmt.Errorf("databricks rows do not support IPC 
stream access")
+       }
+
+       // Get IPC stream iterator
+       ipcIterator, err := ipcRows.GetArrowIPCStreams(ctx)
+       if err != nil {
+               return nil, fmt.Errorf("failed to get IPC streams: %w", err)
+       }
+
+       adapter := &ipcReaderAdapter{
+               refCount:    1,
+               ipcIterator: ipcIterator,
+       }
+
+       // Initialize the first reader
+       err = adapter.loadNextReader()
+       if err != nil && err != io.EOF {
+               return nil, fmt.Errorf("failed to initialize IPC reader: %w", 
err)
+       }
+
+       return adapter, nil
+}
+
+// loadNextReader loads the next IPC stream into a reader
+func (r *ipcReaderAdapter) loadNextReader() error {
+       // Release current reader if any
+       if r.currentReader != nil {
+               r.currentReader.Release()
+               r.currentReader = nil
+       }
+
+       // Get next IPC stream
+       if !r.ipcIterator.HasNext() {
+               return io.EOF
+       }
+
+       ipcStream, err := r.ipcIterator.Next()
+       if err != nil {
+               return err
+       }
+
+       // Create IPC reader from stream
+       reader, err := ipc.NewReader(ipcStream)
+       if err != nil {
+               return fmt.Errorf("failed to create IPC reader: %w", err)
+       }
+
+       r.currentReader = reader
+
+       // Cache schema from first reader
+       if r.schema == nil {
+               r.schema = reader.Schema()
+       }
+
+       return nil
+}
+
+// Implement array.RecordReader interface
+func (r *ipcReaderAdapter) Schema() *arrow.Schema {
+       return r.schema
+}
+
+func (r *ipcReaderAdapter) Next() bool {
+       if r.closed {
+               return false
+       }
+
+       // Release previous record
+       if r.currentRecord != nil {
+               r.currentRecord.Release()
+               r.currentRecord = nil
+       }
+
+       // Try to get next record from current reader
+       if r.currentReader != nil && r.currentReader.Next() {
+               r.currentRecord = r.currentReader.Record()
+               r.currentRecord.Retain()
+               return true
+       }
+
+       // Need to load next IPC stream
+       err := r.loadNextReader()
+       if err != nil {
+               return false
+       }

Review Comment:
   ```suggestion
        if err != nil {
                // Err() will return `r.currentReader.Err()`, which contains this error
                return false
        }
   ```
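
   For that comment to hold, the adapter would also need to surface the error through the `array.RecordReader` contract — a sketch, assuming a hypothetical `err error` field is added to `ipcReaderAdapter` and set when `loadNextReader` fails with something other than `io.EOF`:

   ```go
   // Err reports any error encountered while iterating.
   func (r *ipcReaderAdapter) Err() error {
           if r.err != nil {
                   return r.err // error captured from loadNextReader
           }
           if r.currentReader != nil {
                   return r.currentReader.Err()
           }
           return nil
   }
   ```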



##########
go/adbc/driver/databricks/connection.go:
##########
@@ -0,0 +1,284 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package databricks
+
+import (
+       "context"
+       "database/sql"
+       "fmt"
+
+       "github.com/apache/arrow-adbc/go/adbc"
+       "github.com/apache/arrow-adbc/go/adbc/driver/internal/driverbase"
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/memory"
+       _ "github.com/databricks/databricks-sql-go"
+)
+
+type connectionImpl struct {
+       driverbase.ConnectionImplBase
+
+       // Connection settings
+       catalog  string
+       dbSchema string
+
+       // Database connection
+       conn *sql.Conn
+
+       // Current autocommit state
+       autocommit bool
+}
+
+func (c *connectionImpl) Close() error {
+       if c.conn == nil {
+               return adbc.Error{Code: adbc.StatusInvalidState}
+       }
+       defer func() {
+               c.conn = nil
+       }()
+       return c.conn.Close()
+}
+
+func (c *connectionImpl) NewStatement() (adbc.Statement, error) {
+       return &statementImpl{
+               conn: c,
+       }, nil
+}
+
+// Autocommit interface implementation
+func (c *connectionImpl) GetAutocommit() bool {
+       return c.autocommit
+}
+
+func (c *connectionImpl) SetAutocommit(autocommit bool) error {
+       // Databricks SQL doesn't support explicit transaction control in the same way
+       // as traditional databases. Most operations are implicitly committed.
+       // We'll track the autocommit state but won't change the underlying connection behavior.
+       c.autocommit = autocommit
+       return nil
+}
+
+// CurrentNamespacer interface implementation
+func (c *connectionImpl) GetCurrentCatalog() (string, error) {
+       return c.catalog, nil
+}
+
+func (c *connectionImpl) GetCurrentDbSchema() (string, error) {
+       return c.dbSchema, nil
+}
+
+func (c *connectionImpl) SetCurrentCatalog(catalog string) error {
+       // Use the database to execute USE CATALOG
+       if c.conn != nil && catalog != "" {
+               _, err := c.conn.ExecContext(context.TODO(), "USE CATALOG %s", 
catalog)
+               if err != nil {
+                       return adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to set catalog: %v", 
err),
+                       }
+               }
+       }
+       c.catalog = catalog
+       return nil
+}
+
+func (c *connectionImpl) SetCurrentDbSchema(schema string) error {
+       // Use the database to execute USE SCHEMA
+       if c.conn != nil && schema != "" {
+               _, err := c.conn.ExecContext(context.TODO(), "USE SCHEMA %s", 
schema)
+               if err != nil {
+                       return adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to set schema: %v", 
err),
+                       }
+               }
+       }
+       c.dbSchema = schema
+       return nil
+}
+
+// TableTypeLister interface implementation
+func (c *connectionImpl) ListTableTypes(ctx context.Context) ([]string, error) {
+       // Databricks supports these table types
+       return []string{"TABLE", "VIEW", "EXTERNAL_TABLE", "MANAGED_TABLE"}, nil
+}
+
+func (c *connectionImpl) GetTableTypes(ctx context.Context) (array.RecordReader, error) {
+       // Databricks supports these table types
+       tableTypes := []string{"TABLE", "VIEW", "EXTERNAL_TABLE", "MANAGED_TABLE"}
+
+       // Create Arrow schema for table types
+       schema := arrow.NewSchema([]arrow.Field{
+               {Name: "table_type", Type: arrow.BinaryTypes.String},
+       }, nil)
+
+       // Create record batch
+       bldr := array.NewRecordBuilder(memory.DefaultAllocator, schema)
+       defer bldr.Release()
+
+       tableTypeBuilder := bldr.Field(0).(*array.StringBuilder)
+       for _, tableType := range tableTypes {
+               tableTypeBuilder.Append(tableType)
+       }
+
+       rec := bldr.NewRecord()
+       defer rec.Release()
+
+       reader, err := array.NewRecordReader(schema, []arrow.Record{rec})
+       if err != nil {
+               return nil, err
+       }
+       return reader, nil
+}
+
+// Transaction methods (Databricks has limited transaction support)
+func (c *connectionImpl) Commit(ctx context.Context) error {
+       // Databricks SQL doesn't support explicit transactions in the traditional sense.
+       // Most operations are auto-committed. We'll track state but not perform any operation.
+       return nil
+}
+
+func (c *connectionImpl) Rollback(ctx context.Context) error {
+       // Databricks SQL doesn't support explicit transactions in the traditional sense.
+       // Most operations are auto-committed. We'll track state but not perform any operation.
+       return nil
+}
+
+// DbObjectsEnumerator interface implementation
+func (c *connectionImpl) GetCatalogs(ctx context.Context, catalogFilter *string) ([]string, error) {
+       query := "SHOW CATALOGS"
+       if catalogFilter != nil {
+               query += fmt.Sprintf(" LIKE '%s'", *catalogFilter)
+       }
+
+       rows, err := c.conn.QueryContext(ctx, query)
+       if err != nil {
+               return nil, adbc.Error{
+                       Code: adbc.StatusInternal,
+                       Msg:  fmt.Sprintf("failed to query catalogs: %v", err),
+               }
+       }
+       defer func() { _ = rows.Close() }()
+
+       var catalogs []string
+       for rows.Next() {
+               var catalog string
+               if err := rows.Scan(&catalog); err != nil {
+                       return nil, adbc.Error{
+                               Code: adbc.StatusInternal,
+                               Msg:  fmt.Sprintf("failed to scan catalog: %v", 
err),
+                       }
+               }
+               catalogs = append(catalogs, catalog)
+       }
+
+       return catalogs, rows.Err()
+}
+
+func (c *connectionImpl) GetDBSchemasForCatalog(ctx context.Context, catalog string, schemaFilter *string) ([]string, error) {
+       query := fmt.Sprintf("SHOW SCHEMAS IN %s", catalog)

Review Comment:
   Same comment about quoting



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
