This is an automated email from the ASF dual-hosted git repository.
zeroshade pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/arrow-go.git
The following commit(s) were added to refs/heads/main by this push:
new d0bad181 refactor(arrow): last increment of the Record -> RecordBatch
migration (#522)
d0bad181 is described below
commit d0bad181c1665e5f5ed72c3b415029116dbae5f6
Author: Mandukhai Alimaa <[email protected]>
AuthorDate: Mon Oct 6 15:01:22 2025 -0500
refactor(arrow): last increment of the Record -> RecordBatch migration
(#522)
### Rationale for this change
Rename the Record interface to RecordBatch for clarity, since Record
commonly means a single row but this type represents a batch of rows.
### What changes are included in this PR?
The rest of the code is now changed to use the RecordBatch method and
updated interface.
### Are these changes tested?
Yes. All affected packages build successfully and core tests pass.
### Are there any user-facing changes?
No user-facing API changes.
Closes #457
---
.golangci.yaml | 4 --
arrow/array/extension_test.go | 2 +-
arrow/array/json_reader.go | 6 +-
arrow/array/json_reader_test.go | 10 ++--
arrow/array/record.go | 4 +-
arrow/array/record_test.go | 32 +++++-----
arrow/array/table.go | 2 +-
arrow/array/table_test.go | 14 ++---
arrow/array/union_test.go | 2 +-
arrow/array/util.go | 6 +-
arrow/array/util_test.go | 4 +-
arrow/avro/avro2parquet/main.go | 2 +-
arrow/avro/loader.go | 6 +-
arrow/avro/reader_test.go | 2 +-
arrow/cdata/cdata_exports.go | 2 +-
arrow/cdata/cdata_test.go | 4 +-
arrow/cdata/interface.go | 2 +-
arrow/cdata/test/test_cimport.go | 4 +-
arrow/compute/expression.go | 4 +-
arrow/compute/exprs/exec_internal_test.go | 6 +-
arrow/compute/exprs/exec_test.go | 18 +++---
arrow/compute/fieldref_test.go | 2 +-
arrow/compute/selection.go | 4 +-
arrow/compute/vector_selection_test.go | 4 +-
arrow/csv/example_csv_test.go | 2 +-
arrow/csv/reader.go | 6 +-
arrow/csv/reader_test.go | 28 ++++-----
arrow/csv/writer_test.go | 14 ++---
arrow/example_table_creation_test.go | 2 +-
arrow/example_test.go | 14 ++---
arrow/examples/table_creation/main.go | 2 +-
arrow/extensions/bool8_test.go | 6 +-
arrow/extensions/json_test.go | 4 +-
arrow/extensions/opaque_test.go | 2 +-
arrow/extensions/uuid_test.go | 6 +-
arrow/flight/flightsql/client.go | 6 +-
arrow/flight/flightsql/client_test.go | 2 +-
arrow/flight/flightsql/driver/driver.go | 10 ++--
arrow/flight/flightsql/driver/utils_test.go | 2 +-
arrow/flight/flightsql/example/sql_batch_reader.go | 2 +-
arrow/flight/flightsql/example/sqlite_server.go | 6 +-
.../example/sqlite_tables_schema_batch_reader.go | 4 +-
arrow/flight/flightsql/example/type_info.go | 2 +-
arrow/flight/flightsql/server.go | 4 +-
arrow/flight/flightsql/server_test.go | 20 +++----
arrow/flight/flightsql/sqlite_server_test.go | 70 +++++++++++-----------
arrow/flight/record_batch_reader.go | 6 +-
arrow/internal/arrdata/arrdata.go | 38 ++++++------
arrow/internal/arrdata/ioutil.go | 6 +-
arrow/internal/arrjson/arrjson.go | 2 +-
arrow/internal/arrjson/writer.go | 2 +-
arrow/internal/flight_integration/scenario.go | 24 ++++----
arrow/ipc/cmd/arrow-cat/main.go | 4 +-
arrow/ipc/cmd/arrow-cat/main_test.go | 2 +-
arrow/ipc/cmd/arrow-ls/main_test.go | 2 +-
arrow/ipc/file_reader.go | 2 +-
arrow/ipc/ipc_test.go | 54 ++++++++---------
arrow/ipc/message_test.go | 6 +-
arrow/ipc/metadata_test.go | 4 +-
arrow/ipc/reader_test.go | 12 ++--
arrow/ipc/writer.go | 2 +-
arrow/ipc/writer_test.go | 14 ++---
arrow/util/byte_size_test.go | 6 +-
parquet/example_write_read_pq_test.go | 4 +-
parquet/file/column_writer_test.go | 2 +-
parquet/file/file_reader_test.go | 8 +--
parquet/pqarrow/encode_arrow_test.go | 28 ++++-----
parquet/pqarrow/encode_dictionary_test.go | 4 +-
parquet/pqarrow/file_reader.go | 8 +--
parquet/pqarrow/file_reader_test.go | 42 ++++++-------
parquet/pqarrow/file_writer.go | 16 ++---
parquet/pqarrow/reader_writer_test.go | 4 +-
72 files changed, 329 insertions(+), 333 deletions(-)
diff --git a/.golangci.yaml b/.golangci.yaml
index 36443a06..36ed5f08 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -31,10 +31,6 @@ linters:
- third_party$
- builtin$
- examples$
- rules:
- - linters:
- - staticcheck
- text: "SA1019"
issues:
fix: true
formatters:
diff --git a/arrow/array/extension_test.go b/arrow/array/extension_test.go
index 09aa0a33..b2eba8e0 100644
--- a/arrow/array/extension_test.go
+++ b/arrow/array/extension_test.go
@@ -70,7 +70,7 @@ func (e *ExtensionTypeTestSuite) TestParametricArrays() {
p4 := exampleParametric(pool, p4Type, []int32{5, 6, 7, 9}, nil)
defer p4.Release()
- rb := array.NewRecord(arrow.NewSchema([]arrow.Field{
+ rb := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{
{Name: "f0", Type: p1Type, Nullable: true},
{Name: "f1", Type: p2Type, Nullable: true},
{Name: "f2", Type: p3Type, Nullable: true},
diff --git a/arrow/array/json_reader.go b/arrow/array/json_reader.go
index a5bc1ba8..d9ef5ed7 100644
--- a/arrow/array/json_reader.go
+++ b/arrow/array/json_reader.go
@@ -181,7 +181,7 @@ func (r *JSONReader) nextall() bool {
for r.readNext() {
}
- r.cur = r.bldr.NewRecord()
+ r.cur = r.bldr.NewRecordBatch()
return r.cur.NumRows() > 0
}
@@ -190,7 +190,7 @@ func (r *JSONReader) next1() bool {
return false
}
- r.cur = r.bldr.NewRecord()
+ r.cur = r.bldr.NewRecordBatch()
return true
}
@@ -204,7 +204,7 @@ func (r *JSONReader) nextn() bool {
}
if n > 0 {
- r.cur = r.bldr.NewRecord()
+ r.cur = r.bldr.NewRecordBatch()
}
return n > 0
}
diff --git a/arrow/array/json_reader_test.go b/arrow/array/json_reader_test.go
index d73120be..854c8eac 100644
--- a/arrow/array/json_reader_test.go
+++ b/arrow/array/json_reader_test.go
@@ -60,7 +60,7 @@ func TestJSONReader(t *testing.T) {
n := 0
for rdr.Next() {
n++
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
assert.NotNil(t, rec)
assert.EqualValues(t, 1, rec.NumRows())
assert.EqualValues(t, 3, rec.NumCols())
@@ -84,7 +84,7 @@ func TestJSONReaderAll(t *testing.T) {
defer rdr.Release()
assert.True(t, rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
assert.NotNil(t, rec)
assert.NoError(t, rdr.Err())
@@ -109,7 +109,7 @@ func TestJSONReaderChunked(t *testing.T) {
n := 0
for rdr.Next() {
n++
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
assert.NotNil(t, rec)
assert.NoError(t, rdr.Err())
assert.EqualValues(t, 4, rec.NumRows())
@@ -137,7 +137,7 @@ func TestUnmarshalJSON(t *testing.T) {
err := recordBuilder.UnmarshalJSON([]byte(jsondata))
assert.NoError(t, err)
- record := recordBuilder.NewRecord()
+ record := recordBuilder.NewRecordBatch()
defer record.Release()
assert.NotNil(t, record)
@@ -243,7 +243,7 @@ func BenchmarkJSONReader(b *testing.B) {
var totalRows int64
for jsonRdr.Next() {
- rec := jsonRdr.Record()
+ rec :=
jsonRdr.RecordBatch()
totalRows +=
rec.NumRows()
}
diff --git a/arrow/array/record.go b/arrow/array/record.go
index e71e46e3..cca79661 100644
--- a/arrow/array/record.go
+++ b/arrow/array/record.go
@@ -39,7 +39,7 @@ type RecordReader interface {
Next() bool
RecordBatch() arrow.RecordBatch
// Deprecated: Use [RecordBatch] instead.
- Record() arrow.Record
+ Record() arrow.RecordBatch
Err() error
}
@@ -102,7 +102,7 @@ func (rs *simpleRecords) Schema() *arrow.Schema {
return rs.schema }
func (rs *simpleRecords) RecordBatch() arrow.RecordBatch { return rs.cur }
// Deprecated: Use [RecordBatch] instead.
-func (rs *simpleRecords) Record() arrow.Record { return rs.RecordBatch() }
+func (rs *simpleRecords) Record() arrow.RecordBatch { return rs.RecordBatch() }
func (rs *simpleRecords) Next() bool {
if len(rs.recs) == 0 {
return false
diff --git a/arrow/array/record_test.go b/arrow/array/record_test.go
index 2a61bddf..5900efe7 100644
--- a/arrow/array/record_test.go
+++ b/arrow/array/record_test.go
@@ -66,7 +66,7 @@ func TestRecord(t *testing.T) {
defer col2_1.Release()
cols := []arrow.Array{col1, col2}
- rec := array.NewRecord(schema, cols, -1)
+ rec := array.NewRecordBatch(schema, cols, -1)
defer rec.Release()
rec.Retain()
@@ -232,7 +232,7 @@ func TestRecord(t *testing.T) {
}
}()
}
- rec := array.NewRecord(tc.schema, tc.cols, tc.rows)
+ rec := array.NewRecordBatch(tc.schema, tc.cols, tc.rows)
defer rec.Release()
if got, want := rec.NumRows(), tc.rows; got != want {
t.Fatalf("invalid number of rows: got=%d,
want=%d", got, want)
@@ -252,7 +252,7 @@ func TestRecordReader(t *testing.T) {
},
nil,
)
- rec1 := func() arrow.Record {
+ rec1 := func() arrow.RecordBatch {
col1 := func() arrow.Array {
ib := array.NewInt32Builder(mem)
defer ib.Release()
@@ -272,11 +272,11 @@ func TestRecordReader(t *testing.T) {
defer col2.Release()
cols := []arrow.Array{col1, col2}
- return array.NewRecord(schema, cols, -1)
+ return array.NewRecordBatch(schema, cols, -1)
}()
defer rec1.Release()
- rec2 := func() arrow.Record {
+ rec2 := func() arrow.RecordBatch {
col1 := func() arrow.Array {
ib := array.NewInt32Builder(mem)
defer ib.Release()
@@ -296,11 +296,11 @@ func TestRecordReader(t *testing.T) {
defer col2.Release()
cols := []arrow.Array{col1, col2}
- return array.NewRecord(schema, cols, -1)
+ return array.NewRecordBatch(schema, cols, -1)
}()
defer rec2.Release()
- recs := []arrow.Record{rec1, rec2}
+ recs := []arrow.RecordBatch{rec1, rec2}
t.Run("simple reader", func(t *testing.T) {
itr, err := array.NewRecordReader(schema, recs)
if err != nil {
@@ -318,7 +318,7 @@ func TestRecordReader(t *testing.T) {
n := 0
for itr.Next() {
n++
- if got, want := itr.Record(), recs[n-1];
!reflect.DeepEqual(got, want) {
+ if got, want := itr.RecordBatch(), recs[n-1];
!reflect.DeepEqual(got, want) {
t.Fatalf("itr[%d], invalid record. got=%#v,
want=%#v", n-1, got, want)
}
}
@@ -332,7 +332,7 @@ func TestRecordReader(t *testing.T) {
})
t.Run("iter to reader", func(t *testing.T) {
- itr := func(yield func(arrow.Record, error) bool) {
+ itr := func(yield func(arrow.RecordBatch, error) bool) {
for _, r := range recs {
if !yield(r, nil) {
return
@@ -357,8 +357,8 @@ func TestRecordReader(t *testing.T) {
// by default it will release records when the reader
is released
// leading to too many releases on the original record
// so we retain it to keep it from going away while the
test runs
- rdr.Record().Retain()
- if got, want := rdr.Record(), recs[n-1];
!reflect.DeepEqual(got, want) {
+ rdr.RecordBatch().Retain()
+ if got, want := rdr.RecordBatch(), recs[n-1];
!reflect.DeepEqual(got, want) {
t.Fatalf("itr[%d], invalid record. got=%#v,
want=%#v", n-1, got, want)
}
}
@@ -465,7 +465,7 @@ func TestRecordBuilderRespectsFixedSizeArrayNullability(t
*testing.T) {
vb := lb.ValueBuilder().(*array.Int32Builder)
vb.Append(10)
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
if got, want := rec.Column(0).String(), "[[10]]"; got
!= want {
@@ -510,7 +510,7 @@ func TestRecordBuilder(t *testing.T) {
}
}
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
if got, want := rec.Schema(), schema; !got.Equal(want) {
@@ -614,7 +614,7 @@ var testMessageSchema = arrow.NewSchema(
nil,
)
-func (m *testMessage) Fill(rec arrow.Record, row int) error {
+func (m *testMessage) Fill(rec arrow.RecordBatch, row int) error {
m.Reset()
// foo
@@ -713,8 +713,8 @@ type testMessageArrowRecordBuilder struct {
rb *array.RecordBuilder
}
-func (b *testMessageArrowRecordBuilder) Build() arrow.Record {
- return b.rb.NewRecord()
+func (b *testMessageArrowRecordBuilder) Build() arrow.RecordBatch {
+ return b.rb.NewRecordBatch()
}
func (b *testMessageArrowRecordBuilder) Release() {
diff --git a/arrow/array/table.go b/arrow/array/table.go
index 9ba65bf2..227c6a7e 100644
--- a/arrow/array/table.go
+++ b/arrow/array/table.go
@@ -374,7 +374,7 @@ func (tr *TableReader) Next() bool {
}
tr.cur += chunksz
- tr.rec = NewRecord(tr.tbl.Schema(), batch, chunksz)
+ tr.rec = NewRecordBatch(tr.tbl.Schema(), batch, chunksz)
for _, arr := range batch {
arr.Release()
diff --git a/arrow/array/table_test.go b/arrow/array/table_test.go
index e4b84345..e37a231f 100644
--- a/arrow/array/table_test.go
+++ b/arrow/array/table_test.go
@@ -632,16 +632,16 @@ func TestTableFromRecords(t *testing.T) {
b.Field(0).(*array.Int32Builder).AppendValues([]int32{7, 8, 9, 10},
[]bool{true, true, false, true})
b.Field(1).(*array.Float64Builder).AppendValues([]float64{1, 2, 3, 4,
5, 6, 7, 8, 9, 10}, nil)
- rec1 := b.NewRecord()
+ rec1 := b.NewRecordBatch()
defer rec1.Release()
b.Field(0).(*array.Int32Builder).AppendValues([]int32{11, 12, 13, 14,
15, 16, 17, 18, 19, 20}, nil)
b.Field(1).(*array.Float64Builder).AppendValues([]float64{11, 12, 13,
14, 15, 16, 17, 18, 19, 20}, nil)
- rec2 := b.NewRecord()
+ rec2 := b.NewRecordBatch()
defer rec2.Release()
- tbl := array.NewTableFromRecords(schema, []arrow.Record{rec1, rec2})
+ tbl := array.NewTableFromRecords(schema, []arrow.RecordBatch{rec1,
rec2})
defer tbl.Release()
if got, want := tbl.Schema(), schema; !got.Equal(want) {
@@ -763,7 +763,7 @@ func TestTableReader(t *testing.T) {
sum int64
)
for tr.Next() {
- rec := tr.Record()
+ rec := tr.RecordBatch()
if got, want := rec.Schema(), tbl.Schema();
!got.Equal(want) {
t.Fatalf("invalid schema: got=%#v,
want=%#v", got, want)
}
@@ -806,16 +806,16 @@ func TestTableToString(t *testing.T) {
b.Field(0).(*array.Int32Builder).AppendValues([]int32{7, 8, 9, 10},
[]bool{true, true, false, true})
b.Field(1).(*array.Float64Builder).AppendValues([]float64{11, 12, 13,
14, 15, 16, 17, 18, 19, 20}, nil)
- rec1 := b.NewRecord()
+ rec1 := b.NewRecordBatch()
defer rec1.Release()
b.Field(0).(*array.Int32Builder).AppendValues([]int32{111, 112, 113,
114, 115, 116, 117, 118, 119, 120}, nil)
b.Field(1).(*array.Float64Builder).AppendValues([]float64{211, 212,
213, 214, 215, 216, 217, 218, 219, 220}, nil)
- rec2 := b.NewRecord()
+ rec2 := b.NewRecordBatch()
defer rec2.Release()
- tbl := array.NewTableFromRecords(schema, []arrow.Record{rec1, rec2})
+ tbl := array.NewTableFromRecords(schema, []arrow.RecordBatch{rec1,
rec2})
defer tbl.Release()
table_str := tbl.String()
diff --git a/arrow/array/union_test.go b/arrow/array/union_test.go
index 1066f9eb..aa03056a 100644
--- a/arrow/array/union_test.go
+++ b/arrow/array/union_test.go
@@ -80,7 +80,7 @@ func TestUnionSliceEquals(t *testing.T) {
defer sparse.Release()
defer dense.Release()
- batch := array.NewRecord(schema, []arrow.Array{sparse, dense}, -1)
+ batch := array.NewRecordBatch(schema, []arrow.Array{sparse, dense}, -1)
defer batch.Release()
checkUnion := func(arr arrow.Array) {
diff --git a/arrow/array/util.go b/arrow/array/util.go
index 11430d86..6a1d29cb 100644
--- a/arrow/array/util.go
+++ b/arrow/array/util.go
@@ -202,7 +202,7 @@ func RecordFromStructArray(in *Struct, schema
*arrow.Schema) arrow.RecordBatch {
schema =
arrow.NewSchema(in.DataType().(*arrow.StructType).Fields(), nil)
}
- return NewRecord(schema, in.fields, int64(in.Len()))
+ return NewRecordBatch(schema, in.fields, int64(in.Len()))
}
// RecordFromJSON creates a record batch from JSON data. See array.FromJSON
for the details
@@ -261,7 +261,7 @@ func RecordFromJSON(mem memory.Allocator, schema
*arrow.Schema, r io.Reader, opt
return nil, dec.InputOffset(), fmt.Errorf("failed to
decode json: %w", err)
}
- return bldr.NewRecord(), dec.InputOffset(), nil
+ return bldr.NewRecordBatch(), dec.InputOffset(), nil
}
for {
@@ -274,7 +274,7 @@ func RecordFromJSON(mem memory.Allocator, schema
*arrow.Schema, r io.Reader, opt
}
}
- return bldr.NewRecord(), dec.InputOffset(), nil
+ return bldr.NewRecordBatch(), dec.InputOffset(), nil
}
// RecordToJSON writes out the given record following the format of each row
is a single object
diff --git a/arrow/array/util_test.go b/arrow/array/util_test.go
index 10074130..fb837871 100644
--- a/arrow/array/util_test.go
+++ b/arrow/array/util_test.go
@@ -532,13 +532,13 @@ func TestRecordBuilderUnmarshalJSONExtraFields(t
*testing.T) {
require.True(t, s.Scan())
require.NoError(t, bldr.UnmarshalJSON(s.Bytes()))
- rec1 := bldr.NewRecord()
+ rec1 := bldr.NewRecordBatch()
defer rec1.Release()
require.True(t, s.Scan())
require.NoError(t, bldr.UnmarshalJSON(s.Bytes()))
- rec2 := bldr.NewRecord()
+ rec2 := bldr.NewRecordBatch()
defer rec2.Release()
assert.Truef(t, array.RecordEqual(rec1, rec2), "expected: %s\nactual:
%s", rec1, rec2)
diff --git a/arrow/avro/avro2parquet/main.go b/arrow/avro/avro2parquet/main.go
index 65e84fef..663ae58b 100644
--- a/arrow/avro/avro2parquet/main.go
+++ b/arrow/avro/avro2parquet/main.go
@@ -102,7 +102,7 @@ func main() {
fmt.Println(err)
os.Exit(6)
}
- recs := av2arReader.Record()
+ recs := av2arReader.RecordBatch()
err = pr.WriteBuffered(recs)
if err != nil {
fmt.Println(err)
diff --git a/arrow/avro/loader.go b/arrow/avro/loader.go
index 26d8678e..a7199e66 100644
--- a/arrow/avro/loader.go
+++ b/arrow/avro/loader.go
@@ -59,7 +59,7 @@ func (r *OCFReader) recordFactory() {
return
}
}
- r.recChan <- r.bld.NewRecord()
+ r.recChan <- r.bld.NewRecordBatch()
r.bldDone <- struct{}{}
case r.chunk >= 1:
for data := range r.avroChan {
@@ -73,12 +73,12 @@ func (r *OCFReader) recordFactory() {
}
recChunk++
if recChunk >= r.chunk {
- r.recChan <- r.bld.NewRecord()
+ r.recChan <- r.bld.NewRecordBatch()
recChunk = 0
}
}
if recChunk != 0 {
- r.recChan <- r.bld.NewRecord()
+ r.recChan <- r.bld.NewRecordBatch()
}
r.bldDone <- struct{}{}
}
diff --git a/arrow/avro/reader_test.go b/arrow/avro/reader_test.go
index d87948a7..9e4f36eb 100644
--- a/arrow/avro/reader_test.go
+++ b/arrow/avro/reader_test.go
@@ -201,7 +201,7 @@ func TestReader(t *testing.T) {
if !exists {
t.Error("no record exists")
}
- a, err := ar.Record().MarshalJSON()
+ a, err := ar.RecordBatch().MarshalJSON()
assert.NoError(t, err)
var avroParsed []map[string]any
json.Unmarshal(a, &avroParsed)
diff --git a/arrow/cdata/cdata_exports.go b/arrow/cdata/cdata_exports.go
index ea145c01..9020ddf1 100644
--- a/arrow/cdata/cdata_exports.go
+++ b/arrow/cdata/cdata_exports.go
@@ -470,7 +470,7 @@ func (rr cRecordReader) getSchema(out *CArrowSchema) int {
func (rr cRecordReader) next(out *CArrowArray) int {
if rr.rdr.Next() {
- ExportArrowRecordBatch(rr.rdr.Record(), out, nil)
+ ExportArrowRecordBatch(rr.rdr.RecordBatch(), out, nil)
return 0
}
C.ArrowArrayMarkReleased(out)
diff --git a/arrow/cdata/cdata_test.go b/arrow/cdata/cdata_test.go
index 4db1abd5..169ed848 100644
--- a/arrow/cdata/cdata_test.go
+++ b/arrow/cdata/cdata_test.go
@@ -774,7 +774,7 @@ func TestRecordBatch(t *testing.T) {
assert.Equal(t, "a", rbschema.Field(0).Name)
assert.Equal(t, "b", rbschema.Field(1).Name)
- rec := array.NewRecord(rbschema,
[]arrow.Array{arr.(*array.Struct).Field(0), arr.(*array.Struct).Field(1)}, -1)
+ rec := array.NewRecordBatch(rbschema,
[]arrow.Array{arr.(*array.Struct).Field(0), arr.(*array.Struct).Field(1)}, -1)
defer rec.Release()
assert.True(t, array.RecordEqual(rb, rec))
@@ -855,7 +855,7 @@ func TestExportRecordReaderStreamLifetime(t *testing.T) {
arr := bldr.NewArray()
defer arr.Release()
- rec := array.NewRecord(schema, []arrow.Array{arr}, 0)
+ rec := array.NewRecordBatch(schema, []arrow.Array{arr}, 0)
defer rec.Release()
rdr, _ := array.NewRecordReader(schema, []arrow.RecordBatch{rec})
diff --git a/arrow/cdata/interface.go b/arrow/cdata/interface.go
index 6483a168..901371f1 100644
--- a/arrow/cdata/interface.go
+++ b/arrow/cdata/interface.go
@@ -129,7 +129,7 @@ func ImportCRecordBatchWithSchema(arr *CArrowArray, sc
*arrow.Schema) (arrow.Rec
cols[i] = st.Field(i)
}
- return array.NewRecord(sc, cols, int64(st.Len())), nil
+ return array.NewRecordBatch(sc, cols, int64(st.Len())), nil
}
// ImportCRecordBatch imports an ArrowArray from C as a record batch. If err
is not nil,
diff --git a/arrow/cdata/test/test_cimport.go b/arrow/cdata/test/test_cimport.go
index 65cda82b..6636eb32 100644
--- a/arrow/cdata/test/test_cimport.go
+++ b/arrow/cdata/test/test_cimport.go
@@ -93,7 +93,7 @@ func importRecordBatch(scptr, rbptr uintptr) {
lb.Append(true)
vb.AppendValues([]int32{2, 42}, nil)
- expectedRec := bldr.NewRecord()
+ expectedRec := bldr.NewRecordBatch()
defer expectedRec.Release()
if !array.RecordEqual(expectedRec, rec) {
@@ -126,7 +126,7 @@ func makeBatch() arrow.RecordBatch {
valbldr.Append(2)
valbldr.Append(42)
- return bldr.NewRecord()
+ return bldr.NewRecordBatch()
}
//export exportSchema
diff --git a/arrow/compute/expression.go b/arrow/compute/expression.go
index 0de425fb..6a494cef 100644
--- a/arrow/compute/expression.go
+++ b/arrow/compute/expression.go
@@ -687,7 +687,7 @@ func SerializeOptions(opts FunctionOptions, mem
memory.Allocator) (*memory.Buffe
}
defer arr.Release()
- batch := array.NewRecord(arrow.NewSchema([]arrow.Field{{Type:
arr.DataType(), Nullable: true}}, nil), []arrow.Array{arr}, 1)
+ batch := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{{Type:
arr.DataType(), Nullable: true}}, nil), []arrow.Array{arr}, 1)
defer batch.Release()
buf := &bufferWriteSeeker{mem: mem}
@@ -786,7 +786,7 @@ func SerializeExpr(expr Expression, mem memory.Allocator)
(*memory.Buffer, error
}
metadata := arrow.NewMetadata(metaKey, metaValue)
- rec := array.NewRecord(arrow.NewSchema(fields, &metadata), cols, 1)
+ rec := array.NewRecordBatch(arrow.NewSchema(fields, &metadata), cols, 1)
defer rec.Release()
buf := &bufferWriteSeeker{mem: mem}
diff --git a/arrow/compute/exprs/exec_internal_test.go
b/arrow/compute/exprs/exec_internal_test.go
index 86562731..199f82ad 100644
--- a/arrow/compute/exprs/exec_internal_test.go
+++ b/arrow/compute/exprs/exec_internal_test.go
@@ -69,12 +69,12 @@ func TestMakeExecBatch(t *testing.T) {
tests := []struct {
name string
- batch arrow.Record
+ batch arrow.RecordBatch
}{
{"empty", empty},
- {"subset",
array.NewRecord(arrow.NewSchema([]arrow.Field{getField("i32"),
getField("f32")}, nil),
+ {"subset",
array.NewRecordBatch(arrow.NewSchema([]arrow.Field{getField("i32"),
getField("f32")}, nil),
[]arrow.Array{i32, f32}, numRows)},
- {"flipped subset",
array.NewRecord(arrow.NewSchema([]arrow.Field{getField("f32"),
getField("i32")}, nil),
+ {"flipped subset",
array.NewRecordBatch(arrow.NewSchema([]arrow.Field{getField("f32"),
getField("i32")}, nil),
[]arrow.Array{f32, i32}, numRows)},
}
diff --git a/arrow/compute/exprs/exec_test.go b/arrow/compute/exprs/exec_test.go
index 0ef48528..612cc248 100644
--- a/arrow/compute/exprs/exec_test.go
+++ b/arrow/compute/exprs/exec_test.go
@@ -486,7 +486,7 @@ func Test_Types(t *testing.T) {
tt := []struct {
name string
schema func() *arrow.Schema
- record func(rq *require.Assertions, schema *arrow.Schema)
arrow.Record
+ record func(rq *require.Assertions, schema *arrow.Schema)
arrow.RecordBatch
val func(rq *require.Assertions) expr.Literal
}{
{
@@ -500,7 +500,7 @@ func Test_Types(t *testing.T) {
return arrow.NewSchema([]arrow.Field{field},
nil)
},
- record: func(rq *require.Assertions, schema
*arrow.Schema) arrow.Record {
+ record: func(rq *require.Assertions, schema
*arrow.Schema) arrow.RecordBatch {
b :=
array.NewTime64Builder(memory.DefaultAllocator, &arrow.Time64Type{Unit:
arrow.Nanosecond})
defer b.Release()
@@ -509,7 +509,7 @@ func Test_Types(t *testing.T) {
b.AppendValues([]arrow.Time64{t1}, []bool{true})
- return array.NewRecord(schema,
[]arrow.Array{b.NewArray()}, 1)
+ return array.NewRecordBatch(schema,
[]arrow.Array{b.NewArray()}, 1)
},
val: func(rq *require.Assertions) expr.Literal {
v, err :=
arrow.Time64FromString("11:00:00.000000", arrow.Nanosecond)
@@ -529,7 +529,7 @@ func Test_Types(t *testing.T) {
return arrow.NewSchema([]arrow.Field{field},
nil)
},
- record: func(rq *require.Assertions, schema
*arrow.Schema) arrow.Record {
+ record: func(rq *require.Assertions, schema
*arrow.Schema) arrow.RecordBatch {
b :=
array.NewTime64Builder(memory.DefaultAllocator, &arrow.Time64Type{Unit:
arrow.Nanosecond})
defer b.Release()
@@ -538,7 +538,7 @@ func Test_Types(t *testing.T) {
b.AppendValues([]arrow.Time64{t1}, []bool{true})
- return array.NewRecord(schema,
[]arrow.Array{b.NewArray()}, 1)
+ return array.NewRecordBatch(schema,
[]arrow.Array{b.NewArray()}, 1)
},
val: func(rq *require.Assertions) expr.Literal {
v, err :=
arrow.Time64FromString("11:00:00.000000", arrow.Nanosecond)
@@ -566,7 +566,7 @@ func Test_Types(t *testing.T) {
return arrow.NewSchema([]arrow.Field{field},
nil)
},
- record: func(rq *require.Assertions, schema
*arrow.Schema) arrow.Record {
+ record: func(rq *require.Assertions, schema
*arrow.Schema) arrow.RecordBatch {
b :=
array.NewTimestampBuilder(memory.DefaultAllocator, &arrow.TimestampType{Unit:
arrow.Nanosecond})
defer b.Release()
@@ -575,7 +575,7 @@ func Test_Types(t *testing.T) {
b.AppendValues([]arrow.Timestamp{t1},
[]bool{true})
- return array.NewRecord(schema,
[]arrow.Array{b.NewArray()}, 1)
+ return array.NewRecordBatch(schema,
[]arrow.Array{b.NewArray()}, 1)
},
val: func(rq *require.Assertions) expr.Literal {
v, err :=
arrow.TimestampFromString("2021-01-01T11:00:00.000000Z", arrow.Microsecond)
@@ -595,7 +595,7 @@ func Test_Types(t *testing.T) {
return arrow.NewSchema([]arrow.Field{field},
nil)
},
- record: func(rq *require.Assertions, schema
*arrow.Schema) arrow.Record {
+ record: func(rq *require.Assertions, schema
*arrow.Schema) arrow.RecordBatch {
b :=
array.NewDecimal128Builder(memory.DefaultAllocator,
&arrow.Decimal128Type{Precision: 38, Scale: 10})
defer b.Release()
@@ -604,7 +604,7 @@ func Test_Types(t *testing.T) {
b.Append(d)
- return array.NewRecord(schema,
[]arrow.Array{b.NewArray()}, 1)
+ return array.NewRecordBatch(schema,
[]arrow.Array{b.NewArray()}, 1)
},
val: func(rq *require.Assertions) expr.Literal {
v, p, s, err :=
expr.DecimalStringToBytes("456.7890123456")
diff --git a/arrow/compute/fieldref_test.go b/arrow/compute/fieldref_test.go
index ce8a906b..502ea4f8 100644
--- a/arrow/compute/fieldref_test.go
+++ b/arrow/compute/fieldref_test.go
@@ -267,7 +267,7 @@ func TestFieldRefRecord(t *testing.T) {
gamma := gammaBldr.NewStructArray()
defer gamma.Release()
- rec := array.NewRecord(arrow.NewSchema([]arrow.Field{
+ rec := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{
{Name: "alpha", Type: alpha.DataType(), Nullable: true},
{Name: "alpha", Type: beta.DataType(), Nullable: true},
{Name: "alpha", Type: gamma.DataType(), Nullable: true},
diff --git a/arrow/compute/selection.go b/arrow/compute/selection.go
index 77ab8a6d..8839a481 100644
--- a/arrow/compute/selection.go
+++ b/arrow/compute/selection.go
@@ -194,7 +194,7 @@ func takeRecordImpl(ctx context.Context, opts
FunctionOptions, args ...Datum) (D
return nil, err
}
- outRec := array.NewRecord(rb.Schema(), cols, nrows)
+ outRec := array.NewRecordBatch(rb.Schema(), cols, nrows)
return &RecordDatum{Value: outRec}, nil
}
@@ -619,7 +619,7 @@ func FilterRecordBatch(ctx context.Context, batch
arrow.RecordBatch, filter arro
return nil, err
}
- return array.NewRecord(batch.Schema(), cols, int64(indicesArr.Len())),
nil
+ return array.NewRecordBatch(batch.Schema(), cols,
int64(indicesArr.Len())), nil
}
func FilterTable(ctx context.Context, tbl arrow.Table, filter Datum, opts
*FilterOptions) (arrow.Table, error) {
diff --git a/arrow/compute/vector_selection_test.go
b/arrow/compute/vector_selection_test.go
index f5b56906..af3ba569 100644
--- a/arrow/compute/vector_selection_test.go
+++ b/arrow/compute/vector_selection_test.go
@@ -820,7 +820,7 @@ type FilterKernelWithRecordBatch struct {
FilterKernelTestSuite
}
-func (f *FilterKernelWithRecordBatch) doFilter(sc *arrow.Schema, batchJSON,
selection string, opts compute.FilterOptions) (arrow.Record, error) {
+func (f *FilterKernelWithRecordBatch) doFilter(sc *arrow.Schema, batchJSON,
selection string, opts compute.FilterOptions) (arrow.RecordBatch, error) {
rec, _, err := array.RecordFromJSON(f.mem, sc,
strings.NewReader(batchJSON), array.WithUseNumber())
if err != nil {
return nil, err
@@ -1464,7 +1464,7 @@ type TakeKernelTestRecord struct {
TakeKernelTestTyped
}
-func (tk *TakeKernelTestRecord) takeJSON(schm *arrow.Schema, batchJSON string,
indexType arrow.DataType, indices string) (arrow.Record, error) {
+func (tk *TakeKernelTestRecord) takeJSON(schm *arrow.Schema, batchJSON string,
indexType arrow.DataType, indices string) (arrow.RecordBatch, error) {
batch, _, err := array.RecordFromJSON(tk.mem, schm,
strings.NewReader(batchJSON))
tk.Require().NoError(err)
defer batch.Release()
diff --git a/arrow/csv/example_csv_test.go b/arrow/csv/example_csv_test.go
index 1fc3b18f..268e6325 100644
--- a/arrow/csv/example_csv_test.go
+++ b/arrow/csv/example_csv_test.go
@@ -66,7 +66,7 @@ func Example_reader() {
}
// Get the record but don't release it - the reader will handle that
- record := reader.Record()
+ record := reader.RecordBatch()
fmt.Printf("Number of rows: %d\n", record.NumRows())
fmt.Printf("Number of columns: %d\n", record.NumCols())
diff --git a/arrow/csv/reader.go b/arrow/csv/reader.go
index d83842b9..9f3c7724 100644
--- a/arrow/csv/reader.go
+++ b/arrow/csv/reader.go
@@ -282,7 +282,7 @@ func (r *Reader) next1() bool {
r.validate(recs)
r.read(recs)
- r.cur = r.bld.NewRecord()
+ r.cur = r.bld.NewRecordBatch()
return true
}
@@ -305,7 +305,7 @@ func (r *Reader) nextall() bool {
r.validate(rec)
r.read(rec)
}
- r.cur = r.bld.NewRecord()
+ r.cur = r.bld.NewRecordBatch()
return true
}
@@ -338,7 +338,7 @@ func (r *Reader) nextn() bool {
r.done = true
}
- r.cur = r.bld.NewRecord()
+ r.cur = r.bld.NewRecordBatch()
return n > 0
}
diff --git a/arrow/csv/reader_test.go b/arrow/csv/reader_test.go
index 9d3e409b..60af76fa 100644
--- a/arrow/csv/reader_test.go
+++ b/arrow/csv/reader_test.go
@@ -63,7 +63,7 @@ func Example() {
n := 0
for r.Next() {
- rec := r.Record()
+ rec := r.RecordBatch()
for i, col := range rec.Columns() {
fmt.Printf("rec[%d][%q]: %v\n", n, rec.ColumnName(i),
col)
}
@@ -141,7 +141,7 @@ func Example_withChunk() {
n := 0
for r.Next() {
- rec := r.Record()
+ rec := r.RecordBatch()
for i, col := range rec.Columns() {
fmt.Printf("rec[%d][%q]: %v\n", n, rec.ColumnName(i),
col)
}
@@ -255,7 +255,7 @@ func TestCSVReaderParseError(t *testing.T) {
if rec != nil {
rec.Release()
}
- rec = r.Record()
+ rec = r.RecordBatch()
rec.Retain()
if n == 1 && r.Err() == nil {
@@ -380,7 +380,7 @@ func testCSVReader(t *testing.T, filepath string,
withHeader bool, stringsCanBeN
out := new(bytes.Buffer)
n := 0
for r.Next() {
- rec := r.Record()
+ rec := r.RecordBatch()
for i, col := range rec.Columns() {
fmt.Fprintf(out, "rec[%d][%q]: %v\n", n,
rec.ColumnName(i), col)
}
@@ -490,7 +490,7 @@ rec[2]["date64"]: [(null)]
)
r.Next()
- r.Record()
+ r.RecordBatch()
r.Release()
}
@@ -723,7 +723,7 @@ rec[0]["str"]: ["str-0" "str-1" "str-2" "str-3" "str-4"
"str-5" "str-6" "str-7"
n := 0
for r.Next() {
- rec := r.Record()
+ rec := r.RecordBatch()
for i, col := range rec.Columns() {
fmt.Fprintf(out, "rec[%d][%q]: %v\n",
n, rec.ColumnName(i), col)
}
@@ -759,7 +759,7 @@ func TestReadCSVDecimalCols(t *testing.T) {
defer r.Release()
assert.True(t, r.Next())
- rec := r.Record()
+ rec := r.RecordBatch()
rec.Retain()
assert.False(t, r.Next())
defer rec.Release()
@@ -781,7 +781,7 @@ func TestReadCSVDecimalCols(t *testing.T) {
dec256Bldr.Append(decimal256.FromI64(-123))
dec256Bldr.Append(decimal256.FromU64(12300000000))
- exRec := bldr.NewRecord()
+ exRec := bldr.NewRecordBatch()
defer exRec.Release()
assert.Truef(t, array.RecordEqual(exRec, rec), "expected: %s\nactual:
%s", exRec, rec)
@@ -843,7 +843,7 @@ func benchRead(b *testing.B, raw []byte, rows, cols, chunks
int) {
n := int64(0)
for r.Next() {
- n += r.Record().NumRows()
+ n += r.RecordBatch().NumRows()
}
r.Release()
@@ -899,13 +899,13 @@ func TestInferringSchema(t *testing.T) {
assert.True(t, expSchema.Equal(r.Schema()), expSchema.String(),
r.Schema().String())
// verify first row:
- assertRowEqual(exp, r.Record(), 0)
+ assertRowEqual(exp, r.RecordBatch(), 0)
assert.True(t, r.Next())
- assertRowEqual(exp, r.Record(), 1)
+ assertRowEqual(exp, r.RecordBatch(), 1)
assert.True(t, r.Next())
- assertRowEqual(exp, r.Record(), 2)
+ assertRowEqual(exp, r.RecordBatch(), 2)
assert.True(t, r.Next())
- assertRowEqual(exp, r.Record(), 3)
+ assertRowEqual(exp, r.RecordBatch(), 3)
assert.False(t, r.Next())
}
@@ -930,7 +930,7 @@ func TestInferCSVOptions(t *testing.T) {
defer r.Release()
assert.True(t, r.Next())
- rec := r.Record()
+ rec := r.RecordBatch()
rec.Retain()
defer rec.Release()
assert.False(t, r.Next())
diff --git a/arrow/csv/writer_test.go b/arrow/csv/writer_test.go
index ef154c8a..e227b3b7 100644
--- a/arrow/csv/writer_test.go
+++ b/arrow/csv/writer_test.go
@@ -70,7 +70,7 @@ func Example_writer() {
b.Field(1).(*array.Float64Builder).AppendValues([]float64{0, 1, 2, 3,
4, 5, 6, 7, 8, 9}, nil)
b.Field(2).(*array.StringBuilder).AppendValues([]string{"str-0",
"str-1", "str-2", "str-3", "str-4", "str-5", "str-6", "str-7", "str-8",
"str-9"}, nil)
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
w := csv.NewWriter(f, schema, csv.WithComma(';'))
@@ -94,7 +94,7 @@ func Example_writer() {
n := 0
for r.Next() {
- rec := r.Record()
+ rec := r.RecordBatch()
for i, col := range rec.Columns() {
fmt.Printf("rec[%d][%q]: %v\n", n, rec.ColumnName(i),
col)
}
@@ -300,7 +300,7 @@ func testCSVWriter(t *testing.T, data [][]string,
writeHeader bool, fmtr func(bo
field.AppendNull()
}
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
w := csv.NewWriter(f, schema,
@@ -419,7 +419,7 @@ func BenchmarkWrite(b *testing.B) {
bldr.Field(15).(*array.Decimal256Builder).Append(decimal256.FromI64(int64(i)))
}
- rec := bldr.NewRecord()
+ rec := bldr.NewRecordBatch()
defer rec.Release()
w := csv.NewWriter(io.Discard, schema, csv.WithComma(';'),
csv.WithCRLF(false))
@@ -470,7 +470,7 @@ func TestParquetTestingCSVWriter(t *testing.T) {
require.NoError(t, err)
for recordReader.Next() {
- rec := recordReader.Record()
+ rec := recordReader.RecordBatch()
err := csvWriter.Write(rec)
require.NoError(t, err)
}
@@ -519,7 +519,7 @@ func TestParquetTestingCSVWriter(t *testing.T) {
require.NoError(t, err)
for recordReader.Next() {
- rec := recordReader.Record()
+ rec := recordReader.RecordBatch()
err := csvWriter.Write(rec)
require.NoError(t, err)
}
@@ -617,7 +617,7 @@ func TestCustomTypeConversion(t *testing.T) {
require.NoError(t, err)
for recordReader.Next() {
- rec := recordReader.Record()
+ rec := recordReader.RecordBatch()
err := csvWriter.Write(rec)
require.NoError(t, err)
}
diff --git a/arrow/example_table_creation_test.go
b/arrow/example_table_creation_test.go
index 1d823e45..f59ad131 100644
--- a/arrow/example_table_creation_test.go
+++ b/arrow/example_table_creation_test.go
@@ -44,7 +44,7 @@ func Example_tableCreation() {
builder.Field(2).(*array.Float64Builder).AppendValues([]float64{1, 0,
3, 0, 5}, []bool{true, false, true, false, true})
// Create a record
- rec := builder.NewRecord()
+ rec := builder.NewRecordBatch()
defer rec.Release()
// Create a table from the record
diff --git a/arrow/example_test.go b/arrow/example_test.go
index 822ea9ab..2b0dc5d4 100644
--- a/arrow/example_test.go
+++ b/arrow/example_test.go
@@ -476,7 +476,7 @@ func Example_record() {
b.Field(0).(*array.Int32Builder).AppendValues([]int32{7, 8, 9, 10},
[]bool{true, true, false, true})
b.Field(1).(*array.Float64Builder).AppendValues([]float64{1, 2, 3, 4,
5, 6, 7, 8, 9, 10}, nil)
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
for i, col := range rec.Columns() {
@@ -506,13 +506,13 @@ func Example_recordReader() {
b.Field(0).(*array.Int32Builder).AppendValues([]int32{7, 8, 9, 10},
[]bool{true, true, false, true})
b.Field(1).(*array.Float64Builder).AppendValues([]float64{1, 2, 3, 4,
5, 6, 7, 8, 9, 10}, nil)
- rec1 := b.NewRecord()
+ rec1 := b.NewRecordBatch()
defer rec1.Release()
b.Field(0).(*array.Int32Builder).AppendValues([]int32{11, 12, 13, 14,
15, 16, 17, 18, 19, 20}, nil)
b.Field(1).(*array.Float64Builder).AppendValues([]float64{11, 12, 13,
14, 15, 16, 17, 18, 19, 20}, nil)
- rec2 := b.NewRecord()
+ rec2 := b.NewRecordBatch()
defer rec2.Release()
itr, err := array.NewRecordReader(schema, []arrow.RecordBatch{rec1,
rec2})
@@ -523,7 +523,7 @@ func Example_recordReader() {
n := 0
for itr.Next() {
- rec := itr.Record()
+ rec := itr.RecordBatch()
for i, col := range rec.Columns() {
fmt.Printf("rec[%d][%q]: %v\n", n, rec.ColumnName(i),
col)
}
@@ -555,13 +555,13 @@ func Example_table() {
b.Field(0).(*array.Int32Builder).AppendValues([]int32{7, 8, 9, 10},
[]bool{true, true, false, true})
b.Field(1).(*array.Float64Builder).AppendValues([]float64{1, 2, 3, 4,
5, 6, 7, 8, 9, 10}, nil)
- rec1 := b.NewRecord()
+ rec1 := b.NewRecordBatch()
defer rec1.Release()
b.Field(0).(*array.Int32Builder).AppendValues([]int32{11, 12, 13, 14,
15, 16, 17, 18, 19, 20}, nil)
b.Field(1).(*array.Float64Builder).AppendValues([]float64{11, 12, 13,
14, 15, 16, 17, 18, 19, 20}, nil)
- rec2 := b.NewRecord()
+ rec2 := b.NewRecordBatch()
defer rec2.Release()
tbl := array.NewTableFromRecords(schema, []arrow.RecordBatch{rec1,
rec2})
@@ -572,7 +572,7 @@ func Example_table() {
n := 0
for tr.Next() {
- rec := tr.Record()
+ rec := tr.RecordBatch()
for i, col := range rec.Columns() {
fmt.Printf("rec[%d][%q]: %v\n", n, rec.ColumnName(i),
col)
}
diff --git a/arrow/examples/table_creation/main.go
b/arrow/examples/table_creation/main.go
index 6f923b69..477fecf1 100644
--- a/arrow/examples/table_creation/main.go
+++ b/arrow/examples/table_creation/main.go
@@ -44,7 +44,7 @@ func main() {
builder.Field(2).(*array.Float64Builder).AppendValues([]float64{1, 0,
3, 0, 5}, []bool{true, false, true, false, true})
// Create a record
- rec := builder.NewRecord()
+ rec := builder.NewRecordBatch()
defer rec.Release()
// Create a table from the record
diff --git a/arrow/extensions/bool8_test.go b/arrow/extensions/bool8_test.go
index 96291209..e9c6ebbc 100644
--- a/arrow/extensions/bool8_test.go
+++ b/arrow/extensions/bool8_test.go
@@ -72,7 +72,7 @@ func TestBool8ExtensionRecordBuilder(t *testing.T) {
defer builder.Release()
builder.Field(0).(*extensions.Bool8Builder).Append(true)
- record := builder.NewRecord()
+ record := builder.NewRecordBatch()
defer record.Release()
b, err := record.MarshalJSON()
@@ -86,7 +86,7 @@ func TestBool8ExtensionRecordBuilder(t *testing.T) {
require.Equal(t, record, record1)
require.NoError(t, builder.UnmarshalJSON([]byte(`{"bool8":true}`)))
- record = builder.NewRecord()
+ record = builder.NewRecordBatch()
defer record.Release()
require.Equal(t, schema, record.Schema())
@@ -186,7 +186,7 @@ func TestBool8TypeBatchIPCRoundTrip(t *testing.T) {
arr := array.NewExtensionArrayWithStorage(typ, storage)
defer arr.Release()
- batch := array.NewRecord(arrow.NewSchema([]arrow.Field{{Name: "field",
Type: typ, Nullable: true}}, nil),
+ batch := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{{Name:
"field", Type: typ, Nullable: true}}, nil),
[]arrow.Array{arr}, -1)
defer batch.Release()
diff --git a/arrow/extensions/json_test.go b/arrow/extensions/json_test.go
index d25ac104..26822cdf 100644
--- a/arrow/extensions/json_test.go
+++ b/arrow/extensions/json_test.go
@@ -144,7 +144,7 @@ func TestJSONTypeBatchIPCRoundTrip(t *testing.T) {
arr := array.NewExtensionArrayWithStorage(typ, storage)
defer arr.Release()
- batch :=
array.NewRecord(arrow.NewSchema([]arrow.Field{{Name: "field", Type: typ,
Nullable: true}}, nil),
+ batch :=
array.NewRecordBatch(arrow.NewSchema([]arrow.Field{{Name: "field", Type: typ,
Nullable: true}}, nil),
[]arrow.Array{arr}, -1)
defer batch.Release()
@@ -239,7 +239,7 @@ func TestJSONRecordToJSON(t *testing.T) {
jsonArr, ok := arr.(*extensions.JSONArray)
require.True(t, ok)
- rec :=
array.NewRecord(arrow.NewSchema([]arrow.Field{{Name: "json", Type: typ,
Nullable: true}}, nil), []arrow.Array{jsonArr}, 6)
+ rec :=
array.NewRecordBatch(arrow.NewSchema([]arrow.Field{{Name: "json", Type: typ,
Nullable: true}}, nil), []arrow.Array{jsonArr}, 6)
defer rec.Release()
buf := bytes.NewBuffer([]byte("\n")) // expected output
has leading newline for clearer formatting
diff --git a/arrow/extensions/opaque_test.go b/arrow/extensions/opaque_test.go
index e2a01257..8d19d361 100644
--- a/arrow/extensions/opaque_test.go
+++ b/arrow/extensions/opaque_test.go
@@ -169,7 +169,7 @@ func TestOpaqueTypeBatchRoundTrip(t *testing.T) {
arr := array.NewExtensionArrayWithStorage(typ, storage)
defer arr.Release()
- batch := array.NewRecord(arrow.NewSchema([]arrow.Field{{Name: "field",
Type: typ, Nullable: true}}, nil),
+ batch := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{{Name:
"field", Type: typ, Nullable: true}}, nil),
[]arrow.Array{arr}, -1)
defer batch.Release()
diff --git a/arrow/extensions/uuid_test.go b/arrow/extensions/uuid_test.go
index c3508308..a76b77a9 100644
--- a/arrow/extensions/uuid_test.go
+++ b/arrow/extensions/uuid_test.go
@@ -68,7 +68,7 @@ func TestUUIDExtensionRecordBuilder(t *testing.T) {
builder.Field(0).(*extensions.UUIDBuilder).Append(testUUID)
builder.Field(0).(*extensions.UUIDBuilder).AppendNull()
builder.Field(0).(*extensions.UUIDBuilder).Append(testUUID)
- record := builder.NewRecord()
+ record := builder.NewRecordBatch()
b, err := record.MarshalJSON()
require.NoError(t, err)
require.Equal(t,
"[{\"uuid\":\""+testUUID.String()+"\"}\n,{\"uuid\":null}\n,{\"uuid\":\""+testUUID.String()+"\"}\n]",
string(b))
@@ -159,7 +159,7 @@ func TestUUIDTypeBatchIPCRoundTrip(t *testing.T) {
arr := bldr.NewArray()
defer arr.Release()
- batch := array.NewRecord(arrow.NewSchema([]arrow.Field{{Name: "field",
Type: typ, Nullable: true}}, nil),
+ batch := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{{Name:
"field", Type: typ, Nullable: true}}, nil),
[]arrow.Array{arr}, -1)
defer batch.Release()
@@ -233,7 +233,7 @@ func TestUUIDRecordToJSON(t *testing.T) {
uuidArr, ok := arr.(*extensions.UUIDArray)
require.True(t, ok)
- rec := array.NewRecord(arrow.NewSchema([]arrow.Field{{Name: "uuid",
Type: typ, Nullable: true}}, nil), []arrow.Array{uuidArr}, 3)
+ rec := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{{Name:
"uuid", Type: typ, Nullable: true}}, nil), []arrow.Array{uuidArr}, 3)
defer rec.Release()
buf := bytes.NewBuffer([]byte("\n")) // expected output has leading
newline for clearer formatting
diff --git a/arrow/flight/flightsql/client.go b/arrow/flight/flightsql/client.go
index cdba3620..2610da39 100644
--- a/arrow/flight/flightsql/client.go
+++ b/arrow/flight/flightsql/client.go
@@ -280,7 +280,7 @@ func (c *Client) ExecuteIngest(ctx context.Context, rdr
array.RecordReader, reqO
wr.SetFlightDescriptor(desc)
for rdr.Next() {
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
err = wr.Write(rec)
if err == io.EOF {
// gRPC returns io.EOF if the error was generated by
the server.
@@ -1223,7 +1223,7 @@ func (p *PreparedStatement) ExecuteUpdate(ctx
context.Context, opts ...grpc.Call
schema := arrow.NewSchema([]arrow.Field{}, nil)
wr = flight.NewRecordWriter(pstream, ipc.WithSchema(schema))
wr.SetFlightDescriptor(desc)
- rec := array.NewRecord(schema, []arrow.Array{}, 0)
+ rec := array.NewRecordBatch(schema, []arrow.Array{}, 0)
if err = wr.Write(rec); err != nil {
return
}
@@ -1291,7 +1291,7 @@ func (p *PreparedStatement)
writeBindParametersToStream(pstream pb.FlightService
wr := flight.NewRecordWriter(pstream,
ipc.WithSchema(p.streamBinding.Schema()))
wr.SetFlightDescriptor(desc)
for p.streamBinding.Next() {
- if err := wr.Write(p.streamBinding.Record()); err !=
nil {
+ if err := wr.Write(p.streamBinding.RecordBatch()); err
!= nil {
return nil, err
}
}
diff --git a/arrow/flight/flightsql/client_test.go
b/arrow/flight/flightsql/client_test.go
index 279f5ee3..a8ba228b 100644
--- a/arrow/flight/flightsql/client_test.go
+++ b/arrow/flight/flightsql/client_test.go
@@ -571,7 +571,7 @@ func (s *FlightSqlClientSuite)
TestPreparedStatementExecuteReaderBinding() {
s.NoError(err)
defer rec.Release()
- rdr, err := array.NewRecordReader(rec.Schema(), []arrow.Record{rec,
rec, rec})
+ rdr, err := array.NewRecordReader(rec.Schema(),
[]arrow.RecordBatch{rec, rec, rec})
s.NoError(err)
prepared.SetRecordReader(rdr)
diff --git a/arrow/flight/flightsql/driver/driver.go
b/arrow/flight/flightsql/driver/driver.go
index 0e8f881f..d039408d 100644
--- a/arrow/flight/flightsql/driver/driver.go
+++ b/arrow/flight/flightsql/driver/driver.go
@@ -43,9 +43,9 @@ type Rows struct {
// schema stores the row schema, like column names.
schema *arrow.Schema
// recordChan enables async reading from server, while client iterates.
- recordChan chan arrow.Record
+ recordChan chan arrow.RecordBatch
// currentRecord stores a record with n>=0 rows.
- currentRecord arrow.Record
+ currentRecord arrow.RecordBatch
// currentRow tracks the position (row) within currentRecord.
currentRow uint64
// initializedChan prevents the row being used before properly
initialized.
@@ -59,7 +59,7 @@ type Rows struct {
func newRows() *Rows {
return &Rows{
- recordChan: make(chan arrow.Record,
recordChanBufferSizeDefault),
+ recordChan: make(chan arrow.RecordBatch,
recordChanBufferSizeDefault),
initializedChan: make(chan bool),
}
}
@@ -324,7 +324,7 @@ func (s *Stmt) setParameters(args []driver.NamedValue)
error {
}
}
- rec := recBuilder.NewRecord()
+ rec := recBuilder.NewRecordBatch()
defer rec.Release()
s.stmt.SetParameters(rec)
@@ -555,7 +555,7 @@ func (r *Rows) streamRecordset(ctx context.Context, c
*flightsql.Client, endpoin
return
}
- record := reader.Record()
+ record := reader.RecordBatch()
record.Retain()
if record.NumRows() < 1 {
diff --git a/arrow/flight/flightsql/driver/utils_test.go
b/arrow/flight/flightsql/driver/utils_test.go
index 0cd73fb7..d14f00c4 100644
--- a/arrow/flight/flightsql/driver/utils_test.go
+++ b/arrow/flight/flightsql/driver/utils_test.go
@@ -99,7 +99,7 @@ func Test_fromArrowType(t *testing.T) {
b.Field(19).(*array.DurationBuilder).Append(1)
b.Field(20).(*array.DurationBuilder).Append(1)
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
tf := func(t *testing.T, idx int, want any) {
diff --git a/arrow/flight/flightsql/example/sql_batch_reader.go
b/arrow/flight/flightsql/example/sql_batch_reader.go
index f3dce1df..03c07b2e 100644
--- a/arrow/flight/flightsql/example/sql_batch_reader.go
+++ b/arrow/flight/flightsql/example/sql_batch_reader.go
@@ -345,6 +345,6 @@ func (r *SqlBatchReader) Next() bool {
rows++
}
- r.record = r.bldr.NewRecord()
+ r.record = r.bldr.NewRecordBatch()
return rows > 0
}
diff --git a/arrow/flight/flightsql/example/sqlite_server.go
b/arrow/flight/flightsql/example/sqlite_server.go
index d5f61980..fc7d76a2 100644
--- a/arrow/flight/flightsql/example/sqlite_server.go
+++ b/arrow/flight/flightsql/example/sqlite_server.go
@@ -281,7 +281,7 @@ func (s *SQLiteFlightSQLServer)
DoGetCatalogs(context.Context) (*arrow.Schema, <
}
defer catalogs.Release()
- batch := array.NewRecord(schema, []arrow.Array{catalogs}, 1)
+ batch := array.NewRecordBatch(schema, []arrow.Array{catalogs}, 1)
ch := make(chan flight.StreamChunk, 1)
ch <- flight.StreamChunk{Data: batch}
@@ -313,7 +313,7 @@ func (s *SQLiteFlightSQLServer) DoGetDBSchemas(_
context.Context, cmd flightsql.
}
defer dbSchemas.Release()
- batch := array.NewRecord(schema, []arrow.Array{catalogs,
dbSchemas}, 1)
+ batch := array.NewRecordBatch(schema, []arrow.Array{catalogs,
dbSchemas}, 1)
ch <- flight.StreamChunk{Data: batch}
}
@@ -589,7 +589,7 @@ func scalarToIFace(s scalar.Scalar) (interface{}, error) {
func getParamsForStatement(rdr flight.MessageReader) (params [][]interface{},
err error) {
params = make([][]interface{}, 0)
for rdr.Next() {
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
nrows := int(rec.NumRows())
ncols := int(rec.NumCols())
diff --git
a/arrow/flight/flightsql/example/sqlite_tables_schema_batch_reader.go
b/arrow/flight/flightsql/example/sqlite_tables_schema_batch_reader.go
index 99840b5d..4bb9bdcf 100644
--- a/arrow/flight/flightsql/example/sqlite_tables_schema_batch_reader.go
+++ b/arrow/flight/flightsql/example/sqlite_tables_schema_batch_reader.go
@@ -160,7 +160,7 @@ func (s *SqliteTablesSchemaBatchReader) Next() bool {
return false
}
- rec := s.rdr.Record()
+ rec := s.rdr.RecordBatch()
tableNameArr :=
rec.Column(rec.Schema().FieldIndices("table_name")[0]).(*array.String)
bldr := flightsql.NewColumnMetadataBuilder()
@@ -206,6 +206,6 @@ func (s *SqliteTablesSchemaBatchReader) Next() bool {
schemaCol := s.schemaBldr.NewArray()
defer schemaCol.Release()
- s.record = array.NewRecord(s.Schema(), append(rec.Columns(),
schemaCol), rec.NumRows())
+ s.record = array.NewRecordBatch(s.Schema(), append(rec.Columns(),
schemaCol), rec.NumRows())
return true
}
diff --git a/arrow/flight/flightsql/example/type_info.go
b/arrow/flight/flightsql/example/type_info.go
index 35ecef65..d9a12f0f 100644
--- a/arrow/flight/flightsql/example/type_info.go
+++ b/arrow/flight/flightsql/example/type_info.go
@@ -90,7 +90,7 @@ func GetTypeInfoResult(mem memory.Allocator)
arrow.RecordBatch {
numPrecRadix := zeroIntArray
intervalPrecision := zeroIntArray
- return array.NewRecord(schema_ref.XdbcTypeInfo, []arrow.Array{
+ return array.NewRecordBatch(schema_ref.XdbcTypeInfo, []arrow.Array{
typeNames, dataType, columnSize, literalPrefix, literalSuffix,
createParams, nullable, caseSensitive, searchable,
unsignedAttribute,
fixedPrecScale, autoUniqueVal, localTypeName, minimalScale,
maximumScale,
diff --git a/arrow/flight/flightsql/server.go b/arrow/flight/flightsql/server.go
index b76b1e00..d5102a27 100644
--- a/arrow/flight/flightsql/server.go
+++ b/arrow/flight/flightsql/server.go
@@ -419,12 +419,12 @@ func (b *BaseServer) DoGetSqlInfo(_ context.Context, cmd
GetSqlInfo) (*arrow.Sch
}
}
- batch := bldr.NewRecord()
+ batch := bldr.NewRecordBatch()
defer batch.Release()
debug.Assert(int(batch.NumRows()) == len(cmd.GetInfo()), "too many rows
added to SqlInfo result")
ch := make(chan flight.StreamChunk)
- rdr, err := array.NewRecordReader(schema_ref.SqlInfo,
[]arrow.Record{batch})
+ rdr, err := array.NewRecordReader(schema_ref.SqlInfo,
[]arrow.RecordBatch{batch})
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "error producing
record response: %s", err.Error())
}
diff --git a/arrow/flight/flightsql/server_test.go
b/arrow/flight/flightsql/server_test.go
index ba11b8b2..e3b51f92 100644
--- a/arrow/flight/flightsql/server_test.go
+++ b/arrow/flight/flightsql/server_test.go
@@ -102,11 +102,11 @@ func (*testServer) DoGetStatement(ctx context.Context,
ticket flightsql.Statemen
b.AppendNull()
c := make(chan flight.StreamChunk, 2)
c <- flight.StreamChunk{
- Data: array.NewRecord(sc, []arrow.Array{b.NewArray()},
1),
+ Data: array.NewRecordBatch(sc,
[]arrow.Array{b.NewArray()}, 1),
}
b.Append(1)
c <- flight.StreamChunk{
- Data: array.NewRecord(sc, []arrow.Array{b.NewArray()},
1),
+ Data: array.NewRecordBatch(sc,
[]arrow.Array{b.NewArray()}, 1),
}
close(c)
cc = c
@@ -120,7 +120,7 @@ func (*testServer) DoGetStatement(ctx context.Context,
ticket flightsql.Statemen
b.Append(2)
c := make(chan flight.StreamChunk, 2)
c <- flight.StreamChunk{
- Data: array.NewRecord(sc, []arrow.Array{b.NewArray()},
1),
+ Data: array.NewRecordBatch(sc,
[]arrow.Array{b.NewArray()}, 1),
}
c <- flight.StreamChunk{
Err: status.Error(codes.Internal, "test error"),
@@ -182,7 +182,7 @@ func (*testServer) DoPutCommandStatementIngest(ctx
context.Context, cmd flightsq
var maxRows int64 = 50
var nRows int64
for rdr.Next() {
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
if nRows+rec.NumRows() > maxRows {
return nRows, fmt.Errorf("ingested rows exceeded
maximum of %d", maxRows)
}
@@ -236,9 +236,9 @@ func (s *FlightSqlServerSuite) TestExecute() {
s.Require().Len(ep, 1)
fr, err := s.cl.DoGet(context.TODO(), ep[0].GetTicket())
s.Require().NoError(err)
- var recs []arrow.Record
+ var recs []arrow.RecordBatch
for fr.Next() {
- rec := fr.Record()
+ rec := fr.RecordBatch()
rec.Retain()
defer rec.Release()
recs = append(recs, rec)
@@ -309,7 +309,7 @@ func (s *FlightSqlServerSuite) TestExecuteIngestNil() {
}
func (s *FlightSqlServerSuite) TestExecuteIngestInvalid() {
- reclist := []arrow.Record{}
+ reclist := []arrow.RecordBatch{}
rdr, err := array.NewRecordReader(arrow.NewSchema([]arrow.Field{},
nil), reclist)
s.NoError(err)
defer rdr.Release()
@@ -379,7 +379,7 @@ func (s *FlightSqlServerSuite)
TestExecuteIngestWithServerError() {
s.Equal(nRowsExpected, nRowsIngested)
}
-func generateRecords(alloc memory.Allocator, nRecords, nRowsPerRecord int)
[]arrow.Record {
+func generateRecords(alloc memory.Allocator, nRecords, nRowsPerRecord int)
[]arrow.RecordBatch {
schema := arrow.NewSchema(
[]arrow.Field{
{Name: "one", Type: arrow.FixedWidthTypes.Boolean},
@@ -393,7 +393,7 @@ func generateRecords(alloc memory.Allocator, nRecords,
nRowsPerRecord int) []arr
defer bldr.Release()
var val int
- reclist := make([]arrow.Record, nRecords)
+ reclist := make([]arrow.RecordBatch, nRecords)
for i := 0; i < nRecords; i++ {
for j := 0; j < nRowsPerRecord; j++ {
bldr.Field(0).(*array.BooleanBuilder).Append(val%2 == 0)
@@ -401,7 +401,7 @@ func generateRecords(alloc memory.Allocator, nRecords,
nRowsPerRecord int) []arr
bldr.Field(2).(*array.Int64Builder).Append(int64(val))
val++
}
- reclist[i] = bldr.NewRecord()
+ reclist[i] = bldr.NewRecordBatch()
}
return reclist
}
diff --git a/arrow/flight/flightsql/sqlite_server_test.go
b/arrow/flight/flightsql/sqlite_server_test.go
index 90799bb2..7caae7a7 100644
--- a/arrow/flight/flightsql/sqlite_server_test.go
+++ b/arrow/flight/flightsql/sqlite_server_test.go
@@ -127,7 +127,7 @@ func (s *FlightSqliteServerSuite)
TestCommandStatementQuery() {
defer rdr.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.NotNil(rec)
expectedSchema := arrow.NewSchema([]arrow.Field{
@@ -148,7 +148,7 @@ func (s *FlightSqliteServerSuite)
TestCommandStatementQuery() {
foreignarr := s.fromJSON(arrow.PrimitiveTypes.Int64, `[1, 1, 1, null]`)
defer foreignarr.Release()
- expectedRec := array.NewRecord(expectedSchema, []arrow.Array{idarr,
keyarr, valarr, foreignarr}, 4)
+ expectedRec := array.NewRecordBatch(expectedSchema,
[]arrow.Array{idarr, keyarr, valarr, foreignarr}, 4)
defer expectedRec.Release()
s.Truef(array.RecordEqual(expectedRec, rec), "expected: %s\ngot: %s",
expectedRec, rec)
@@ -175,11 +175,11 @@ func (s *FlightSqliteServerSuite) TestCommandGetTables() {
tableType := s.fromJSON(arrow.BinaryTypes.String, `["table", "table",
"table"]`)
defer tableType.Release()
- expectedRec := array.NewRecord(schema_ref.Tables,
[]arrow.Array{catalogName, schemaName, tableName, tableType}, 3)
+ expectedRec := array.NewRecordBatch(schema_ref.Tables,
[]arrow.Array{catalogName, schemaName, tableName, tableType}, 3)
defer expectedRec.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.NotNil(rec)
rec.Retain()
defer rec.Release()
@@ -222,7 +222,7 @@ func (s *FlightSqliteServerSuite)
TestCommandGetTablesWithTableFilter() {
schema := s.fromJSON(arrow.BinaryTypes.String, `[""]`)
table := s.fromJSON(arrow.BinaryTypes.String, `["intTable"]`)
tabletype := s.fromJSON(arrow.BinaryTypes.String, `["table"]`)
- expected := array.NewRecord(schema_ref.Tables, []arrow.Array{catalog,
schema, table, tabletype}, 1)
+ expected := array.NewRecordBatch(schema_ref.Tables,
[]arrow.Array{catalog, schema, table, tabletype}, 1)
defer func() {
catalog.Release()
schema.Release()
@@ -232,7 +232,7 @@ func (s *FlightSqliteServerSuite)
TestCommandGetTablesWithTableFilter() {
}()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.NotNil(rec)
rec.Retain()
defer rec.Release()
@@ -280,11 +280,11 @@ func (s *FlightSqliteServerSuite)
TestCommandGetTablesWithExistingTableTypeFilte
tableType := s.fromJSON(arrow.BinaryTypes.String, `["table", "table",
"table"]`)
defer tableType.Release()
- expectedRec := array.NewRecord(schema_ref.Tables,
[]arrow.Array{catalogName, schemaName, tableName, tableType}, 3)
+ expectedRec := array.NewRecordBatch(schema_ref.Tables,
[]arrow.Array{catalogName, schemaName, tableName, tableType}, 3)
defer expectedRec.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.NotNil(rec)
rec.Retain()
defer rec.Release()
@@ -328,7 +328,7 @@ func (s *FlightSqliteServerSuite)
TestCommandGetTablesWithIncludedSchemas() {
binaryBldr.Append(schemaBuf)
schemaCol := binaryBldr.NewArray()
- expected := array.NewRecord(schema_ref.TablesWithIncludedSchema,
[]arrow.Array{catalog, schema, table, tabletype, schemaCol}, 1)
+ expected := array.NewRecordBatch(schema_ref.TablesWithIncludedSchema,
[]arrow.Array{catalog, schema, table, tabletype, schemaCol}, 1)
defer func() {
catalog.Release()
schema.Release()
@@ -340,7 +340,7 @@ func (s *FlightSqliteServerSuite)
TestCommandGetTablesWithIncludedSchemas() {
}()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.NotNil(rec)
rec.Retain()
defer rec.Release()
@@ -362,7 +362,7 @@ func (s *FlightSqliteServerSuite) TestCommandGetTypeInfo() {
defer expected.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.Truef(array.RecordEqual(expected, rec), "expected: %s\ngot: %s",
expected, rec)
s.False(rdr.Next())
}
@@ -379,7 +379,7 @@ func (s *FlightSqliteServerSuite)
TestCommandGetTypeInfoFiltered() {
defer expected.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.Truef(array.RecordEqual(expected, rec), "expected: %s\ngot: %s",
expected, rec)
s.False(rdr.Next())
}
@@ -395,12 +395,12 @@ func (s *FlightSqliteServerSuite)
TestCommandGetCatalogs() {
s.True(rdr.Schema().Equal(schema_ref.Catalogs), rdr.Schema().String())
catalog := s.fromJSON(arrow.BinaryTypes.String, `["main"]`)
- expected := array.NewRecord(schema_ref.Catalogs,
[]arrow.Array{catalog}, 1)
+ expected := array.NewRecordBatch(schema_ref.Catalogs,
[]arrow.Array{catalog}, 1)
defer catalog.Release()
defer expected.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.NotNil(rec)
rec.Retain()
defer rec.Release()
@@ -421,13 +421,13 @@ func (s *FlightSqliteServerSuite)
TestCommandGetDbSchemas() {
catalog := s.fromJSON(arrow.BinaryTypes.String, `["main"]`)
schema := s.fromJSON(arrow.BinaryTypes.String, `[""]`)
- expected := array.NewRecord(schema_ref.DBSchemas,
[]arrow.Array{catalog, schema}, 1)
+ expected := array.NewRecordBatch(schema_ref.DBSchemas,
[]arrow.Array{catalog, schema}, 1)
defer catalog.Release()
defer schema.Release()
defer expected.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.NotNil(rec)
rec.Retain()
defer rec.Release()
@@ -446,11 +446,11 @@ func (s *FlightSqliteServerSuite)
TestCommandGetTableTypes() {
expected := s.fromJSON(arrow.BinaryTypes.String, `["table"]`)
defer expected.Release()
- expectedRec := array.NewRecord(schema_ref.TableTypes,
[]arrow.Array{expected}, 1)
+ expectedRec := array.NewRecordBatch(schema_ref.TableTypes,
[]arrow.Array{expected}, 1)
defer expectedRec.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.Truef(array.RecordEqual(expectedRec, rec), "expected: %s\ngot: %s",
expected, rec)
s.False(rdr.Next())
}
@@ -498,11 +498,11 @@ func (s *FlightSqliteServerSuite)
TestCommandPreparedStatementQuery() {
foreignIdArr := s.fromJSON(arrow.PrimitiveTypes.Int64, `[1, 1, 1,
null]`)
defer foreignIdArr.Release()
- expected := array.NewRecord(expectedSchema, []arrow.Array{idArr,
keyNameArr, valueArr, foreignIdArr}, 4)
+ expected := array.NewRecordBatch(expectedSchema, []arrow.Array{idArr,
keyNameArr, valueArr, foreignIdArr}, 4)
defer expected.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.Truef(array.RecordEqual(expected, rec), "expected: %s\ngot: %s",
expected, rec)
s.False(rdr.Next())
}
@@ -522,7 +522,7 @@ func (s *FlightSqliteServerSuite)
TestCommandPreparedStatementQueryWithParams()
paramArr, _ := array.NewDenseUnionFromArraysWithFields(typeIDs,
offsets, []arrow.Array{strArray, bytesArr, bigintArr, dblArr},
[]string{"string", "bytes", "bigint", "double"})
- batch := array.NewRecord(arrow.NewSchema([]arrow.Field{
+ batch := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{
{Name: "parameter_1", Type: paramArr.DataType()}}, nil),
[]arrow.Array{paramArr}, 1)
defer func() {
@@ -557,11 +557,11 @@ func (s *FlightSqliteServerSuite)
TestCommandPreparedStatementQueryWithParams()
foreignIdArr := s.fromJSON(arrow.PrimitiveTypes.Int64, `[1, 1]`)
defer foreignIdArr.Release()
- expected := array.NewRecord(expectedSchema, []arrow.Array{idArr,
keyNameArr, valueArr, foreignIdArr}, 2)
+ expected := array.NewRecordBatch(expectedSchema, []arrow.Array{idArr,
keyNameArr, valueArr, foreignIdArr}, 2)
defer expected.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.Truef(array.RecordEqual(expected, rec), "expected: %s\ngot: %s",
expected, rec)
s.False(rdr.Next())
}
@@ -594,7 +594,7 @@ func (s *FlightSqliteServerSuite)
TestCommandPreparedStatementUpdateWithParams()
offsets, []arrow.Array{strArray, bytesArr, bigintArr, dblArr},
[]string{"string", "bytes", "bigint", "double"})
s.NoError(err)
- batch := array.NewRecord(arrow.NewSchema([]arrow.Field{
+ batch := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{
{Name: "parameter_1", Type: paramArr.DataType()}}, nil),
[]arrow.Array{paramArr}, 1)
defer func() {
@@ -653,11 +653,11 @@ func (s *FlightSqliteServerSuite)
TestCommandGetPrimaryKeys() {
bldr.Field(3).(*array.StringBuilder).Append("id")
bldr.Field(4).(*array.Int32Builder).Append(1)
bldr.Field(5).AppendNull()
- expected := bldr.NewRecord()
+ expected := bldr.NewRecordBatch()
defer expected.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.Truef(array.RecordEqual(expected, rec), "expected: %s\ngot: %s",
expected, rec)
s.False(rdr.Next())
}
@@ -685,11 +685,11 @@ func (s *FlightSqliteServerSuite)
TestCommandGetImportedKeys() {
bldr.Field(10).AppendNull()
bldr.Field(11).(*array.Uint8Builder).Append(3)
bldr.Field(12).(*array.Uint8Builder).Append(3)
- expected := bldr.NewRecord()
+ expected := bldr.NewRecordBatch()
defer expected.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.Truef(array.RecordEqual(expected, rec), "expected: %s\ngot: %s",
expected, rec)
s.False(rdr.Next())
}
@@ -717,11 +717,11 @@ func (s *FlightSqliteServerSuite)
TestCommandGetExportedKeys() {
bldr.Field(10).AppendNull()
bldr.Field(11).(*array.Uint8Builder).Append(3)
bldr.Field(12).(*array.Uint8Builder).Append(3)
- expected := bldr.NewRecord()
+ expected := bldr.NewRecordBatch()
defer expected.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.Truef(array.RecordEqual(expected, rec), "expected: %s\ngot: %s",
expected, rec)
s.False(rdr.Next())
}
@@ -751,11 +751,11 @@ func (s *FlightSqliteServerSuite)
TestCommandGetCrossRef() {
bldr.Field(10).AppendNull()
bldr.Field(11).(*array.Uint8Builder).Append(3)
bldr.Field(12).(*array.Uint8Builder).Append(3)
- expected := bldr.NewRecord()
+ expected := bldr.NewRecordBatch()
defer expected.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
s.Truef(array.RecordEqual(expected, rec), "expected: %s\ngot: %s",
expected, rec)
s.False(rdr.Next())
}
@@ -823,7 +823,7 @@ func (s *FlightSqliteServerSuite) TestCommandGetSqlInfo() {
defer rdr.Release()
s.True(rdr.Next())
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
rec.Retain()
defer rec.Release()
s.False(rdr.Next())
@@ -863,9 +863,9 @@ func (s *FlightSqliteServerSuite) TestTransactions() {
toTable := func(r *flight.Reader) arrow.Table {
defer r.Release()
- recs := make([]arrow.Record, 0)
+ recs := make([]arrow.RecordBatch, 0)
for rdr.Next() {
- r := rdr.Record()
+ r := rdr.RecordBatch()
r.Retain()
defer r.Release()
recs = append(recs, r)
diff --git a/arrow/flight/record_batch_reader.go
b/arrow/flight/record_batch_reader.go
index 5f113682..7b744075 100644
--- a/arrow/flight/record_batch_reader.go
+++ b/arrow/flight/record_batch_reader.go
@@ -140,7 +140,7 @@ func (r *Reader) LatestFlightDescriptor() *FlightDescriptor
{
// this is just a convenience to retrieve all three with one function call.
func (r *Reader) Chunk() StreamChunk {
return StreamChunk{
- Data: r.Record(),
+ Data: r.RecordBatch(),
Desc: r.dmr.descr,
AppMetadata: r.dmr.lastAppMetadata,
}
@@ -227,7 +227,7 @@ func StreamChunksFromReader(rdr array.RecordReader, ch
chan<- StreamChunk) {
defer rdr.Release()
for rdr.Next() {
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
rec.Retain()
ch <- StreamChunk{Data: rec}
}
@@ -253,7 +253,7 @@ func ConcatenateReaders(rdrs []array.RecordReader, ch
chan<- StreamChunk) {
for _, r := range rdrs {
for r.Next() {
- rec := r.Record()
+ rec := r.RecordBatch()
rec.Retain()
ch <- StreamChunk{Data: rec}
}
diff --git a/arrow/internal/arrdata/arrdata.go
b/arrow/internal/arrdata/arrdata.go
index ea4221e5..095571a8 100644
--- a/arrow/internal/arrdata/arrdata.go
+++ b/arrow/internal/arrdata/arrdata.go
@@ -99,7 +99,7 @@ func makeNullRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -182,7 +182,7 @@ func makePrimitiveRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -260,7 +260,7 @@ func makeStructsRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -317,7 +317,7 @@ func makeListsRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -374,7 +374,7 @@ func makeListViewsRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -424,7 +424,7 @@ func makeFixedSizeListsRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -463,7 +463,7 @@ func makeStringsRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -564,7 +564,7 @@ func makeFixedWidthTypesRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -603,7 +603,7 @@ func makeFixedSizeBinariesRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -684,7 +684,7 @@ func makeIntervalsRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -741,7 +741,7 @@ func makeDurationsRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -791,7 +791,7 @@ func makeDecimal128sRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -836,7 +836,7 @@ func makeDecimal256sRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -958,7 +958,7 @@ func makeMapsRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -1037,7 +1037,7 @@ func makeExtensionRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -1101,8 +1101,8 @@ func makeUnionRecords() []arrow.RecordBatch {
defer dense2.Release()
return []arrow.RecordBatch{
- array.NewRecord(schema, []arrow.Array{sparse1, dense1}, -1),
- array.NewRecord(schema, []arrow.Array{sparse2, dense2}, -1)}
+ array.NewRecordBatch(schema, []arrow.Array{sparse1, dense1},
-1),
+ array.NewRecordBatch(schema, []arrow.Array{sparse2, dense2},
-1)}
}
func makeRunEndEncodedRecords() []arrow.RecordBatch {
@@ -1150,7 +1150,7 @@ func makeRunEndEncodedRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
@@ -1189,7 +1189,7 @@ func makeStringViewRecords() []arrow.RecordBatch {
recs := make([]arrow.RecordBatch, len(chunks))
for i, chunk := range chunks {
- recs[i] = array.NewRecord(schema, chunk, -1)
+ recs[i] = array.NewRecordBatch(schema, chunk, -1)
}
return recs
diff --git a/arrow/internal/arrdata/ioutil.go b/arrow/internal/arrdata/ioutil.go
index 2ef70436..39ec66f4 100644
--- a/arrow/internal/arrdata/ioutil.go
+++ b/arrow/internal/arrdata/ioutil.go
@@ -46,7 +46,7 @@ func CheckArrowFile(t *testing.T, f *os.File, mem
memory.Allocator, schema *arro
defer r.Close()
for i := 0; i < r.NumRecords(); i++ {
- rec, err := r.Record(i)
+ rec, err := r.RecordBatch(i)
if err != nil {
t.Fatalf("could not read record %d: %v", i, err)
}
@@ -80,7 +80,7 @@ func CheckArrowConcurrentFile(t *testing.T, f *os.File, mem
memory.Allocator, sc
errs := make(chan error, r.NumRecords())
checkRecord := func(i int) {
defer g.Done()
- rec, err := r.RecordAt(i)
+ rec, err := r.RecordBatchAt(i)
if err != nil {
errs <- fmt.Errorf("could not read record %d: %v", i,
err)
return
@@ -128,7 +128,7 @@ func CheckArrowStream(t *testing.T, f *os.File, mem
memory.Allocator, schema *ar
n := 0
for r.Next() {
- rec := r.Record()
+ rec := r.RecordBatch()
if !array.RecordEqual(rec, recs[n]) {
t.Fatalf("records[%d] differ, got: %s, expected %s", n,
rec, recs[n])
}
diff --git a/arrow/internal/arrjson/arrjson.go
b/arrow/internal/arrjson/arrjson.go
index 4a1da61c..864356d7 100644
--- a/arrow/internal/arrjson/arrjson.go
+++ b/arrow/internal/arrjson/arrjson.go
@@ -816,7 +816,7 @@ func recordFromJSON(mem memory.Allocator, schema
*arrow.Schema, rec Record, memo
defer d.Release()
defer cols[i].Release()
}
- return array.NewRecord(schema, cols, int64(rec.Count))
+ return array.NewRecordBatch(schema, cols, int64(rec.Count))
}
func recordToJSON(rec arrow.RecordBatch) Record {
diff --git a/arrow/internal/arrjson/writer.go b/arrow/internal/arrjson/writer.go
index fa0477aa..351a0cf5 100644
--- a/arrow/internal/arrjson/writer.go
+++ b/arrow/internal/arrjson/writer.go
@@ -71,7 +71,7 @@ func (w *Writer) Write(rec arrow.RecordBatch) error {
for _, p := range pairs {
defer p.Dict.Release()
sc := arrow.NewSchema([]arrow.Field{{Name:
fmt.Sprintf("DICT%d", p.ID), Type: p.Dict.DataType(), Nullable: true}}, nil)
- dummy := array.NewRecord(sc, []arrow.Array{p.Dict},
int64(p.Dict.Len()))
+ dummy := array.NewRecordBatch(sc,
[]arrow.Array{p.Dict}, int64(p.Dict.Len()))
defer dummy.Release()
w.raw.Dictionaries = append(w.raw.Dictionaries,
Dictionary{ID: p.ID, Data: recordToJSON(dummy)})
}
diff --git a/arrow/internal/flight_integration/scenario.go
b/arrow/internal/flight_integration/scenario.go
index 106669e9..d6ac7314 100644
--- a/arrow/internal/flight_integration/scenario.go
+++ b/arrow/internal/flight_integration/scenario.go
@@ -127,7 +127,7 @@ func consumeFlightLocation(ctx context.Context, loc
*flight.Location, tkt *fligh
return fmt.Errorf("got fewer batches than expected,
received so far: %d, expected: %d", i, len(orig))
}
- if !array.RecordEqual(chunk, rdr.Record()) {
+ if !array.RecordEqual(chunk, rdr.RecordBatch()) {
return fmt.Errorf("batch %d doesn't match", i)
}
@@ -334,7 +334,7 @@ func (s *defaultIntegrationTester) DoPut(stream
flight.FlightService_DoPutServer
dataset.schema = rdr.Schema()
dataset.chunks = make([]arrow.RecordBatch, 0)
for rdr.Next() {
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
rec.Retain()
dataset.chunks = append(dataset.chunks, rec)
@@ -587,7 +587,7 @@ func (o *orderedScenarioTester) RunClient(addr string, opts
...grpc.DialOption)
defer rdr.Release()
for rdr.Next() {
- record := rdr.Record()
+ record := rdr.RecordBatch()
record.Retain()
defer record.Release()
recs = append(recs, record)
@@ -702,7 +702,7 @@ func (o *orderedScenarioTester) DoGet(tkt *flight.Ticket,
fs flight.FlightServic
b.Field(0).(*array.Int32Builder).AppendValues([]int32{100, 200,
300}, nil)
}
w := flight.NewRecordWriter(fs, ipc.WithSchema(schema))
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
w.Write(rec)
@@ -816,7 +816,7 @@ func (tester *expirationTimeScenarioTester) DoGet(tkt
*flight.Ticket, fs flight.
defer b.Release()
b.Field(0).(*array.Uint32Builder).AppendValues([]uint32{uint32(index)},
nil)
w := flight.NewRecordWriter(fs, ipc.WithSchema(schema))
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
w.Write(rec)
@@ -962,7 +962,7 @@ func (tester *expirationTimeDoGetScenarioTester)
RunClient(addr string, opts ...
defer rdr.Release()
for rdr.Next() {
- record := rdr.Record()
+ record := rdr.RecordBatch()
record.Retain()
defer record.Release()
recs = append(recs, record)
@@ -1673,7 +1673,7 @@ func (m *flightSqlScenarioTester)
ValidatePreparedStatementExecution(client *fli
arr, _, _ := array.FromJSON(memory.DefaultAllocator,
arrow.PrimitiveTypes.Int64, strings.NewReader("[1]"))
defer arr.Release()
- params := array.NewRecord(getQuerySchema(), []arrow.Array{arr}, 1)
+ params := array.NewRecordBatch(getQuerySchema(), []arrow.Array{arr}, 1)
defer params.Release()
prepared.SetParameters(params)
@@ -2316,7 +2316,7 @@ func (m *flightSqlExtensionScenarioTester)
ValidateMetadataRetrieval(client *fli
infoValues := make(flightsql.SqlInfoResultMap)
for rdr.Next() {
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
names, values := rec.Column(0).(*array.Uint32),
rec.Column(1).(*array.DenseUnion)
for i := 0; i < int(rec.NumRows()); i++ {
@@ -2439,7 +2439,7 @@ func (m *flightSqlExtensionScenarioTester)
ValidateStatementExecution(client *fl
func (m *flightSqlExtensionScenarioTester)
ValidatePreparedStatementExecution(client *flightsql.Client) error {
arr, _, _ := array.FromJSON(memory.DefaultAllocator,
arrow.PrimitiveTypes.Int64, strings.NewReader("[1]"))
defer arr.Release()
- params := array.NewRecord(getQuerySchema(), []arrow.Array{arr}, 1)
+ params := array.NewRecordBatch(getQuerySchema(), []arrow.Array{arr}, 1)
defer params.Release()
ctx := context.Background()
@@ -2564,7 +2564,7 @@ func (m *flightSqlExtensionScenarioTester)
ValidateTransactions(client *flightsq
arr, _, _ := array.FromJSON(memory.DefaultAllocator,
arrow.PrimitiveTypes.Int64, strings.NewReader("[1]"))
defer arr.Release()
- params := array.NewRecord(getQuerySchema(), []arrow.Array{arr}, 1)
+ params := array.NewRecordBatch(getQuerySchema(), []arrow.Array{arr}, 1)
defer params.Release()
prepared, err := txn.Prepare(ctx, "SELECT PREPARED STATEMENT")
@@ -3025,7 +3025,7 @@ func (m *flightSqlIngestionScenarioTester)
DoPutCommandStatementIngest(ctx conte
var nRecords int64
for rdr.Next() {
- rec := rdr.Record()
+ rec := rdr.RecordBatch()
nRecords += rec.NumRows()
if err := assertEq(true, expectedSchema.Equal(rec.Schema()));
err != nil {
@@ -3075,7 +3075,7 @@ func getIngestRecords() array.RecordReader {
arr := array.MakeArrayOfNull(memory.DefaultAllocator,
arrow.PrimitiveTypes.Int64, int(ingestStatementExpectedRows))
defer arr.Release()
- rec := array.NewRecord(schema, []arrow.Array{arr},
ingestStatementExpectedRows)
+ rec := array.NewRecordBatch(schema, []arrow.Array{arr},
ingestStatementExpectedRows)
defer rec.Release()
rdr, _ := array.NewRecordReader(schema, []arrow.RecordBatch{rec})
diff --git a/arrow/ipc/cmd/arrow-cat/main.go b/arrow/ipc/cmd/arrow-cat/main.go
index d4417f32..ca6cf886 100644
--- a/arrow/ipc/cmd/arrow-cat/main.go
+++ b/arrow/ipc/cmd/arrow-cat/main.go
@@ -100,7 +100,7 @@ func processStream(w io.Writer, rin io.Reader) error {
for r.Next() {
n++
fmt.Fprintf(w, "record %d...\n", n)
- rec := r.Record()
+ rec := r.RecordBatch()
for i, col := range rec.Columns() {
fmt.Fprintf(w, " col[%d] %q: %v\n", i,
rec.ColumnName(i), col)
}
@@ -154,7 +154,7 @@ func processFile(w io.Writer, fname string) error {
fmt.Fprintf(w, "version: %v\n", r.Version())
for i := 0; i < r.NumRecords(); i++ {
fmt.Fprintf(w, "record %d/%d...\n", i+1, r.NumRecords())
- rec, err := r.Record(i)
+ rec, err := r.RecordBatch(i)
if err != nil {
return err
}
diff --git a/arrow/ipc/cmd/arrow-cat/main_test.go
b/arrow/ipc/cmd/arrow-cat/main_test.go
index da5133e3..0e25ee64 100644
--- a/arrow/ipc/cmd/arrow-cat/main_test.go
+++ b/arrow/ipc/cmd/arrow-cat/main_test.go
@@ -525,7 +525,7 @@ record 3/3...
var w interface {
io.Closer
- Write(arrow.Record) error
+ Write(arrow.RecordBatch) error
}
switch {
diff --git a/arrow/ipc/cmd/arrow-ls/main_test.go
b/arrow/ipc/cmd/arrow-ls/main_test.go
index 79e68287..f90e4a80 100644
--- a/arrow/ipc/cmd/arrow-ls/main_test.go
+++ b/arrow/ipc/cmd/arrow-ls/main_test.go
@@ -284,7 +284,7 @@ records: 3
var w interface {
io.Closer
- Write(arrow.Record) error
+ Write(arrow.RecordBatch) error
}
switch {
diff --git a/arrow/ipc/file_reader.go b/arrow/ipc/file_reader.go
index 2d83fcc7..3c4b1b7e 100644
--- a/arrow/ipc/file_reader.go
+++ b/arrow/ipc/file_reader.go
@@ -488,7 +488,7 @@ func newRecordBatch(schema *arrow.Schema, memo
*dictutils.Memo, meta *memory.Buf
defer cols[i].Release()
}
- return array.NewRecord(schema, cols, rows)
+ return array.NewRecordBatch(schema, cols, rows)
}
type ipcSource struct {
diff --git a/arrow/ipc/ipc_test.go b/arrow/ipc/ipc_test.go
index 1484ddb0..a23b3476 100644
--- a/arrow/ipc/ipc_test.go
+++ b/arrow/ipc/ipc_test.go
@@ -64,18 +64,18 @@ func TestArrow12072(t *testing.T) {
[]string{strconv.Itoa(rand.Intn(100))}, nil)
}
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
- tbl := array.NewTableFromRecords(schema, []arrow.Record{rec})
+ tbl := array.NewTableFromRecords(schema, []arrow.RecordBatch{rec})
defer tbl.Release()
tr := array.NewTableReader(tbl, 1)
defer tr.Release()
- data := []arrow.Record{}
+ data := []arrow.RecordBatch{}
for tr.Next() {
- rec := tr.Record()
+ rec := tr.RecordBatch()
rec.Retain()
defer rec.Release()
data = append(data, rec)
@@ -98,7 +98,7 @@ func TestArrow12072(t *testing.T) {
rdr, err := ipc.NewReader(bytes.NewReader(buf))
assert.NoError(t, err)
for rdr.Next() {
- out := rdr.Record()
+ out := rdr.RecordBatch()
assert.Truef(t, array.RecordEqual(rec, out),
"expected: %s\ngot: %s\n", rec, out)
}
assert.NoError(t, rdr.Err())
@@ -178,7 +178,7 @@ func writeThenReadTable(t *testing.T, alloc
memory.Allocator, table arrow.Table)
tr := array.NewTableReader(table, 0)
defer tr.Release()
for tr.Next() {
- require.NoError(t, writer.Write(tr.Record()))
+ require.NoError(t, writer.Write(tr.RecordBatch()))
}
require.NoError(t, writer.Close())
@@ -186,9 +186,9 @@ func writeThenReadTable(t *testing.T, alloc
memory.Allocator, table arrow.Table)
reader, err := ipc.NewReader(buf, ipc.WithAllocator(alloc))
require.NoError(t, err)
defer reader.Release()
- records := make([]arrow.Record, 0)
+ records := make([]arrow.RecordBatch, 0)
for reader.Next() {
- rec := reader.Record()
+ rec := reader.RecordBatch()
rec.Retain()
defer rec.Release()
records = append(records, rec)
@@ -286,10 +286,10 @@ func TestIPCTable(t *testing.T) {
defer b.Release()
b.Field(0).(*array.Int32Builder).AppendValues([]int32{1, 2, 3, 4},
[]bool{true, true, false, true})
- rec1 := b.NewRecord()
+ rec1 := b.NewRecordBatch()
defer rec1.Release()
- tbl := array.NewTableFromRecords(schema, []arrow.Record{rec1})
+ tbl := array.NewTableFromRecords(schema, []arrow.RecordBatch{rec1})
defer tbl.Release()
var buf bytes.Buffer
@@ -307,7 +307,7 @@ func TestIPCTable(t *testing.T) {
n := 0
for tr.Next() {
- rec := tr.Record()
+ rec := tr.RecordBatch()
for i, col := range rec.Columns() {
t.Logf("rec[%d][%q]: %v nulls:%v\n", n,
rec.ColumnName(i), col, col.NullBitmapBytes())
@@ -326,7 +326,7 @@ func TestIPCTable(t *testing.T) {
}
n = 0
for ipcReader.Next() {
- rec := ipcReader.Record()
+ rec := ipcReader.RecordBatch()
for i, col := range rec.Columns() {
t.Logf("rec[%d][%q]: %v nulls:%v\n", n,
rec.ColumnName(i), col, col.NullBitmapBytes())
@@ -362,7 +362,7 @@ func TestDictionary(t *testing.T) {
arr := bldr.NewArray()
defer arr.Release()
// Create a first record with field = "value_0"
- record := array.NewRecord(schema, []arrow.Array{arr}, 1)
+ record := array.NewRecordBatch(schema, []arrow.Array{arr}, 1)
defer record.Release()
expectedJson, err := record.MarshalJSON()
@@ -377,7 +377,7 @@ func TestDictionary(t *testing.T) {
require.NoError(t, bldr.UnmarshalJSON([]byte(`["value_1"]`)))
arr = bldr.NewArray()
defer arr.Release()
- record = array.NewRecord(schema, []arrow.Array{arr}, 1)
+ record = array.NewRecordBatch(schema, []arrow.Array{arr}, 1)
// record, _, err = array.RecordFromJSON(pool, schema,
strings.NewReader(`[{"field": ["value_1"]}]`))
// require.NoError(t, err)
@@ -422,7 +422,7 @@ func TestDictionaryDeltas(t *testing.T) {
arr := bldr.NewArray()
defer arr.Release()
// Create a first record with field = "value_0"
- record := array.NewRecord(schema, []arrow.Array{arr}, 1)
+ record := array.NewRecordBatch(schema, []arrow.Array{arr}, 1)
defer record.Release()
expectedJson, err := record.MarshalJSON()
@@ -437,7 +437,7 @@ func TestDictionaryDeltas(t *testing.T) {
require.NoError(t, bldr.UnmarshalJSON([]byte(`["value_1"]`)))
arr = bldr.NewArray()
defer arr.Release()
- record = array.NewRecord(schema, []arrow.Array{arr}, 1)
+ record = array.NewRecordBatch(schema, []arrow.Array{arr}, 1)
defer record.Release()
expectedJson, err = record.MarshalJSON()
@@ -455,7 +455,7 @@ func TestDictionaryDeltas(t *testing.T) {
// Encode and decode a record over a tuple of IPC writer and reader.
// IPC writer and reader are the same from one call to another.
func encodeDecodeIpcStream(t *testing.T,
- record arrow.Record,
+ record arrow.RecordBatch,
bufWriter *bytes.Buffer, ipcWriter *ipc.Writer,
bufReader *bytes.Reader, ipcReader *ipc.Reader) ([]byte, *ipc.Reader,
error) {
@@ -476,7 +476,7 @@ func encodeDecodeIpcStream(t *testing.T,
ipcReader = newIpcReader
}
ipcReader.Next()
- record = ipcReader.Record()
+ record = ipcReader.RecordBatch()
// Return the decoded record as a json string
json, err := record.MarshalJSON()
@@ -507,7 +507,7 @@ func Example_mapSlice() {
}
defer arr.Release()
- rec := array.NewRecord(schema, []arrow.Array{arr}, int64(arr.Len()))
+ rec := array.NewRecordBatch(schema, []arrow.Array{arr},
int64(arr.Len()))
defer rec.Release()
rec2 := rec.NewSlice(1, 2)
defer rec2.Release()
@@ -528,7 +528,7 @@ func Example_mapSlice() {
defer r.Release()
r.Next()
- fmt.Println(r.Record())
+ fmt.Println(r.RecordBatch())
// Output:
// record:
@@ -559,7 +559,7 @@ func Example_listSlice() {
}
defer arr.Release()
- rec := array.NewRecord(schema, []arrow.Array{arr}, int64(arr.Len()))
+ rec := array.NewRecordBatch(schema, []arrow.Array{arr},
int64(arr.Len()))
defer rec.Release()
rec2 := rec.NewSlice(1, 2)
defer rec2.Release()
@@ -580,7 +580,7 @@ func Example_listSlice() {
defer r.Release()
r.Next()
- fmt.Println(r.Record())
+ fmt.Println(r.RecordBatch())
// Output:
// record:
@@ -605,7 +605,7 @@ func TestIpcEmptyMap(t *testing.T) {
require.NoError(t, err)
defer arr.Release()
- rec := array.NewRecord(schema, []arrow.Array{arr}, int64(arr.Len()))
+ rec := array.NewRecordBatch(schema, []arrow.Array{arr},
int64(arr.Len()))
defer rec.Release()
var buf bytes.Buffer
@@ -618,8 +618,8 @@ func TestIpcEmptyMap(t *testing.T) {
defer r.Release()
assert.True(t, r.Next())
- assert.Zero(t, r.Record().NumRows())
- assert.True(t, arrow.TypeEqual(dt, r.Record().Column(0).DataType()))
+ assert.Zero(t, r.RecordBatch().NumRows())
+ assert.True(t, arrow.TypeEqual(dt,
r.RecordBatch().Column(0).DataType()))
}
// GH-41993
@@ -662,7 +662,7 @@ func TestArrowBinaryIPCWriterTruncatedVOffsets(t
*testing.T) {
Nullable: true,
},
}, nil)
- record := array.NewRecord(schema, []arrow.Array{str}, 2)
+ record := array.NewRecordBatch(schema, []arrow.Array{str}, 2)
var output bytes.Buffer
writer := ipc.NewWriter(&output, ipc.WithSchema(schema))
@@ -677,7 +677,7 @@ func TestArrowBinaryIPCWriterTruncatedVOffsets(t
*testing.T) {
require.True(t, reader.Next())
require.NoError(t, reader.Err())
- rec := reader.Record()
+ rec := reader.RecordBatch()
require.EqualValues(t, 1, rec.NumCols())
require.EqualValues(t, 2, rec.NumRows())
diff --git a/arrow/ipc/message_test.go b/arrow/ipc/message_test.go
index 71733da0..a69738e8 100644
--- a/arrow/ipc/message_test.go
+++ b/arrow/ipc/message_test.go
@@ -81,7 +81,7 @@ func writeRecordsIntoBuffer(t *testing.T, numRecords int)
*bytes.Buffer {
return buf
}
-func getTestRecords(mem memory.Allocator, numRecords int) (*arrow.Schema,
[]arrow.Record) {
+func getTestRecords(mem memory.Allocator, numRecords int) (*arrow.Schema,
[]arrow.RecordBatch) {
meta := arrow.NewMetadata([]string{}, []string{})
s := arrow.NewSchema([]arrow.Field{
{Name: "test-col", Type: arrow.PrimitiveTypes.Int64},
@@ -90,13 +90,13 @@ func getTestRecords(mem memory.Allocator, numRecords int)
(*arrow.Schema, []arro
builder := array.NewRecordBuilder(mem, s)
defer builder.Release()
- recs := make([]arrow.Record, numRecords)
+ recs := make([]arrow.RecordBatch, numRecords)
for i := 0; i < len(recs); i++ {
col := builder.Field(0).(*array.Int64Builder)
for i := 0; i < 10; i++ {
col.Append(int64(i))
}
- recs[i] = builder.NewRecord()
+ recs[i] = builder.NewRecordBatch()
}
return s, recs
diff --git a/arrow/ipc/metadata_test.go b/arrow/ipc/metadata_test.go
index 0acce426..64898890 100644
--- a/arrow/ipc/metadata_test.go
+++ b/arrow/ipc/metadata_test.go
@@ -187,7 +187,7 @@ func TestUnrecognizedExtensionType(t *testing.T) {
extArr := exampleUUID(pool)
defer extArr.Release()
- batch := array.NewRecord(
+ batch := array.NewRecordBatch(
arrow.NewSchema([]arrow.Field{
{Name: "f0", Type: extArr.DataType(), Nullable: true}},
nil),
[]arrow.Array{extArr}, 4)
@@ -217,7 +217,7 @@ func TestUnrecognizedExtensionType(t *testing.T) {
// create a record batch with the same data, but the field should
contain the
// extension metadata and be of the storage type instead of being the
extension type.
extMetadata := arrow.NewMetadata([]string{ExtensionTypeKeyName,
ExtensionMetadataKeyName}, []string{"uuid", "uuid-serialized"})
- batchNoExt := array.NewRecord(
+ batchNoExt := array.NewRecordBatch(
arrow.NewSchema([]arrow.Field{
{Name: "f0", Type: storageArr.DataType(), Nullable:
true, Metadata: extMetadata},
}, nil), []arrow.Array{storageArr}, 4)
diff --git a/arrow/ipc/reader_test.go b/arrow/ipc/reader_test.go
index a78bb758..29db5985 100644
--- a/arrow/ipc/reader_test.go
+++ b/arrow/ipc/reader_test.go
@@ -40,7 +40,7 @@ func TestReaderCatchPanic(t *testing.T) {
defer b.Release()
b.Field(0).(*array.StringBuilder).AppendValues([]string{"foo", "bar",
"baz"}, nil)
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
buf := new(bytes.Buffer)
@@ -81,7 +81,7 @@ func TestReaderCheckedAllocator(t *testing.T) {
bldr.Append([]byte("bar"))
bldr.Append([]byte("baz"))
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
buf := new(bytes.Buffer)
@@ -105,10 +105,10 @@ func TestMappedReader(t *testing.T) {
defer b.Release()
b.Field(0).(*array.Int32Builder).AppendValues([]int32{1, 2, 3, 4},
[]bool{true, true, false, true})
- rec1 := b.NewRecord()
+ rec1 := b.NewRecordBatch()
defer rec1.Release()
- tbl := array.NewTableFromRecords(schema, []arrow.Record{rec1})
+ tbl := array.NewTableFromRecords(schema, []arrow.RecordBatch{rec1})
defer tbl.Release()
var buf bytes.Buffer
@@ -121,7 +121,7 @@ func TestMappedReader(t *testing.T) {
n := 0
for tr.Next() {
- rec := tr.Record()
+ rec := tr.RecordBatch()
for i, col := range rec.Columns() {
t.Logf("rec[%d][%q]: %v nulls:%v\n", n,
rec.ColumnName(i), col, col.NullBitmapBytes())
@@ -192,7 +192,7 @@ func BenchmarkIPC(b *testing.B) {
bldr.Append([]byte("bar"))
bldr.Append([]byte("baz"))
- rec := rb.NewRecord()
+ rec := rb.NewRecordBatch()
defer rec.Release()
for _, codec := range []struct {
diff --git a/arrow/ipc/writer.go b/arrow/ipc/writer.go
index ab068511..2fcf3dfb 100644
--- a/arrow/ipc/writer.go
+++ b/arrow/ipc/writer.go
@@ -306,7 +306,7 @@ func (d *dictEncoder) Encode(p *Payload, id int64, isDelta
bool, dict arrow.Arra
}()
schema := arrow.NewSchema([]arrow.Field{{Name: "dictionary", Type:
dict.DataType(), Nullable: true}}, nil)
- batch := array.NewRecord(schema, []arrow.Array{dict}, int64(dict.Len()))
+ batch := array.NewRecordBatch(schema, []arrow.Array{dict},
int64(dict.Len()))
defer batch.Release()
if err := d.encode(p, batch); err != nil {
return err
diff --git a/arrow/ipc/writer_test.go b/arrow/ipc/writer_test.go
index eea8f46a..c37b904c 100644
--- a/arrow/ipc/writer_test.go
+++ b/arrow/ipc/writer_test.go
@@ -45,10 +45,10 @@ func TestSliceAndWrite(t *testing.T) {
defer b.Release()
b.Field(0).(*array.StringBuilder).AppendValues([]string{"foo", "bar",
"baz"}, nil)
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
- sliceAndWrite := func(rec arrow.Record, schema *arrow.Schema) {
+ sliceAndWrite := func(rec arrow.RecordBatch, schema *arrow.Schema) {
slice := rec.NewSlice(1, 2)
defer slice.Release()
@@ -135,7 +135,7 @@ func TestWriterCatchPanic(t *testing.T) {
defer b.Release()
b.Field(0).(*array.StringBuilder).AppendValues([]string{"foo", "bar",
"baz"}, nil)
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
// mess up the first offset for the string column
@@ -160,7 +160,7 @@ func TestWriterMemCompression(t *testing.T) {
defer b.Release()
b.Field(0).(*array.StringBuilder).AppendValues([]string{"foo", "bar",
"baz"}, nil)
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
var buf bytes.Buffer
@@ -240,7 +240,7 @@ func TestWriteWithCompressionAndMinSavings(t *testing.T) {
func TestWriterInferSchema(t *testing.T) {
bldr := array.NewRecordBuilder(memory.DefaultAllocator,
arrow.NewSchema([]arrow.Field{{Name: "col", Type: arrow.PrimitiveTypes.Int8}},
nil))
bldr.Field(0).(*array.Int8Builder).AppendValues([]int8{1, 2, 3, 4, 5},
nil)
- rec := bldr.NewRecord()
+ rec := bldr.NewRecordBatch()
defer rec.Release()
var buf bytes.Buffer
@@ -302,7 +302,7 @@ func TestGetPayloads(t *testing.T) {
defer b.Release()
b.Field(0).(*array.StringBuilder).AppendValues([]string{"foo", "bar",
"baz"}, nil)
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
schemaPayload := GetSchemaPayload(rec.Schema(), mem)
@@ -339,7 +339,7 @@ func TestWritePayload(t *testing.T) {
bldr := array.NewRecordBuilder(mem,
arrow.NewSchema([]arrow.Field{{Name: "col", Type: arrow.PrimitiveTypes.Int8}},
nil))
bldr.Field(0).(*array.Int8Builder).AppendValues([]int8{1, 2, 3, 4, 5},
nil)
- rec := bldr.NewRecord()
+ rec := bldr.NewRecordBatch()
defer rec.Release()
var buf bytes.Buffer
diff --git a/arrow/util/byte_size_test.go b/arrow/util/byte_size_test.go
index 9b511262..448419ad 100644
--- a/arrow/util/byte_size_test.go
+++ b/arrow/util/byte_size_test.go
@@ -36,7 +36,7 @@ func TestTotalArrayReusedBuffers(t *testing.T) {
arr := bldr.NewArray()
defer arr.Release()
- rec := array.NewRecord(arrow.NewSchema([]arrow.Field{
+ rec := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{
{Name: "a", Type: arrow.FixedWidthTypes.Boolean},
{Name: "b", Type: arrow.FixedWidthTypes.Boolean},
}, nil), []arrow.Array{arr, arr}, 1)
@@ -44,7 +44,7 @@ func TestTotalArrayReusedBuffers(t *testing.T) {
assert.Equal(t, int64(5), util.TotalRecordSize(rec))
- rec1 := array.NewRecord(arrow.NewSchema([]arrow.Field{
+ rec1 := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{
{Name: "a", Type: arrow.FixedWidthTypes.Boolean},
}, nil), []arrow.Array{arr}, 1)
defer rec1.Release()
@@ -103,7 +103,7 @@ func TestTotalArraySizeRecord(t *testing.T) {
defer recordBldr.Release()
recordBldr.Field(0).(*array.Int32Builder).AppendValues([]int32{1, 2,
3}, nil)
recordBldr.Field(1).(*array.Int64Builder).AppendValues([]int64{4, 5,
6}, nil)
- record := recordBldr.NewRecord()
+ record := recordBldr.NewRecordBatch()
defer record.Release()
assert.Equal(t, int64(44), util.TotalRecordSize(record))
diff --git a/parquet/example_write_read_pq_test.go
b/parquet/example_write_read_pq_test.go
index fc93d779..688df9d9 100644
--- a/parquet/example_write_read_pq_test.go
+++ b/parquet/example_write_read_pq_test.go
@@ -95,7 +95,7 @@ func Example_writeReadParquet() {
}
// Create a record
- record := recordBuilder.NewRecord()
+ record := recordBuilder.NewRecordBatch()
if err := writer.Write(record); err != nil {
log.Fatalf("Failed to write record: %v", err)
@@ -153,7 +153,7 @@ func Example_writeReadParquet() {
for recordReader.Next() {
// Create a record
- record := recordReader.Record()
+ record := recordReader.RecordBatch()
// Get columns
intCol := record.Column(0).(*array.Int32)
diff --git a/parquet/file/column_writer_test.go
b/parquet/file/column_writer_test.go
index f63bd45b..8f5d35d6 100644
--- a/parquet/file/column_writer_test.go
+++ b/parquet/file/column_writer_test.go
@@ -813,7 +813,7 @@ func TestDictionaryReslice(t *testing.T) {
for i := 0; i < 2000; i++ {
b.Field(0).(*array.BinaryDictionaryBuilder).AppendString("test_value")
}
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
out := &bytes.Buffer{}
pqw, err := pqarrow.NewFileWriter(rec.Schema(), out,
nil, pqarrow.NewArrowWriterProperties())
assert.NoError(t, err)
diff --git a/parquet/file/file_reader_test.go b/parquet/file/file_reader_test.go
index 1641a680..1927ca87 100644
--- a/parquet/file/file_reader_test.go
+++ b/parquet/file/file_reader_test.go
@@ -670,7 +670,7 @@ func TestDeltaBinaryPackedMultipleBatches(t *testing.T) {
for i := 0; i < size; i++ {
b.Field(0).(*array.Int64Builder).Append(int64(i))
}
- rec := b.NewRecord()
+ rec := b.NewRecordBatch()
defer rec.Release()
// Write the data to Parquet using the file writer
@@ -698,7 +698,7 @@ func TestDeltaBinaryPackedMultipleBatches(t *testing.T) {
totalRows := 0
for rr.Next() {
- rec := rr.Record()
+ rec := rr.RecordBatch()
for i := 0; i < int(rec.NumRows()); i++ {
col := rec.Column(0).(*array.Int64)
@@ -878,7 +878,7 @@ func TestDeltaByteArray(t *testing.T) {
defer rr.Release()
for rr.Next() {
- rec := rr.Record()
+ rec := rr.RecordBatch()
for i := range int(rec.NumCols()) {
vals := rec.Column(i)
for j := range vals.Len() {
@@ -927,7 +927,7 @@ func TestListColumns(t *testing.T) {
defer rr.Release()
for rr.Next() {
- rec := rr.Record()
+ rec := rr.RecordBatch()
for i := range int(rec.NumCols()) {
vals := rec.Column(i)
for j := range vals.Len() {
diff --git a/parquet/pqarrow/encode_arrow_test.go
b/parquet/pqarrow/encode_arrow_test.go
index e9df2af9..3ce4c98e 100644
--- a/parquet/pqarrow/encode_arrow_test.go
+++ b/parquet/pqarrow/encode_arrow_test.go
@@ -166,10 +166,10 @@ func makeDateTypeTable(mem memory.Allocator, expected
bool, partialDays bool) ar
bldr.Field(0).(*array.Date64Builder).AppendValues(d64Values,
isValid)
}
- rec := bldr.NewRecord()
+ rec := bldr.NewRecordBatch()
defer rec.Release()
- return array.NewTableFromRecords(arrsc, []arrow.Record{rec})
+ return array.NewTableFromRecords(arrsc, []arrow.RecordBatch{rec})
}
func makeTimestampTypeTable(mem memory.Allocator, expected bool) arrow.Table {
@@ -201,10 +201,10 @@ func makeTimestampTypeTable(mem memory.Allocator,
expected bool) arrow.Table {
bldr.Field(0).(*array.TimestampBuilder).AppendValues(ts64msValues,
isValid)
bldr.Field(1).(*array.TimestampBuilder).AppendValues(ts64msValues,
isValid)
- rec := bldr.NewRecord()
+ rec := bldr.NewRecordBatch()
defer rec.Release()
- return array.NewTableFromRecords(arrsc, []arrow.Record{rec})
+ return array.NewTableFromRecords(arrsc, []arrow.RecordBatch{rec})
}
func TestWriteArrowCols(t *testing.T) {
@@ -448,7 +448,7 @@ func TestWriteKeyValueMetadata(t *testing.T) {
b.AppendNull()
}
- rec := bldr.NewRecord()
+ rec := bldr.NewRecordBatch()
defer rec.Release()
props := parquet.NewWriterProperties(
@@ -493,7 +493,7 @@ func TestWriteEmptyLists(t *testing.T) {
b.AppendNull()
}
- rec := bldr.NewRecord()
+ rec := bldr.NewRecordBatch()
defer rec.Release()
props := parquet.NewWriterProperties(
@@ -532,7 +532,7 @@ func TestWriteAllNullsWithDeltaEncoding(t *testing.T) {
b.AppendNull()
}
- rec := bldr.NewRecord()
+ rec := bldr.NewRecordBatch()
defer rec.Release()
props := parquet.NewWriterProperties(
@@ -2005,7 +2005,7 @@ func TestForceLargeTypes(t *testing.T) {
bldr.Field(0).(*array.LargeStringBuilder).AppendValues([]string{"hello", "foo",
"bar"}, nil)
bldr.Field(1).(*array.BinaryBuilder).AppendValues([][]byte{[]byte("hello"),
[]byte("foo"), []byte("bar")}, nil)
- rec := bldr.NewRecord()
+ rec := bldr.NewRecordBatch()
defer rec.Release()
var buf bytes.Buffer
@@ -2059,7 +2059,7 @@ func TestBufferedRecWrite(t *testing.T) {
array.NewStructData(structData),
}
- rec := array.NewRecord(sc, cols, SIZELEN)
+ rec := array.NewRecordBatch(sc, cols, SIZELEN)
defer rec.Release()
var (
@@ -2257,7 +2257,7 @@ func (ps *ParquetIOTestSuite)
TestArrowExtensionTypeLogicalType() {
bldr.Field(0).(*extensions.UUIDBuilder).Append(uuid.New())
bldr.Field(1).(*array.ExtensionBuilder).AppendValueFromString(`{"hello":
["world", 2, true], "world": null}`)
- rec := bldr.NewRecord()
+ rec := bldr.NewRecordBatch()
defer rec.Release()
var buf bytes.Buffer
@@ -2305,7 +2305,7 @@ func TestWriteTableMemoryAllocation(t *testing.T) {
abld.ValueBuilder().(*array.Int64Builder).Append(2)
bld.Field(4).(*extensions.UUIDBuilder).Append(uuid.MustParse("00000000-0000-0000-0000-000000000001"))
- rec := bld.NewRecord()
+ rec := bld.NewRecordBatch()
bld.Release()
var buf bytes.Buffer
@@ -2332,7 +2332,7 @@ func TestEmptyListDeltaBinaryPacked(t *testing.T) {
listBuilder := builder.Field(0).(*array.ListBuilder)
listBuilder.Append(true)
- arrowRec := builder.NewRecord()
+ arrowRec := builder.NewRecordBatch()
defer arrowRec.Release()
var buf bytes.Buffer
@@ -2383,7 +2383,7 @@ func TestReadWriteNonShreddedVariant(t *testing.T) {
arr := bldr.NewArray()
defer arr.Release()
- rec := array.NewRecord(arrow.NewSchema([]arrow.Field{
+ rec := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{
{Name: "variant", Type: arr.DataType(), Nullable: true},
}, nil), []arrow.Array{arr}, -1)
@@ -2436,7 +2436,7 @@ func TestReadWriteShreddedVariant(t *testing.T) {
arr := bldr.NewArray()
defer arr.Release()
- rec := array.NewRecord(arrow.NewSchema([]arrow.Field{
+ rec := array.NewRecordBatch(arrow.NewSchema([]arrow.Field{
{Name: "variant", Type: arr.DataType(), Nullable: true},
}, nil), []arrow.Array{arr}, -1)
diff --git a/parquet/pqarrow/encode_dictionary_test.go
b/parquet/pqarrow/encode_dictionary_test.go
index fecdc649..79d3147d 100644
--- a/parquet/pqarrow/encode_dictionary_test.go
+++ b/parquet/pqarrow/encode_dictionary_test.go
@@ -478,9 +478,9 @@ func (ar *ArrowReadDictSuite)
checkStreamReadWholeFile(expected arrow.Table) {
ar.Require().NoError(err)
defer rrdr.Release()
- recs := make([]arrow.Record, 0)
+ recs := make([]arrow.RecordBatch, 0)
for rrdr.Next() {
- rec := rrdr.Record()
+ rec := rrdr.RecordBatch()
rec.Retain()
defer rec.Release()
recs = append(recs, rec)
diff --git a/parquet/pqarrow/file_reader.go b/parquet/pqarrow/file_reader.go
index b7f95178..c85b2fc9 100644
--- a/parquet/pqarrow/file_reader.go
+++ b/parquet/pqarrow/file_reader.go
@@ -719,7 +719,7 @@ type recordReader struct {
parallel bool
sc *arrow.Schema
fieldReaders []*ColumnReader
- cur arrow.Record
+ cur arrow.RecordBatch
err error
refCount atomic.Int64
@@ -807,7 +807,7 @@ func (r *recordReader) next() bool {
}
}
- r.cur = array.NewRecord(r.sc, cols, -1)
+ r.cur = array.NewRecordBatch(r.sc, cols, -1)
return true
}
@@ -862,7 +862,7 @@ func (r *recordReader) next() bool {
return false
}
- r.cur = array.NewRecord(r.sc, cols, -1)
+ r.cur = array.NewRecordBatch(r.sc, cols, -1)
return true
}
@@ -891,7 +891,7 @@ func (r *recordReader) Err() error {
return r.err
}
-func (r *recordReader) Read() (arrow.Record, error) {
+func (r *recordReader) Read() (arrow.RecordBatch, error) {
if r.cur != nil {
r.cur.Release()
r.cur = nil
diff --git a/parquet/pqarrow/file_reader_test.go
b/parquet/pqarrow/file_reader_test.go
index 1745f0a1..00c68b8d 100644
--- a/parquet/pqarrow/file_reader_test.go
+++ b/parquet/pqarrow/file_reader_test.go
@@ -216,9 +216,9 @@ func TestRecordReaderParallel(t *testing.T) {
assert.NotNil(t, rr)
defer rr.Release()
- records := make([]arrow.Record, 0)
+ records := make([]arrow.RecordBatch, 0)
for rr.Next() {
- rec := rr.Record()
+ rec := rr.RecordBatch()
defer rec.Release()
assert.Truef(t, sc.Equal(rec.Schema()), "expected: %s\ngot:
%s", sc, rec.Schema())
@@ -232,9 +232,9 @@ func TestRecordReaderParallel(t *testing.T) {
defer tr.Release()
assert.True(t, tr.Next())
- assert.Truef(t, array.RecordEqual(tr.Record(), records[0]), "expected:
%s\ngot: %s", tr.Record(), records[0])
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), records[0]),
"expected: %s\ngot: %s", tr.RecordBatch(), records[0])
assert.True(t, tr.Next())
- assert.Truef(t, array.RecordEqual(tr.Record(), records[1]), "expected:
%s\ngot: %s", tr.Record(), records[1])
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), records[1]),
"expected: %s\ngot: %s", tr.RecordBatch(), records[1])
}
func TestRecordReaderSerial(t *testing.T) {
@@ -268,17 +268,17 @@ func TestRecordReaderSerial(t *testing.T) {
rec, err := rr.Read()
assert.NoError(t, err)
tr.Next()
- assert.Truef(t, array.RecordEqual(tr.Record(), rec), "expected:
%s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), rec), "expected:
%s\ngot: %s", tr.RecordBatch(), rec)
rec, err = rr.Read()
assert.NoError(t, err)
tr.Next()
- assert.Truef(t, array.RecordEqual(tr.Record(), rec), "expected:
%s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), rec), "expected:
%s\ngot: %s", tr.RecordBatch(), rec)
rec, err = rr.Read()
assert.NoError(t, err)
tr.Next()
- assert.Truef(t, array.RecordEqual(tr.Record(), rec), "expected:
%s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), rec), "expected:
%s\ngot: %s", tr.RecordBatch(), rec)
rec, err = rr.Read()
assert.Same(t, io.EOF, err)
@@ -318,28 +318,28 @@ func TestRecordReaderSeekToRow(t *testing.T) {
rec, err := rr.Read()
assert.NoError(t, err)
tr.Next()
- assert.Truef(t, array.RecordEqual(tr.Record(), rec),
"expected: %s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(),
rec), "expected: %s\ngot: %s", tr.RecordBatch(), rec)
require.NoError(t, rr.SeekToRow(0))
rec, err = rr.Read()
assert.NoError(t, err)
- assert.Truef(t, array.RecordEqual(tr.Record(), rec),
"expected: %s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(),
rec), "expected: %s\ngot: %s", tr.RecordBatch(), rec)
rec, err = rr.Read()
assert.NoError(t, err)
tr.Next()
- assert.Truef(t, array.RecordEqual(tr.Record(), rec),
"expected: %s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(),
rec), "expected: %s\ngot: %s", tr.RecordBatch(), rec)
require.NoError(t, rr.SeekToRow(2))
rec, err = rr.Read()
assert.NoError(t, err)
- assert.Truef(t, array.RecordEqual(tr.Record(), rec),
"expected: %s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(),
rec), "expected: %s\ngot: %s", tr.RecordBatch(), rec)
require.NoError(t, rr.SeekToRow(4))
rec, err = rr.Read()
tr.Next()
assert.NoError(t, err)
- assert.Truef(t, array.RecordEqual(tr.Record(), rec),
"expected: %s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(),
rec), "expected: %s\ngot: %s", tr.RecordBatch(), rec)
})
}
}
@@ -375,17 +375,17 @@ func TestRecordReaderMultiRowGroup(t *testing.T) {
rec, err := rr.Read()
assert.NoError(t, err)
tr.Next()
- assert.Truef(t, array.RecordEqual(tr.Record(), rec), "expected:
%s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), rec), "expected:
%s\ngot: %s", tr.RecordBatch(), rec)
rec, err = rr.Read()
assert.NoError(t, err)
tr.Next()
- assert.Truef(t, array.RecordEqual(tr.Record(), rec), "expected:
%s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), rec), "expected:
%s\ngot: %s", tr.RecordBatch(), rec)
rec, err = rr.Read()
assert.NoError(t, err)
tr.Next()
- assert.Truef(t, array.RecordEqual(tr.Record(), rec), "expected:
%s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), rec), "expected:
%s\ngot: %s", tr.RecordBatch(), rec)
rec, err = rr.Read()
assert.Same(t, io.EOF, err)
@@ -423,32 +423,32 @@ func TestRecordReaderSeekToRowMultiRowGroup(t *testing.T)
{
rec, err := rr.Read()
assert.NoError(t, err)
tr.Next()
- first := tr.Record()
+ first := tr.RecordBatch()
first.Retain()
defer first.Release()
- assert.Truef(t, array.RecordEqual(tr.Record(), rec), "expected:
%s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), rec), "expected:
%s\ngot: %s", tr.RecordBatch(), rec)
require.NoError(t, rr.SeekToRow(0))
rec, err = rr.Read()
assert.NoError(t, err)
- assert.Truef(t, array.RecordEqual(tr.Record(), rec), "expected:
%s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), rec), "expected:
%s\ngot: %s", tr.RecordBatch(), rec)
rec, err = rr.Read()
assert.NoError(t, err)
tr.Next()
- assert.Truef(t, array.RecordEqual(tr.Record(), rec), "expected:
%s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), rec), "expected:
%s\ngot: %s", tr.RecordBatch(), rec)
require.NoError(t, rr.SeekToRow(2))
rec, err = rr.Read()
assert.NoError(t, err)
- assert.Truef(t, array.RecordEqual(tr.Record(), rec), "expected:
%s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), rec), "expected:
%s\ngot: %s", tr.RecordBatch(), rec)
require.NoError(t, rr.SeekToRow(4))
rec, err = rr.Read()
tr.Next()
assert.NoError(t, err)
- assert.Truef(t, array.RecordEqual(tr.Record(), rec), "expected:
%s\ngot: %s", tr.Record(), rec)
+ assert.Truef(t, array.RecordEqual(tr.RecordBatch(), rec), "expected:
%s\ngot: %s", tr.RecordBatch(), rec)
require.NoError(t, rr.SeekToRow(0))
rec, err = rr.Read()
diff --git a/parquet/pqarrow/file_writer.go b/parquet/pqarrow/file_writer.go
index e4e99368..1560d0c7 100644
--- a/parquet/pqarrow/file_writer.go
+++ b/parquet/pqarrow/file_writer.go
@@ -164,13 +164,13 @@ func (fw *FileWriter) NumRows() int {
//
// More memory is utilized compared to Write as the whole row group data is
kept in memory before it's written
// since Parquet files must have an entire column written before writing the
next column.
-func (fw *FileWriter) WriteBuffered(rec arrow.Record) error {
+func (fw *FileWriter) WriteBuffered(rec arrow.RecordBatch) error {
if !rec.Schema().Equal(fw.schema) {
return fmt.Errorf("record schema does not match writer's.
\nrecord: %s\nwriter: %s", rec.Schema(), fw.schema)
}
var (
- recList []arrow.Record
+ recList []arrow.RecordBatch
maxRows = fw.wr.Properties().MaxRowGroupLength()
curRows int
err error
@@ -184,9 +184,9 @@ func (fw *FileWriter) WriteBuffered(rec arrow.Record) error
{
}
if int64(curRows)+rec.NumRows() <= maxRows {
- recList = []arrow.Record{rec}
+ recList = []arrow.RecordBatch{rec}
} else {
- recList = []arrow.Record{rec.NewSlice(0,
maxRows-int64(curRows))}
+ recList = []arrow.RecordBatch{rec.NewSlice(0,
maxRows-int64(curRows))}
defer recList[0].Release()
for offset := maxRows - int64(curRows); offset < rec.NumRows();
offset += maxRows {
s := rec.NewSlice(offset, offset+utils.Min(maxRows,
rec.NumRows()-offset))
@@ -218,22 +218,22 @@ func (fw *FileWriter) WriteBuffered(rec arrow.Record)
error {
// Performance-wise Write might be more favorable than WriteBuffered if you're
dealing with:
// * a highly-restricted memory environment
// * very large records with lots of rows (potentially close to the max row
group length)
-func (fw *FileWriter) Write(rec arrow.Record) error {
+func (fw *FileWriter) Write(rec arrow.RecordBatch) error {
if !rec.Schema().Equal(fw.schema) {
return fmt.Errorf("record schema does not match writer's.
\nrecord: %s\nwriter: %s", rec.Schema(), fw.schema)
}
- var recList []arrow.Record
+ var recList []arrow.RecordBatch
rowgroupLen := fw.wr.Properties().MaxRowGroupLength()
if rec.NumRows() > rowgroupLen {
- recList = make([]arrow.Record, 0)
+ recList = make([]arrow.RecordBatch, 0)
for offset := int64(0); offset < rec.NumRows(); offset +=
rowgroupLen {
s := rec.NewSlice(offset, offset+utils.Min(rowgroupLen,
rec.NumRows()-offset))
defer s.Release()
recList = append(recList, s)
}
} else {
- recList = []arrow.Record{rec}
+ recList = []arrow.RecordBatch{rec}
}
for _, r := range recList {
diff --git a/parquet/pqarrow/reader_writer_test.go
b/parquet/pqarrow/reader_writer_test.go
index f3468762..29867cd2 100644
--- a/parquet/pqarrow/reader_writer_test.go
+++ b/parquet/pqarrow/reader_writer_test.go
@@ -316,8 +316,8 @@ func buildTableForTest(mem memory.Allocator) arrow.Table {
bldr.Field(5).(*array.BooleanBuilder).Append(true)
}
- rec := bldr.NewRecord()
- return array.NewTableFromRecords(schema, []arrow.Record{rec})
+ rec := bldr.NewRecordBatch()
+ return array.NewTableFromRecords(schema, []arrow.RecordBatch{rec})
}
func BenchmarkWriteTableCompressed(b *testing.B) {