zeroshade commented on code in PR #34666:
URL: https://github.com/apache/arrow/pull/34666#discussion_r1145116487
##########
go/arrow/compute/vector_run_end_test.go:
##########
@@ -295,3 +299,125 @@ func TestRunEndFunctions(t *testing.T) {
})
}
}
+
+func benchRunEndEncode(b *testing.B, sz int, nullProb float64, runEndType, valueType arrow.DataType) {
+	b.Run("encode", func(b *testing.B) {
+		var (
+			mem = memory.NewCheckedAllocator(memory.DefaultAllocator)
+			rng = gen.NewRandomArrayGenerator(seed, mem)
+		)
+
+		values := rng.ArrayOf(valueType.ID(), int64(sz), nullProb)
+		b.Cleanup(func() {
+			values.Release()
+		})
+
+		var (
+			res   compute.Datum
+			err   error
+			ctx   = compute.WithAllocator(context.Background(), mem)
+			input = &compute.ArrayDatum{Value: values.Data()}
+			opts  = compute.RunEndEncodeOptions{RunEndType: runEndType}
+
+			byts int64
+		)
+
+		for _, buf := range values.Data().Buffers() {
+			if buf != nil {
+				byts += int64(buf.Len())
+			}
+		}
+
+		b.SetBytes(byts)
+		b.ResetTimer()
+		for n := 0; n < b.N; n++ {
+			res, err = compute.RunEndEncode(ctx, opts, input)
Review Comment:
So right now it's using a specific seed to ensure consistency, but that's a
good point: this benchmark is essentially still just benchmarking a random
distribution of *values*, with no guarantee of any particular distribution of
runs and run lengths. It might make more sense to restrict the min/max values
to better guarantee some runs (rough sketch below).
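
As a rough illustration of that suggestion (not something in the PR), one way to make runs likely is to draw the benchmark values from a small set of distinct values rather than the generator's full range. The helper name `makeRunnyInt64Values` and the `maxDistinct` parameter below are hypothetical, and the `v12` module path is assumed from the PR's timeframe; the benchmark would then wrap the resulting array as its `input` datum instead of the fully random one.

```go
package compute_test

import (
	"math/rand"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/memory"
)

// makeRunnyInt64Values is a hypothetical helper: it builds an int64 array whose
// values are drawn from only maxDistinct distinct values, so consecutive
// duplicates (runs) occur with probability of roughly 1/maxDistinct per element,
// giving the encode benchmark a more predictable run-length profile.
func makeRunnyInt64Values(mem memory.Allocator, sz int, maxDistinct int64, nullProb float64, seed int64) arrow.Array {
	r := rand.New(rand.NewSource(seed))
	bldr := array.NewInt64Builder(mem)
	defer bldr.Release()

	for i := 0; i < sz; i++ {
		if r.Float64() < nullProb {
			bldr.AppendNull()
			continue
		}
		bldr.Append(r.Int63n(maxDistinct))
	}
	return bldr.NewInt64Array()
}
```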
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]