This is an automated email from the ASF dual-hosted git repository.

zeroshade pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/arrow.git


The following commit(s) were added to refs/heads/main by this push:
     new 9eaee2a532 GH-34784: [Go] Fix 32-bit build (#35767)
9eaee2a532 is described below

commit 9eaee2a532ef3f13de7f0448d6c61a02b33730ea
Author: Matt Topol <[email protected]>
AuthorDate: Fri May 26 11:40:16 2023 -0400

    GH-34784: [Go] Fix 32-bit build (#35767)
    
    ### Rationale for this change
    Two locations in the code cause issues when building for 32-bit systems (e.g. with `GOARCH=386`).
    
    ### What changes are included in this PR?
    In the `compute` package we assume a 64-bit system by using an untyped `int` to hold `math.MaxInt64`, which overflows on a 32-bit system. So we just explicitly declare it as an `int64`.
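    A minimal standalone sketch of the failure mode (not the package code itself, though `chunkLength` mirrors the variable changed in the diff below):
    
    ```go
    package main
    
    import (
        "fmt"
        "math"
    )
    
    func main() {
        // chunkLength := math.MaxInt64
        // On a 32-bit GOARCH the untyped constant defaults to `int` (32 bits),
        // so the compiler rejects it: constant 9223372036854775807 overflows int.
    
        var chunkLength int64 = math.MaxInt64 // explicit int64 builds on every GOARCH
        fmt.Println(chunkLength)
    }
    ```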
    
    In the `cdata` package we use the older `*(*[maxlen]*C.void)(unsafe.Pointer(.....))[:]` syntax to retrieve the `void**` for the buffers, with `maxlen` set to a very large constant. Unfortunately, on a 32-bit system that array type is larger than the address space, so we instead switch to the `unsafe.Slice` function that was added in go1.17.
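    For reference, a hedged sketch of the two patterns using a plain Go pointer in place of the real cgo `*C.void` buffers (all names below are made up for illustration):
    
    ```go
    package main
    
    import (
        "fmt"
        "unsafe"
    )
    
    func main() {
        bufs := [3]uintptr{1, 2, 3} // stand-in for the C array of buffer pointers
        p := unsafe.Pointer(&bufs)
        n := len(bufs)
    
        // Old pattern: cast to a huge fixed-size array type, then reslice.
        // With maxlen = 0x7fffffff and pointer-sized elements that array type
        // alone is bigger than a 32-bit address space.
        const maxlen = 1 << 10 // kept small here so the sketch runs anywhere
        oldStyle := (*[maxlen]uintptr)(p)[:n:n]
    
        // New pattern (go1.17+): build the slice header directly from the
        // pointer and length; no oversized array type is involved.
        newStyle := unsafe.Slice((*uintptr)(p), n)
    
        fmt.Println(oldStyle, newStyle) // [1 2 3] [1 2 3]
    }
    ```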
    
    * Closes: #34784
    
    Authored-by: Matt Topol <[email protected]>
    Signed-off-by: Matt Topol <[email protected]>
---
 go/arrow/cdata/cdata.go                 | 15 +++++++--------
 go/arrow/compute/internal/exec/utils.go | 10 +++++-----
 2 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/go/arrow/cdata/cdata.go b/go/arrow/cdata/cdata.go
index c62940d859..638d39b02e 100644
--- a/go/arrow/cdata/cdata.go
+++ b/go/arrow/cdata/cdata.go
@@ -100,12 +100,12 @@ var formatToSimpleType = map[string]arrow.DataType{
 
 // decode metadata from C which is encoded as
 //
-//  [int32] -> number of metadata pairs
-//     for 0..n
-//             [int32] -> number of bytes in key
-//             [n bytes] -> key value
-//             [int32] -> number of bytes in value
-//             [n bytes] -> value
+//      [int32] -> number of metadata pairs
+//             for 0..n
+//                     [int32] -> number of bytes in key
+//                     [n bytes] -> key value
+//                     [int32] -> number of bytes in value
+//                     [n bytes] -> value
 func decodeCMetadata(md *C.char) arrow.Metadata {
        if md == nil {
                return arrow.Metadata{}
@@ -413,8 +413,7 @@ func (imp *cimporter) doImport(src *CArrowArray) error {
 
        if imp.arr.n_buffers > 0 {
                // get a view of the buffers, zero-copy. we're just looking at the pointers
-               const maxlen = 0x7fffffff
-               imp.cbuffers = (*[maxlen]*C.void)(unsafe.Pointer(imp.arr.buffers))[:imp.arr.n_buffers:imp.arr.n_buffers]
+               imp.cbuffers = unsafe.Slice((**C.void)(unsafe.Pointer(imp.arr.buffers)), imp.arr.n_buffers)
        }
 
        // handle each of our type cases
diff --git a/go/arrow/compute/internal/exec/utils.go b/go/arrow/compute/internal/exec/utils.go
index 21a4eaf61d..39c4fff909 100644
--- a/go/arrow/compute/internal/exec/utils.go
+++ b/go/arrow/compute/internal/exec/utils.go
@@ -239,21 +239,21 @@ func RechunkArraysConsistently(groups [][]arrow.Array) [][]arrow.Array {
        }
 
        rechunked := make([][]arrow.Array, len(groups))
-       offsets := make([]int, len(groups))
+       offsets := make([]int64, len(groups))
        // scan all array vectors at once, rechunking along the way
        var start int64
        for start < int64(totalLen) {
                // first compute max possible length for next chunk
-               chunkLength := math.MaxInt64
+               var chunkLength int64 = math.MaxInt64
                for i, g := range groups {
                        offset := offsets[i]
                        // skip any done arrays including 0-length
-                       for offset == g[0].Len() {
+                       for offset == int64(g[0].Len()) {
                                g = g[1:]
                                offset = 0
                        }
                        arr := g[0]
-                       chunkLength = Min(chunkLength, arr.Len()-offset)
+                       chunkLength = Min(chunkLength, int64(arr.Len())-offset)
 
                        offsets[i] = offset
                        groups[i] = g
@@ -263,7 +263,7 @@ func RechunkArraysConsistently(groups [][]arrow.Array) [][]arrow.Array {
                for i, g := range groups {
                        offset := offsets[i]
                        arr := g[0]
-                       if offset == 0 && arr.Len() == chunkLength {
+                       if offset == 0 && int64(arr.Len()) == chunkLength {
                                // slice spans entire array
                                arr.Retain()
                                rechunked[i] = append(rechunked[i], arr)
