GH-34784: [Go] Fix 32-bit build (#35767)
### Rationale for this change
Two locations in the code cause issues when building for 32-bit systems (e.g. `GOARCH=386`).

### What changes are included in this PR?
In the `compute` package we assumed a 64-bit system by using an untyped `int` to hold `math.MaxInt64`, which overflows on a 32-bit system. We now explicitly declare it as an `int64`.
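
A minimal standalone sketch of the failure mode (not the actual `compute` code; the variable name just mirrors the change in the diff below):

```go
package main

import "math"

func main() {
	// chunkLength := math.MaxInt64
	// On GOARCH=386 the untyped constant would be assigned to a 32-bit `int`,
	// so the compiler rejects it with roughly:
	//   constant 9223372036854775807 overflows int

	// Giving the variable an explicit int64 type works on every architecture.
	var chunkLength int64 = math.MaxInt64
	_ = chunkLength
}
```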

In the `cdata` package we used the older `*(*[maxlen]*C.void)(unsafe.Pointer(.....))[:]` syntax to retrieve the `void**` for the buffers, with `maxlen` set to a very large constant. Unfortunately, on a 32-bit system that constant is larger than the address space. Instead we switch to the `unsafe.Slice` function that was added in Go 1.17.
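
A minimal sketch of the two patterns, using an `int32` buffer in place of cgo's `*C.void` so it compiles without cgo (the names and the deliberately small `maxlen` are illustrative only):

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Stand-in for a C-owned buffer: a pointer to the first element of an array.
	vals := [3]int32{10, 20, 30}
	p := unsafe.Pointer(&vals[0])
	n := 3

	// Old pattern: reinterpret as a huge fixed-size array and slice it.
	// With maxlen = 0x7fffffff the array type alone exceeds the 32-bit
	// address space, which is why this form breaks under GOARCH=386.
	const maxlen = 1 << 20 // kept small here so the sketch compiles everywhere
	oldStyle := (*[maxlen]int32)(p)[:n:n]

	// New pattern (Go 1.17+): unsafe.Slice builds the slice header directly
	// from the element pointer and length, with no oversized array type.
	newStyle := unsafe.Slice((*int32)(p), n)

	fmt.Println(oldStyle, newStyle)
}
```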

* Closes: #34784

Authored-by: Matt Topol <zotthewizard@gmail.com>
Signed-off-by: Matt Topol <zotthewizard@gmail.com>
zeroshade authored May 26, 2023
1 parent 77a7130 commit 9eaee2a
Showing 2 changed files with 12 additions and 13 deletions.
15 changes: 7 additions & 8 deletions go/arrow/cdata/cdata.go
@@ -100,12 +100,12 @@ var formatToSimpleType = map[string]arrow.DataType{

// decode metadata from C which is encoded as
//
// [int32] -> number of metadata pairs
// for 0..n
// [int32] -> number of bytes in key
// [n bytes] -> key value
// [int32] -> number of bytes in value
// [n bytes] -> value
// [int32] -> number of metadata pairs
// for 0..n
// [int32] -> number of bytes in key
// [n bytes] -> key value
// [int32] -> number of bytes in value
// [n bytes] -> value
func decodeCMetadata(md *C.char) arrow.Metadata {
if md == nil {
return arrow.Metadata{}
@@ -413,8 +413,7 @@ func (imp *cimporter) doImport(src *CArrowArray) error {

if imp.arr.n_buffers > 0 {
// get a view of the buffers, zero-copy. we're just looking at the pointers
const maxlen = 0x7fffffff
imp.cbuffers = (*[maxlen]*C.void)(unsafe.Pointer(imp.arr.buffers))[:imp.arr.n_buffers:imp.arr.n_buffers]
imp.cbuffers = unsafe.Slice((**C.void)(unsafe.Pointer(imp.arr.buffers)), imp.arr.n_buffers)
}

// handle each of our type cases
10 changes: 5 additions & 5 deletions go/arrow/compute/internal/exec/utils.go
@@ -239,21 +239,21 @@ func RechunkArraysConsistently(groups [][]arrow.Array) [][]arrow.Array {
}

rechunked := make([][]arrow.Array, len(groups))
offsets := make([]int, len(groups))
offsets := make([]int64, len(groups))
// scan all array vectors at once, rechunking along the way
var start int64
for start < int64(totalLen) {
// first compute max possible length for next chunk
chunkLength := math.MaxInt64
var chunkLength int64 = math.MaxInt64
for i, g := range groups {
offset := offsets[i]
// skip any done arrays including 0-length
for offset == g[0].Len() {
for offset == int64(g[0].Len()) {
g = g[1:]
offset = 0
}
arr := g[0]
chunkLength = Min(chunkLength, arr.Len()-offset)
chunkLength = Min(chunkLength, int64(arr.Len())-offset)

offsets[i] = offset
groups[i] = g
@@ -263,7 +263,7 @@ func RechunkArraysConsistently(groups [][]arrow.Array) [][]arrow.Array {
for i, g := range groups {
offset := offsets[i]
arr := g[0]
if offset == 0 && arr.Len() == chunkLength {
if offset == 0 && int64(arr.Len()) == chunkLength {
// slice spans entire array
arr.Retain()
rechunked[i] = append(rechunked[i], arr)
