lidavidm commented on a change in pull request #10377:
URL: https://github.com/apache/arrow/pull/10377#discussion_r637268115



##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, 
ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), 
left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, 
len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, 
cond.buffers[0]->data(),
+                                 cond.offset, len, 0, 
out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>

Review comment:
       What is `swap` doing? 

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,

Review comment:
       nit: use CamelCase (`PromoteNulls`)

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, 
ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), 
left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, 
len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, 
cond.buffers[0]->data(),
+                                 cond.offset, len, 0, 
out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* 
(vmovdqa*)
+    while (offset < cond.offset + cond.length) {
+      const BitBlockCount& block = bit_counter.NextWord();
+      if (block.AllSet()) {  // all from left
+        std::memcpy(out_values, left_data, block.length * sizeof(T));
+      } else if (block.popcount) {  // selectively copy from left
+        for (int64_t i = 0; i < block.length; ++i) {
+          if (BitUtil::GetBit(cond_data, offset + i)) {
+            out_values[i] = left_data[i];
+          }
+        }
+      }
+
+      offset += block.length;
+      out_values += block.length;
+      left_data += block.length;
+    }
+
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& 
left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_boolean_type<Type>::value>> {
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    // out_buff = right & ~cond
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          arrow::internal::BitmapAndNot(
+                              ctx->memory_pool(), right.buffers[1]->data(), 
right.offset,
+                              cond.buffers[1]->data(), cond.offset, 
cond.length, 0));
+
+    // out_buff = left & cond
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[1]->data(), 
left.offset,
+                              cond.buffers[1]->data(), cond.offset, 
cond.length, 0));
+
+    arrow::internal::BitmapOr(out_buf->data(), 0, temp_buf->data(), 0, 
cond.length, 0,
+                              out_buf->mutable_data());
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& 
left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_null_type<Type>::value>> {
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    // Nothing preallocated, so we assign left into the output
+    *out = left;
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& 
left,
+                     const Scalar& right, Scalar* out) {
+    return Status::OK();
+  }
+};
+
+template <typename Type>
+struct ResolveExec {
+  static Status Exec(KernelContext* ctx, const ExecBatch& batch, Datum* out) {
+    if (batch.length == 0) return Status::OK();

Review comment:
       Do you really need to check for this? You can assume you have three 
arguments.

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, 
ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), 
left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, 
len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, 
cond.buffers[0]->data(),
+                                 cond.offset, len, 0, 
out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* 
(vmovdqa*)

Review comment:
       I would assume memcpy already does this for you.

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, 
ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), 
left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, 
len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, 
cond.buffers[0]->data(),
+                                 cond.offset, len, 0, 
out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* 
(vmovdqa*)
+    while (offset < cond.offset + cond.length) {
+      const BitBlockCount& block = bit_counter.NextWord();
+      if (block.AllSet()) {  // all from left
+        std::memcpy(out_values, left_data, block.length * sizeof(T));
+      } else if (block.popcount) {  // selectively copy from left
+        for (int64_t i = 0; i < block.length; ++i) {
+          if (BitUtil::GetBit(cond_data, offset + i)) {
+            out_values[i] = left_data[i];
+          }
+        }
+      }
+
+      offset += block.length;
+      out_values += block.length;
+      left_data += block.length;
+    }
+
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& 
left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_boolean_type<Type>::value>> {
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    // out_buff = right & ~cond
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          arrow::internal::BitmapAndNot(
+                              ctx->memory_pool(), right.buffers[1]->data(), 
right.offset,
+                              cond.buffers[1]->data(), cond.offset, 
cond.length, 0));
+
+    // out_buff = left & cond
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[1]->data(), 
left.offset,
+                              cond.buffers[1]->data(), cond.offset, 
cond.length, 0));
+
+    arrow::internal::BitmapOr(out_buf->data(), 0, temp_buf->data(), 0, 
cond.length, 0,
+                              out_buf->mutable_data());
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& 
left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_null_type<Type>::value>> {
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    // Nothing preallocated, so we assign left into the output
+    *out = left;
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& 
left,
+                     const Scalar& right, Scalar* out) {
+    return Status::OK();
+  }
+};
+
+template <typename Type>
+struct ResolveExec {
+  static Status Exec(KernelContext* ctx, const ExecBatch& batch, Datum* out) {
+    if (batch.length == 0) return Status::OK();
+
+    if (batch[0].kind() == Datum::ARRAY) {
+      if (batch[1].kind() == Datum::ARRAY) {
+        if (batch[2].kind() == Datum::ARRAY) {  // AAA
+          return IfElseFunctor<Type>::Call(ctx, *batch[0].array(), 
*batch[1].array(),
+                                           *batch[2].array(), 
out->mutable_array());
+        } else {  // AAS
+          return IfElseFunctor<Type>::Call(ctx, *batch[0].array(), 
*batch[1].array(),
+                                           *batch[2].scalar(), 
out->mutable_array());
+        }
+      } else {
+        return Status::Invalid("");
+        //        if (batch[2].kind() == Datum::ARRAY) {  // ASA
+        //          return IfElseFunctor<Type, true>::Call(ctx, 
*batch[0].array(),
+        //                                                 *batch[2].array(),
+        //                                                 *batch[1].scalar(),
+        //                                                 
out->mutable_array());
+        //        } else {  // ASS
+        //          return IfElseFunctor<Type>::Call(ctx, *batch[0].array(),
+        //          *batch[1].scalar(),
+        //                                           *batch[2].scalar(),
+        //                                           out->mutable_array());
+        //        }
+      }
+    } else {
+      if (batch[1].kind() == Datum::ARRAY) {
+        return Status::Invalid("");
+        //        if (batch[2].kind() == Datum::ARRAY) {  // SAA
+        //          return IfElseFunctor<Type>::Call(ctx, *batch[0].scalar(),
+        //          *batch[1].array(),
+        //                                           *batch[2].array(),
+        //                                           out->mutable_array());
+        //        } else {  // SAS
+        //          return IfElseFunctor<Type>::Call(ctx, *batch[0].scalar(),
+        //          *batch[1].array(),
+        //                                           *batch[2].scalar(),
+        //                                           out->mutable_array());
+        //        }
+      } else {
+        if (batch[2].kind() == Datum::ARRAY) {  // SSA
+          return Status::Invalid("");
+          //          return IfElseFunctor<Type>::Call(ctx, *batch[0].scalar(),
+          //          *batch[1].scalar(),
+          //                                           *batch[2].array(),
+          //                                           out->mutable_array());
+        } else {  // SSS
+          return IfElseFunctor<Type>::Call(ctx, *batch[0].scalar(), 
*batch[1].scalar(),
+                                           *batch[2].scalar(), 
out->scalar().get());
+        }
+      }
+    }
+  }
+};
+
+void AddPrimitiveKernels(const std::shared_ptr<ScalarFunction>& 
scalar_function,

Review comment:
       A lot of these function names are a little generic, can we rename them 
to reflect that they're for IfElse? We might end up putting other similar 
kernels here.

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, 
ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), 
left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, 
len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, 
cond.buffers[0]->data(),
+                                 cond.offset, len, 0, 
out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* 
(vmovdqa*)
+    while (offset < cond.offset + cond.length) {
+      const BitBlockCount& block = bit_counter.NextWord();
+      if (block.AllSet()) {  // all from left
+        std::memcpy(out_values, left_data, block.length * sizeof(T));
+      } else if (block.popcount) {  // selectively copy from left
+        for (int64_t i = 0; i < block.length; ++i) {
+          if (BitUtil::GetBit(cond_data, offset + i)) {
+            out_values[i] = left_data[i];
+          }
+        }
+      }
+
+      offset += block.length;
+      out_values += block.length;
+      left_data += block.length;
+    }
+
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& 
left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_boolean_type<Type>::value>> {

Review comment:
       (same with `enable_if_number` etc)

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, 
ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), 
left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, 
len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, 
cond.buffers[0]->data(),
+                                 cond.offset, len, 0, 
out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+// Primary template, intentionally empty: only the SFINAE specializations
+// below (numbers, booleans, ...) provide implementations. `swap` selects the
+// argument order handled by the specialization; `Enable` is the enable_if slot.
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* 
(vmovdqa*)
+    while (offset < cond.offset + cond.length) {
+      const BitBlockCount& block = bit_counter.NextWord();
+      if (block.AllSet()) {  // all from left
+        std::memcpy(out_values, left_data, block.length * sizeof(T));
+      } else if (block.popcount) {  // selectively copy from left
+        for (int64_t i = 0; i < block.length; ++i) {
+          if (BitUtil::GetBit(cond_data, offset + i)) {
+            out_values[i] = left_data[i];
+          }
+        }
+      }
+
+      offset += block.length;
+      out_values += block.length;
+      left_data += block.length;
+    }
+
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const 
ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& 
left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_boolean_type<Type>::value>> {

Review comment:
       nit: `type_traits.h` already has an `enable_if_boolean`




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to