You are viewing a plain text version of this content. The canonical link for it is here.
Posted to github@arrow.apache.org by GitBox <gi...@apache.org> on 2021/05/21 21:46:07 UTC

[GitHub] [arrow] nirandaperera opened a new pull request #10377: ARROW-10640: [C++] A "where" kernel to combine two arrays based on a mask

nirandaperera opened a new pull request #10377:
URL: https://github.com/apache/arrow/pull/10377


   Adding a preliminary impl for an `if_else(cond: Datum, left: Datum, right: Datum)` function. It works as follows,
   ```python
   def if_else(cond, left, right):
       for c, true_val, false_val in zip(cond, left, right):
           if c:
               yield true_val
           else:
               yield false_val
   ```
   `null` values will be promoted to the output. 
   
   Current commit provides only Array-Array-Array kernels for the function. 


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [arrow] nirandaperera closed pull request #10377: ARROW-10640: [C++] A "where" kernel to combine two arrays based on a mask

Posted by GitBox <gi...@apache.org>.
nirandaperera closed pull request #10377:
URL: https://github.com/apache/arrow/pull/10377


   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [arrow] lidavidm commented on a change in pull request #10377: ARROW-10640: [C++] A "where" kernel to combine two arrays based on a mask

Posted by GitBox <gi...@apache.org>.
lidavidm commented on a change in pull request #10377:
URL: https://github.com/apache/arrow/pull/10377#discussion_r637957534



##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, cond.buffers[0]->data(),
+                                 cond.offset, len, 0, out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* (vmovdqa*)

Review comment:
       Sorry, I think I misunderstood the optimization you're thinking about. How would SIMD help here?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [arrow] github-actions[bot] commented on pull request #10377: ARROW-10640: [C++] A "where" kernel to combine two arrays based on a mask

Posted by GitBox <gi...@apache.org>.
github-actions[bot] commented on pull request #10377:
URL: https://github.com/apache/arrow/pull/10377#issuecomment-846278900


   https://issues.apache.org/jira/browse/ARROW-10640


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [arrow] nirandaperera commented on pull request #10377: ARROW-10640: [C++] A "where" kernel to combine two arrays based on a mask

Posted by GitBox <gi...@apache.org>.
nirandaperera commented on pull request #10377:
URL: https://github.com/apache/arrow/pull/10377#issuecomment-847045029


   > Just some quick initial feedback. I think the overall approach looks fine. Testing this thoroughly will be important.
   > 
   > For a different PR I added a `DatumFromJSON` to make it easier to test functions that accept a lot of permutations of scalar/array arguments. I could pull that out separately if that would make testing easier here. (That way you don't need a lot of overloads and boilerplate to test all the possible input combinations.)
   
   That would be great @lidavidm . If you could send me the PR# that would be great! :-) 


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [arrow] lidavidm commented on pull request #10377: ARROW-10640: [C++] A "where" kernel to combine two arrays based on a mask

Posted by GitBox <gi...@apache.org>.
lidavidm commented on pull request #10377:
URL: https://github.com/apache/arrow/pull/10377#issuecomment-847056827


   > > Just some quick initial feedback. I think the overall approach looks fine. Testing this thoroughly will be important.
   > > For a different PR I added a `DatumFromJSON` to make it easier to test functions that accept a lot of permutations of scalar/array arguments. I could pull that out separately if that would make testing easier here. (That way you don't need a lot of overloads and boilerplate to test all the possible input combinations.)
   > 
   > That would be great @lidavidm . If you could send me the PR# that would be great! :-)
   
   See #10386


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [arrow] nirandaperera commented on a change in pull request #10377: ARROW-10640: [C++] A "where" kernel to combine two arrays based on a mask

Posted by GitBox <gi...@apache.org>.
nirandaperera commented on a change in pull request #10377:
URL: https://github.com/apache/arrow/pull/10377#discussion_r637955569



##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, cond.buffers[0]->data(),
+                                 cond.offset, len, 0, out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* (vmovdqa*)

Review comment:
       Do you mean, load with mask?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [arrow] lidavidm commented on a change in pull request #10377: ARROW-10640: [C++] A "where" kernel to combine two arrays based on a mask

Posted by GitBox <gi...@apache.org>.
lidavidm commented on a change in pull request #10377:
URL: https://github.com/apache/arrow/pull/10377#discussion_r637268115



##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, cond.buffers[0]->data(),
+                                 cond.offset, len, 0, out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>

Review comment:
       What is `swap` doing? 

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,

Review comment:
       nit: use CamelCase (`PromoteNulls`)

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, cond.buffers[0]->data(),
+                                 cond.offset, len, 0, out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* (vmovdqa*)
+    while (offset < cond.offset + cond.length) {
+      const BitBlockCount& block = bit_counter.NextWord();
+      if (block.AllSet()) {  // all from left
+        std::memcpy(out_values, left_data, block.length * sizeof(T));
+      } else if (block.popcount) {  // selectively copy from left
+        for (int64_t i = 0; i < block.length; ++i) {
+          if (BitUtil::GetBit(cond_data, offset + i)) {
+            out_values[i] = left_data[i];
+          }
+        }
+      }
+
+      offset += block.length;
+      out_values += block.length;
+      left_data += block.length;
+    }
+
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_boolean_type<Type>::value>> {
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    // out_buff = right & ~cond
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          arrow::internal::BitmapAndNot(
+                              ctx->memory_pool(), right.buffers[1]->data(), right.offset,
+                              cond.buffers[1]->data(), cond.offset, cond.length, 0));
+
+    // out_buff = left & cond
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[1]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, cond.length, 0));
+
+    arrow::internal::BitmapOr(out_buf->data(), 0, temp_buf->data(), 0, cond.length, 0,
+                              out_buf->mutable_data());
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_null_type<Type>::value>> {
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    // Nothing preallocated, so we assign left into the output
+    *out = left;
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& left,
+                     const Scalar& right, Scalar* out) {
+    return Status::OK();
+  }
+};
+
+template <typename Type>
+struct ResolveExec {
+  static Status Exec(KernelContext* ctx, const ExecBatch& batch, Datum* out) {
+    if (batch.length == 0) return Status::OK();

Review comment:
       Do you really need to check for this? You can assume you have three arguments.

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, cond.buffers[0]->data(),
+                                 cond.offset, len, 0, out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* (vmovdqa*)

Review comment:
       I would assume memcpy already does this for you.

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, cond.buffers[0]->data(),
+                                 cond.offset, len, 0, out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* (vmovdqa*)
+    while (offset < cond.offset + cond.length) {
+      const BitBlockCount& block = bit_counter.NextWord();
+      if (block.AllSet()) {  // all from left
+        std::memcpy(out_values, left_data, block.length * sizeof(T));
+      } else if (block.popcount) {  // selectively copy from left
+        for (int64_t i = 0; i < block.length; ++i) {
+          if (BitUtil::GetBit(cond_data, offset + i)) {
+            out_values[i] = left_data[i];
+          }
+        }
+      }
+
+      offset += block.length;
+      out_values += block.length;
+      left_data += block.length;
+    }
+
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_boolean_type<Type>::value>> {
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    // out_buff = right & ~cond
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          arrow::internal::BitmapAndNot(
+                              ctx->memory_pool(), right.buffers[1]->data(), right.offset,
+                              cond.buffers[1]->data(), cond.offset, cond.length, 0));
+
+    // out_buff = left & cond
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[1]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, cond.length, 0));
+
+    arrow::internal::BitmapOr(out_buf->data(), 0, temp_buf->data(), 0, cond.length, 0,
+                              out_buf->mutable_data());
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_null_type<Type>::value>> {
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    // Nothing preallocated, so we assign left into the output
+    *out = left;
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& left,
+                     const Scalar& right, Scalar* out) {
+    return Status::OK();
+  }
+};
+
+template <typename Type>
+struct ResolveExec {
+  static Status Exec(KernelContext* ctx, const ExecBatch& batch, Datum* out) {
+    if (batch.length == 0) return Status::OK();
+
+    if (batch[0].kind() == Datum::ARRAY) {
+      if (batch[1].kind() == Datum::ARRAY) {
+        if (batch[2].kind() == Datum::ARRAY) {  // AAA
+          return IfElseFunctor<Type>::Call(ctx, *batch[0].array(), *batch[1].array(),
+                                           *batch[2].array(), out->mutable_array());
+        } else {  // AAS
+          return IfElseFunctor<Type>::Call(ctx, *batch[0].array(), *batch[1].array(),
+                                           *batch[2].scalar(), out->mutable_array());
+        }
+      } else {
+        return Status::Invalid("");
+        //        if (batch[2].kind() == Datum::ARRAY) {  // ASA
+        //          return IfElseFunctor<Type, true>::Call(ctx, *batch[0].array(),
+        //                                                 *batch[2].array(),
+        //                                                 *batch[1].scalar(),
+        //                                                 out->mutable_array());
+        //        } else {  // ASS
+        //          return IfElseFunctor<Type>::Call(ctx, *batch[0].array(),
+        //          *batch[1].scalar(),
+        //                                           *batch[2].scalar(),
+        //                                           out->mutable_array());
+        //        }
+      }
+    } else {
+      if (batch[1].kind() == Datum::ARRAY) {
+        return Status::Invalid("");
+        //        if (batch[2].kind() == Datum::ARRAY) {  // SAA
+        //          return IfElseFunctor<Type>::Call(ctx, *batch[0].scalar(),
+        //          *batch[1].array(),
+        //                                           *batch[2].array(),
+        //                                           out->mutable_array());
+        //        } else {  // SAS
+        //          return IfElseFunctor<Type>::Call(ctx, *batch[0].scalar(),
+        //          *batch[1].array(),
+        //                                           *batch[2].scalar(),
+        //                                           out->mutable_array());
+        //        }
+      } else {
+        if (batch[2].kind() == Datum::ARRAY) {  // SSA
+          return Status::Invalid("");
+          //          return IfElseFunctor<Type>::Call(ctx, *batch[0].scalar(),
+          //          *batch[1].scalar(),
+          //                                           *batch[2].array(),
+          //                                           out->mutable_array());
+        } else {  // SSS
+          return IfElseFunctor<Type>::Call(ctx, *batch[0].scalar(), *batch[1].scalar(),
+                                           *batch[2].scalar(), out->scalar().get());
+        }
+      }
+    }
+  }
+};
+
+void AddPrimitiveKernels(const std::shared_ptr<ScalarFunction>& scalar_function,

Review comment:
       A lot of these function names are a little generic; can we rename them to reflect that they're for IfElse? We might end up putting other similar kernels here.

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, cond.buffers[0]->data(),
+                                 cond.offset, len, 0, out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* (vmovdqa*)
+    while (offset < cond.offset + cond.length) {
+      const BitBlockCount& block = bit_counter.NextWord();
+      if (block.AllSet()) {  // all from left
+        std::memcpy(out_values, left_data, block.length * sizeof(T));
+      } else if (block.popcount) {  // selectively copy from left
+        for (int64_t i = 0; i < block.length; ++i) {
+          if (BitUtil::GetBit(cond_data, offset + i)) {
+            out_values[i] = left_data[i];
+          }
+        }
+      }
+
+      offset += block.length;
+      out_values += block.length;
+      left_data += block.length;
+    }
+
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_boolean_type<Type>::value>> {

Review comment:
       (same with `enable_if_number` etc)

##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, cond.buffers[0]->data(),
+                                 cond.offset, len, 0, out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* (vmovdqa*)
+    while (offset < cond.offset + cond.length) {
+      const BitBlockCount& block = bit_counter.NextWord();
+      if (block.AllSet()) {  // all from left
+        std::memcpy(out_values, left_data, block.length * sizeof(T));
+      } else if (block.popcount) {  // selectively copy from left
+        for (int64_t i = 0; i < block.length; ++i) {
+          if (BitUtil::GetBit(cond_data, offset + i)) {
+            out_values[i] = left_data[i];
+          }
+        }
+      }
+
+      offset += block.length;
+      out_values += block.length;
+      left_data += block.length;
+    }
+
+    out->buffers[1] = std::move(out_buf);
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const Scalar& right, ArrayData* out) {
+    // todo impl
+    return Status::OK();
+  }
+
+  static Status Call(KernelContext* ctx, const Scalar& cond, const Scalar& left,
+                     const Scalar& right, Scalar* out) {
+    // todo impl
+    return Status::OK();
+  }
+};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_boolean_type<Type>::value>> {

Review comment:
       nit: `type_traits.h` already has an `enable_if_boolean`




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [arrow] nirandaperera commented on a change in pull request #10377: ARROW-10640: [C++] A "where" kernel to combine two arrays based on a mask

Posted by GitBox <gi...@apache.org>.
nirandaperera commented on a change in pull request #10377:
URL: https://github.com/apache/arrow/pull/10377#discussion_r637962492



##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, cond.buffers[0]->data(),
+                                 cond.offset, len, 0, out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>
+struct IfElseFunctor {};
+
+template <typename Type, bool swap>
+struct IfElseFunctor<Type, swap, enable_if_t<is_number_type<Type>::value>> {
+  using T = typename TypeTraits<Type>::CType;
+
+  static Status Call(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* out) {
+    ARROW_RETURN_NOT_OK(promote_nulls(ctx, cond, left, right, out));
+
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_buf,
+                          ctx->Allocate(cond.length * sizeof(T)));
+    T* out_values = reinterpret_cast<T*>(out_buf->mutable_data());
+
+    // copy right data to out_buff
+    const T* right_data = right.GetValues<T>(1);
+    std::memcpy(out_values, right_data, right.length * sizeof(T));
+
+    const auto* cond_data = cond.buffers[1]->data();  // this is a BoolArray
+    BitBlockCounter bit_counter(cond_data, cond.offset, cond.length);
+
+    // selectively copy values from left data
+    const T* left_data = left.GetValues<T>(1);
+    int64_t offset = cond.offset;
+
+    // todo this can be improved by intrinsics. ex: _mm*_mask_store_e* (vmovdqa*)

Review comment:
       Say, you first copy `right` to `output`. Then, `cond` becomes a mask to store `left` onto `output`. For that there are specialized SIMD instructions. 
   https://software.intel.com/sites/landingpage/IntrinsicsGuide/#cats=Store&text=mask_store&expand=5564,5566
   
   So, we can drop the `BitBlock` objects, and remove all the loops and memcpy inside the `while` loop. We'd have to handle the alignment though. 




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [arrow] nirandaperera commented on pull request #10377: ARROW-10640: [C++] A "where" kernel to combine two arrays based on a mask

Posted by GitBox <gi...@apache.org>.
nirandaperera commented on pull request #10377:
URL: https://github.com/apache/arrow/pull/10377#issuecomment-848097418


   I am closing this PR because there are some major refactors and it would be better to review them fresh. 


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [arrow] nirandaperera commented on a change in pull request #10377: ARROW-10640: [C++] A "where" kernel to combine two arrays based on a mask

Posted by GitBox <gi...@apache.org>.
nirandaperera commented on a change in pull request #10377:
URL: https://github.com/apache/arrow/pull/10377#discussion_r637949150



##########
File path: cpp/src/arrow/compute/kernels/scalar_if_else.cc
##########
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <arrow/compute/api.h>
+#include <arrow/util/bit_block_counter.h>
+#include <arrow/util/bitmap_ops.h>
+
+#include "codegen_internal.h"
+
+namespace arrow {
+using internal::BitBlockCount;
+using internal::BitBlockCounter;
+
+namespace compute {
+
+namespace {
+
+// nulls will be promoted as follows
+// cond.val && (cond.data && left.val || ~cond.data && right.val)
+Status promote_nulls(KernelContext* ctx, const ArrayData& cond, const ArrayData& left,
+                     const ArrayData& right, ArrayData* output) {
+  if (!cond.MayHaveNulls() && !left.MayHaveNulls() && !right.MayHaveNulls()) {
+    return Status::OK();  // no nulls to handle
+  }
+  const int64_t len = cond.length;
+
+  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out_validity, ctx->AllocateBitmap(len));
+  arrow::internal::InvertBitmap(out_validity->data(), 0, len,
+                                out_validity->mutable_data(), 0);
+  if (right.MayHaveNulls()) {
+    // out_validity = right.val && ~cond.data
+    arrow::internal::BitmapAndNot(right.buffers[0]->data(), right.offset,
+                                  cond.buffers[1]->data(), cond.offset, len, 0,
+                                  out_validity->mutable_data());
+  }
+
+  if (left.MayHaveNulls()) {
+    // tmp_buf = left.val && cond.data
+    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> temp_buf,
+                          arrow::internal::BitmapAnd(
+                              ctx->memory_pool(), left.buffers[0]->data(), left.offset,
+                              cond.buffers[1]->data(), cond.offset, len, 0));
+    // out_validity = cond.data && left.val || ~cond.data && right.val
+    arrow::internal::BitmapOr(out_validity->data(), 0, temp_buf->data(), 0, len, 0,
+                              out_validity->mutable_data());
+  }
+
+  if (cond.MayHaveNulls()) {
+    // out_validity &= cond.val
+    ::arrow::internal::BitmapAnd(out_validity->data(), 0, cond.buffers[0]->data(),
+                                 cond.offset, len, 0, out_validity->mutable_data());
+  }
+
+  output->buffers[0] = std::move(out_validity);
+  output->GetNullCount();  // update null count
+  return Status::OK();
+}
+
+template <typename Type, bool swap = false, typename Enable = void>

Review comment:
       My idea is to reuse the impl for cases like `cond, left: Array, right: Scalar` and `cond, left: Scalar, right: Array`. In the second scenario, I can swap left and right and invert the cond without changing the loop mechanism. 




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org