Posted to commits@mxnet.apache.org by bg...@apache.org on 2022/08/12 07:25:47 UTC

[incubator-mxnet] branch master updated: Add support for bool data type for condition in where operator (#21103)

This is an automated email from the ASF dual-hosted git repository.

bgawrych pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 736313f4e7 Add support for bool data type for condition in where operator (#21103)
736313f4e7 is described below

commit 736313f4e7e35563418df2285ff521242324ed1a
Author: bgawrych <ba...@intel.com>
AuthorDate: Fri Aug 12 09:25:31 2022 +0200

    Add support for bool data type for condition in where operator (#21103)
    
    * Add support for bool data type for condition in where operator
    
    * Update src/operator/nn/dnnl/dnnl_where.cc
    
    Co-authored-by: bartekkuncer <ba...@intel.com>
    
    * Update src/operator/nn/dnnl/dnnl_where.cc
    
    Co-authored-by: bartekkuncer <ba...@intel.com>
    
    * apply review comment
---
 src/operator/nn/dnnl/dnnl_base-inl.h |  3 ++-
 src/operator/nn/dnnl/dnnl_where.cc   | 46 +++++++++++++++++++++++++++++-------
 2 files changed, 40 insertions(+), 9 deletions(-)
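
For context, the change below lets the oneDNN-accelerated where path accept a boolean 'condition' tensor directly on CPU. A minimal usage sketch from the Python side (a hedged illustration, assuming an MXNet 2.x build with the NumPy-compatible mx.np frontend; the array values are illustrative only):

    from mxnet import np, npx

    npx.set_np()  # enable NumPy-compatible array semantics

    cond = np.array([True, False, True])   # bool 'condition' tensor
    x = np.array([1.0, 2.0, 3.0])
    y = np.array([10.0, 20.0, 30.0])

    # With this change a bool condition can stay bool and still be handled by the
    # oneDNN backend (treated internally as uint8) instead of falling back to the
    # reference CPU implementation.
    out = np.where(cond, x, y)             # expected: [1., 20., 3.]
    print(out)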

diff --git a/src/operator/nn/dnnl/dnnl_base-inl.h b/src/operator/nn/dnnl/dnnl_base-inl.h
index 7cf4eee5c5..ea43a02165 100644
--- a/src/operator/nn/dnnl/dnnl_base-inl.h
+++ b/src/operator/nn/dnnl/dnnl_base-inl.h
@@ -276,7 +276,7 @@ void DNNLMemorySum(const dnnl::memory& arr1, const dnnl::memory& arr2, const dnn
 
 static int GetTypeSize(int dtype) {
   int size = -1;
-  MSHADOW_TYPE_SWITCH(dtype, DType, { size = sizeof(DType); });
+  MSHADOW_TYPE_SWITCH_WITH_BOOL(dtype, DType, { size = sizeof(DType); });
   return size;
 }
 
@@ -298,6 +298,7 @@ static inline dnnl::memory::data_type get_dnnl_type(int dtype) {
     case mshadow::kInt8:
       return dnnl::memory::data_type::s8;
     case mshadow::kUint8:
+    case mshadow::kBool:
       return dnnl::memory::data_type::u8;
     default:
       LOG(FATAL) << "unknown type for oneDNN :" << static_cast<int>(dtype);
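
The get_dnnl_type change above maps mshadow::kBool onto oneDNN's u8, which relies on a bool element being stored as a single byte holding 0 or 1. A quick standalone illustration of that layout assumption using NumPy (not MXNet code; included only to make the reinterpretation explicit):

    import numpy as np

    cond = np.array([True, False, True])
    assert cond.itemsize == 1                         # each bool occupies one byte
    assert cond.view(np.uint8).tolist() == [1, 0, 1]  # the same bytes read as uint8 are 0/1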
diff --git a/src/operator/nn/dnnl/dnnl_where.cc b/src/operator/nn/dnnl/dnnl_where.cc
index 8e225b471e..1d9c08a541 100644
--- a/src/operator/nn/dnnl/dnnl_where.cc
+++ b/src/operator/nn/dnnl/dnnl_where.cc
@@ -34,7 +34,15 @@ namespace op {
 
 // Support for https://oneapi-src.github.io/oneDNN/v2.6/dev_guide_binary.html
 bool SupportDNNLWhere(const std::vector<NDArray>& inputs) {
-  return SupportDNNL<DNNLTypeMode::NoInt32, DNNLTensorsDtypes::Mixed>(inputs);
+  if (inputs[0].dtype() == mshadow::kBool) {
+    // oneDNN does not natively support the bool type; however, this operator was written
+    // to allow a bool 'condition' tensor - its data will be treated as uint8
+    return SupportDNNLShape<1, 12>(inputs[0].shape()) &&
+           SupportDNNL<DNNLTypeMode::NoInt32, DNNLTensorsDtypes::AllSame>({inputs[1], inputs[2]});
+  }
+
+  return SupportDNNL<DNNLTypeMode::NoInt32>(inputs[0]) &&
+         SupportDNNL<DNNLTypeMode::NoInt32, DNNLTensorsDtypes::AllSame>({inputs[1], inputs[2]});
 }
 
 void DNNLWhereForward(const nnvm::NodeAttrs& attrs,
@@ -95,6 +103,25 @@ static mxnet::TShape GetBroadcastableShape(const mxnet::TShape& in_shape,
   return broadcastable_in_shape;
 }
 
+/*!
+ * \brief Create shape vector based on two input shapes
+ * \param first_shape first input shape
+ * \param second_shape second input shape
+ * \return deduced broadcasted shape based on first_shape and second_shape
+ */
+static mxnet::TShape GetBroadcastedShape(const mxnet::TShape& first_shape,
+                                         const mxnet::TShape& second_shape) {
+  if (first_shape == second_shape) {
+    return first_shape;
+  }
+
+  mxnet::TShape dst_shape(first_shape.ndim(), 1);
+  for (int i = 0; i < first_shape.ndim(); ++i) {
+    dst_shape[i] = first_shape[i] == 1 ? second_shape[i] : first_shape[i];
+  }
+  return dst_shape;
+}
+
 DNNLWhereFwd::DNNLWhereFwd(const Tensors& tensors) {
   const auto cpu_engine = CpuEngine::Get()->get_engine();
 
@@ -107,7 +134,8 @@ DNNLWhereFwd::DNNLWhereFwd(const Tensors& tensors) {
   const auto lhs_shape = GetBroadcastableShape(lhs.shape(), out.shape());
   const auto rhs_shape = GetBroadcastableShape(rhs.shape(), out.shape());
 
-  const auto& cnd_dtype = get_dnnl_type(cnd.dtype());
+  const auto& cnd_dtype =
+      cnd.dtype() != mshadow::kBool ? get_dnnl_type(cnd.dtype()) : dnnl::memory::data_type::u8;
   const auto& inp_dtype = get_dnnl_type(lhs.dtype());
   const auto& def_ft    = static_cast<dnnl::memory::format_tag>(GetDefaultFormat(lhs_shape.ndim()));
 
@@ -129,14 +157,16 @@ DNNLWhereFwd::DNNLWhereFwd(const Tensors& tensors) {
       dnnl::binary::desc(dnnl::algorithm::binary_eq, cnd_md, scalar_md, cnd_md), cpu_engine);
 
   // if broadcast is needed output must be larger in size
-  auto lmask_dim  = lhs_shape.Size() > cnd_shape.Size() ? lhs_dims : cnd_dims;
-  auto lmask_md   = dnnl::memory::desc(lmask_dim, inp_dtype, def_ft);
-  binary_mul_l_pd = dnnl::binary::primitive_desc(
+  const auto lmask_shape = GetBroadcastedShape(lhs_shape, cnd_shape);
+  const auto lmask_dim   = dnnl::memory::dims(lmask_shape.begin(), lmask_shape.end());
+  auto lmask_md          = dnnl::memory::desc(lmask_dim, inp_dtype, def_ft);
+  binary_mul_l_pd        = dnnl::binary::primitive_desc(
       dnnl::binary::desc(dnnl::algorithm::binary_mul, lhs_md, cnd_md, lmask_md), cpu_engine);
 
-  auto rmask_dim  = rhs_shape.Size() > cnd_shape.Size() ? rhs_dims : cnd_dims;
-  auto rmask_md   = dnnl::memory::desc(rmask_dim, inp_dtype, def_ft);
-  binary_mul_r_pd = dnnl::binary::primitive_desc(
+  const auto rmask_shape = GetBroadcastedShape(rhs_shape, cnd_shape);
+  const auto rmask_dim   = dnnl::memory::dims(rmask_shape.begin(), rmask_shape.end());
+  auto rmask_md          = dnnl::memory::desc(rmask_dim, inp_dtype, def_ft);
+  binary_mul_r_pd        = dnnl::binary::primitive_desc(
       dnnl::binary::desc(dnnl::algorithm::binary_mul, rhs_md, cnd_md, rmask_md), cpu_engine);
 
   binary_sum_pd = dnnl::binary::primitive_desc(