Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2022/02/16 11:42:10 UTC

[GitHub] [incubator-mxnet] DominikaJedynak commented on a change in pull request #20862: Add oneDNN support for "where" operator

DominikaJedynak commented on a change in pull request #20862:
URL: https://github.com/apache/incubator-mxnet/pull/20862#discussion_r805424188



##########
File path: src/operator/nn/dnnl/dnnl_where.cc
##########
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file dnnl_where.cc
+ */
+
+#if MXNET_USE_ONEDNN == 1
+
+#include <algorithm>
+#include <set>
+#include <unordered_set>
+#include "dnnl_where-inl.h"
+#include "operator/operator_common.h"
+
+namespace mxnet {
+namespace op {
+
+bool SupportDNNLWhere(const std::vector<NDArray>& inputs) {
+  static const std::set<int> supported_dtypes = {
+      mshadow::kFloat32, mshadow::kBfloat16, mshadow::kInt8, mshadow::kUint8};
+  for (int i = 0; i < inputs.size(); ++i) {
+    if (!supported_dtypes.count(inputs[i].dtype()) || inputs[i].shape().Size() <= 0 ||
+        inputs[i].shape().ndim() <= 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+void DNNLWhereForward(const nnvm::NodeAttrs& attrs,
+                      const OpContext& ctx,
+                      const std::vector<NDArray>& inputs,
+                      const std::vector<OpReqType>& req,
+                      const std::vector<NDArray>& outputs) {
+  TmpMemMgr::Get()->Init(ctx.requested[0]);
+  const auto tensors = DNNLWhereFwd::Tensors(inputs, outputs[0]);
+  const auto fwd     = DNNLWhereFwd::GetCached(tensors);
+  fwd.Execute(tensors, req, ctx);
+}
+
+DNNLWhereFwd::Tensors::Tensors(const std::vector<NDArray>& inputs, const NDArray& output)
+    : condition(inputs[0]), left(inputs[1]), right(inputs[2]), output(output) {}
+
+DNNLWhereFwd DNNLWhereFwd::GetCached(const Tensors& tensors) {
+  using where_op_fwd_map = std::unordered_map<OpSignature, DNNLWhereFwd, OpHash>;
+#if DMLC_CXX11_THREAD_LOCAL
+  static thread_local where_op_fwd_map fwds;
+#else
+  static MX_THREAD_LOCAL where_op_fwd_map fwds;
+#endif
+
+  OpSignature key;
+  key.AddSign(tensors.condition);
+  key.AddSign(tensors.left);
+  key.AddSign(tensors.right);
+  key.AddSign(tensors.output);
+
+  auto it = fwds.find(key);
+  if (it == fwds.end()) {
+    DNNLWhereFwd fwd(tensors);
+    it = AddToCache(&fwds, key, fwd);
+  }
+  return it->second;
+}
+
+static mxnet::TShape GetBroadcastableShape(const mxnet::TShape& in_shape,

Review comment:
       Maybe add short documentation for functions whose purpose is not obvious?
   https://wiki.ith.intel.com/pages/viewpage.action?pageId=2015909205
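       For instance, GetBroadcastableShape from the diff above could get a short
       Doxygen-style comment in the same style as the \file header (the wording
       below is only a sketch, not a proposed final text):

           /*!
            * \brief Left-pad in_shape with 1s so it has as many dimensions as
            *        out_shape and can be broadcast against it.
            * \param in_shape  shape of an input tensor
            * \param out_shape shape of the operator's output
            * \return in_shape padded with leading 1s to out_shape.ndim() dimensions
            */
           static mxnet::TShape GetBroadcastableShape(const mxnet::TShape& in_shape,
                                                      const mxnet::TShape& out_shape);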

##########
File path: src/operator/nn/dnnl/dnnl_where.cc
##########
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file dnnl_where.cc
+ */
+
+#if MXNET_USE_ONEDNN == 1
+
+#include <algorithm>
+#include <set>
+#include <unordered_set>
+#include "dnnl_where-inl.h"
+#include "operator/operator_common.h"
+
+namespace mxnet {
+namespace op {
+
+bool SupportDNNLWhere(const std::vector<NDArray>& inputs) {
+  static const std::set<int> supported_dtypes = {
+      mshadow::kFloat32, mshadow::kBfloat16, mshadow::kInt8, mshadow::kUint8};
+  for (int i = 0; i < inputs.size(); ++i) {
+    if (!supported_dtypes.count(inputs[i].dtype()) || inputs[i].shape().Size() <= 0 ||
+        inputs[i].shape().ndim() <= 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+void DNNLWhereForward(const nnvm::NodeAttrs& attrs,
+                      const OpContext& ctx,
+                      const std::vector<NDArray>& inputs,
+                      const std::vector<OpReqType>& req,
+                      const std::vector<NDArray>& outputs) {
+  TmpMemMgr::Get()->Init(ctx.requested[0]);
+  const auto tensors = DNNLWhereFwd::Tensors(inputs, outputs[0]);

Review comment:
       Maybe it would be better to pass outputs as an argument and unpack it inside the Tensors constructor, for consistency with how inputs is handled?
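       For example (just a sketch of the suggested signature, reusing the names from the diff):

           DNNLWhereFwd::Tensors::Tensors(const std::vector<NDArray>& inputs,
                                          const std::vector<NDArray>& outputs)
               : condition(inputs[0]), left(inputs[1]), right(inputs[2]), output(outputs[0]) {}

       and the call site in DNNLWhereForward would then become:

           const auto tensors = DNNLWhereFwd::Tensors(inputs, outputs);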

##########
File path: src/operator/nn/dnnl/dnnl_where.cc
##########
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file dnnl_where.cc
+ */
+
+#if MXNET_USE_ONEDNN == 1
+
+#include <algorithm>
+#include <set>
+#include <unordered_set>
+#include "dnnl_where-inl.h"
+#include "operator/operator_common.h"
+
+namespace mxnet {
+namespace op {
+
+bool SupportDNNLWhere(const std::vector<NDArray>& inputs) {
+  static const std::set<int> supported_dtypes = {
+      mshadow::kFloat32, mshadow::kBfloat16, mshadow::kInt8, mshadow::kUint8};
+  for (int i = 0; i < inputs.size(); ++i) {
+    if (!supported_dtypes.count(inputs[i].dtype()) || inputs[i].shape().Size() <= 0 ||
+        inputs[i].shape().ndim() <= 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+void DNNLWhereForward(const nnvm::NodeAttrs& attrs,
+                      const OpContext& ctx,
+                      const std::vector<NDArray>& inputs,
+                      const std::vector<OpReqType>& req,
+                      const std::vector<NDArray>& outputs) {
+  TmpMemMgr::Get()->Init(ctx.requested[0]);
+  const auto tensors = DNNLWhereFwd::Tensors(inputs, outputs[0]);
+  const auto fwd     = DNNLWhereFwd::GetCached(tensors);
+  fwd.Execute(tensors, req, ctx);
+}
+
+DNNLWhereFwd::Tensors::Tensors(const std::vector<NDArray>& inputs, const NDArray& output)
+    : condition(inputs[0]), left(inputs[1]), right(inputs[2]), output(output) {}
+
+DNNLWhereFwd DNNLWhereFwd::GetCached(const Tensors& tensors) {
+  using where_op_fwd_map = std::unordered_map<OpSignature, DNNLWhereFwd, OpHash>;
+#if DMLC_CXX11_THREAD_LOCAL
+  static thread_local where_op_fwd_map fwds;
+#else
+  static MX_THREAD_LOCAL where_op_fwd_map fwds;
+#endif
+
+  OpSignature key;
+  key.AddSign(tensors.condition);
+  key.AddSign(tensors.left);
+  key.AddSign(tensors.right);
+  key.AddSign(tensors.output);
+
+  auto it = fwds.find(key);
+  if (it == fwds.end()) {
+    DNNLWhereFwd fwd(tensors);
+    it = AddToCache(&fwds, key, fwd);
+  }
+  return it->second;
+}
+
+static mxnet::TShape GetBroadcastableShape(const mxnet::TShape& in_shape,
+                                           const mxnet::TShape& out_shape) {
+  if (in_shape == out_shape) {
+    return in_shape;
+  }
+
+  mxnet::TShape broadcastable_in_shape(out_shape.ndim(), 1);
+  const int lack_dims = out_shape.ndim() - in_shape.ndim();
+  for (int i = lack_dims; i < out_shape.ndim(); ++i) {
+    broadcastable_in_shape[i] = in_shape[i - lack_dims];
+  }
+  return broadcastable_in_shape;
+}
+
+DNNLWhereFwd::DNNLWhereFwd(const Tensors& tensors) {
+  const auto cpu_engine = CpuEngine::Get()->get_engine();
+
+  const auto cnd = tensors.condition;
+  const auto lhs = tensors.left;
+  const auto rhs = tensors.right;
+  const auto out = tensors.output;
+
+  const auto cnd_shape = GetBroadcastableShape(cnd.shape(), out.shape());
+  const auto lhs_shape = GetBroadcastableShape(lhs.shape(), out.shape());
+  const auto rhs_shape = GetBroadcastableShape(rhs.shape(), out.shape());
+
+  const auto& cnd_dtype = get_dnnl_type(cnd.dtype());
+  const auto& inp_dtype = get_dnnl_type(lhs.dtype());
+  const auto& def_ft    = static_cast<dnnl::memory::format_tag>(GetDefaultFormat(lhs_shape.ndim()));
+
+  const auto& cnd_dims    = dnnl::memory::dims(cnd_shape.begin(), cnd_shape.end());
+  const auto& lhs_dims    = dnnl::memory::dims(lhs_shape.begin(), lhs_shape.end());
+  const auto& rhs_dims    = dnnl::memory::dims(rhs_shape.begin(), rhs_shape.end());
+  const auto& out_dims    = dnnl::memory::dims(out.shape().begin(), out.shape().end());
+  const auto& scalar_dims = dnnl::memory::dims(cnd_shape.ndim(), 1);  // broadcastable scalar
+
+  auto cnd_md    = dnnl::memory::desc(cnd_dims, cnd_dtype, def_ft);
+  auto lhs_md    = dnnl::memory::desc(lhs_dims, inp_dtype, def_ft);
+  auto rhs_md    = dnnl::memory::desc(rhs_dims, inp_dtype, def_ft);
+  auto out_md    = dnnl::memory::desc(out_dims, inp_dtype, def_ft);
+  auto scalar_md = dnnl::memory::desc(scalar_dims, cnd_dtype, def_ft);
+
+  binary_eq_zero_pd = dnnl::binary::primitive_desc(
+      dnnl::binary::desc(dnnl::algorithm::binary_ne, cnd_md, scalar_md, cnd_md), cpu_engine);

Review comment:
       The names are confusing: binary_eq_zero_pd uses the binary_ne algorithm and actually checks whether entries are different from 0.
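       For example, a name that matches the algorithm (just a sketch) could be:

           // produces 1 where condition != 0, matching dnnl::algorithm::binary_ne
           binary_ne_zero_pd = dnnl::binary::primitive_desc(
               dnnl::binary::desc(dnnl::algorithm::binary_ne, cnd_md, scalar_md, cnd_md), cpu_engine);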




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org