Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2022/08/09 10:10:46 UTC

[GitHub] [incubator-mxnet] bartekkuncer commented on a diff in pull request #21115: [FEATURE] Add query_keys transformer version without split

bartekkuncer commented on code in PR #21115:
URL: https://github.com/apache/incubator-mxnet/pull/21115#discussion_r941110964


##########
src/operator/subgraph/dnnl/dnnl_transformer.cc:
##########
@@ -86,8 +103,20 @@ static bool SgDNNLSelfAttQKInferType(const nnvm::NodeAttrs& attrs,
         << "QuantizedSelfAttentionQK only supports int8 input, while " << in_types->at(0)
         << " is given.";
 
-    TYPE_ASSIGN_CHECK(*in_types, 1, mshadow::kFloat32);
     TYPE_ASSIGN_CHECK(*in_types, 2, mshadow::kFloat32);

Review Comment:
   I understand you left this here as it is common to both paths, but it makes the code harder to read. Please consider moving it next to the rest of the "TYPE_ASSIGN_CHECK" calls.
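
   For illustration, a rough sketch of the grouping I mean - the scalar input indices here are illustrative, inferred from the shape function in this PR, and the exact placement is of course up to you:
   ```cpp
   // Hypothetical regrouping: keep each mode's float32 min/max checks together
   // instead of hoisting the one shared index above the branch.
   if constexpr (qk_mode == qk_common::mode::without_split) {
     TYPE_ASSIGN_CHECK(*in_types, 2, mshadow::kFloat32);
     TYPE_ASSIGN_CHECK(*in_types, 3, mshadow::kFloat32);
     TYPE_ASSIGN_CHECK(*in_types, 4, mshadow::kFloat32);
     TYPE_ASSIGN_CHECK(*in_types, 5, mshadow::kFloat32);
   } else {
     TYPE_ASSIGN_CHECK(*in_types, 1, mshadow::kFloat32);
     TYPE_ASSIGN_CHECK(*in_types, 2, mshadow::kFloat32);
   }
   ```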



##########
src/operator/subgraph/dnnl/dnnl_transformer.cc:
##########
@@ -195,24 +233,28 @@ static bool SgDNNLSelfAttStorageType(const nnvm::NodeAttrs& attrs,
   return DNNLStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, out_attrs);
 }
 
+template <qk_common::mode qk_mode>
 void SgDNNLSelfAttQKOp::Initialize(const OpContext& ctx,
                                    const std::vector<NDArray>& inputs,
                                    const std::vector<OpReqType>& req,
                                    const std::vector<NDArray>& outputs) {
   using namespace dnnl;
 
-  const auto qkv_tensor = inputs[0];
-  const auto out_tensor = outputs[0];
+  const auto input_tensor = inputs[0];
+  const auto out_tensor   = outputs[0];
 
-  const auto qkv_dtype = get_dnnl_type(qkv_tensor.dtype());
+  const auto qkv_dtype = get_dnnl_type(input_tensor.dtype());

Review Comment:
   Since you changed qkv_tensor to input_tensor, shouldn't this one be renamed as well?
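
   E.g., a minimal sketch of the rename (the new name is just a suggestion):
   ```cpp
   // Sketch: keep the local's name in sync with the renamed tensor.
   const auto input_dtype = get_dnnl_type(input_tensor.dtype());
   ```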



##########
src/operator/subgraph/dnnl/dnnl_transformer_qk_common.h:
##########
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_TRANSFORMER_QK_COMMON_H_
+#define MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_TRANSFORMER_QK_COMMON_H_
+
+#if MXNET_USE_ONEDNN == 1
+
+#include <string>
+#include <vector>
+
+#include "operator/contrib/transformer-inl.h"
+#include "operator/numpy/np_matrix_op-inl.h"
+#include "operator/tensor/matrix_op-inl.h"
+#include "operator/subgraph/common.h"
+#include "dnnl_common.h"
+#include "dnnl_subgraph_base-inl.h"
+#include "dnnl_transformer-inl.h"
+
+namespace mxnet {
+namespace op {
+namespace qk_common {
+
+enum SelectStatusTransformerQK {
+  kFail = 0,
+  kStart,
+  kFirstSwapAx,
+  kSecondSwapAx,
+  kFirstReshape,
+  kSecondReshape,
+  kSuccess
+};
+
+// /*
+// kStart ---> kFirstSwapAx ---> kSecondSwapAx ---> kFirstReshape ---> kSecondReshape ---> kSuccess
+// OR
+// kStart ---> kFirstSwapAx ---> kSecondSwapAx ---> kFirstReshape ---> kSuccess
+// Each status except kStart is connected with kFail
+// */
+
+enum mode { include_split = 0, without_split };
+
+inline bool CheckSwapAxisConditionsQK(const BiDirectedNode& input_node) {
+  if (input_node.outputs.size() != 1)
+    return false;
+  return CheckSwapAxisConditions(*input_node.node);
+}
+
+inline bool CheckReshapeConditionsQK(const BiDirectedNode& input_node, const index_t out_index) {
+  if (input_node.outputs.size() != 1)
+    return false;
+  return CheckReshapeConditions(*input_node.node, out_index);
+}
+
+inline bool CheckSplitConditions(const std::vector<const BiDirectedNode*>& matched_list,
+                                 const BiDirectedNode& node) {
+  const SplitParam& param = dmlc::get<SplitParam>(node.node->attrs.parsed);
+
+  if (param.axis != -1 || param.sections != 3 || param.squeeze_axis)
+    return false;
+
+  const auto first_reshape  = (*(matched_list.end() - 2))->node;
+  const auto second_reshape = (*(matched_list.end() - 1))->node;
+  if (first_reshape->op() != Op::Get("_npx_reshape") ||
+      second_reshape->op() != Op::Get("_npx_reshape")) {
+    return false;
+  }
+  // 3 sections - ensure that every output is used only once
+  if (node.outputs.size() == 3 && node.outputs.count(first_reshape) &&
+      node.outputs.count(second_reshape)) {
+    return true;
+  }
+
+  return false;
+}
+
+inline bool Select(SelectStatusTransformerQK* status,

Review Comment:
   Why pointers instead of references?
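
   For reference, the signature with references would read as below (a sketch only - the pointers here may well be deliberate, since some style guides, e.g. Google's C++ guide, prefer pointers for mutated parameters so the mutation is visible at call sites):
   ```cpp
   // Hypothetical reference-based variant of the same entry point.
   inline bool Select(SelectStatusTransformerQK& status,
                      std::vector<const BiDirectedNode*>& matched_list,
                      const BiDirectedNode& seed_node,
                      const std::shared_ptr<NodeAttr>& node_attr);
   ```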



##########
src/operator/subgraph/dnnl/dnnl_transformer_qk_common.h:
##########
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_TRANSFORMER_QK_COMMON_H_
+#define MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_TRANSFORMER_QK_COMMON_H_
+
+#if MXNET_USE_ONEDNN == 1
+
+#include <string>
+#include <vector>
+
+#include "operator/contrib/transformer-inl.h"
+#include "operator/numpy/np_matrix_op-inl.h"
+#include "operator/tensor/matrix_op-inl.h"
+#include "operator/subgraph/common.h"
+#include "dnnl_common.h"
+#include "dnnl_subgraph_base-inl.h"
+#include "dnnl_transformer-inl.h"
+
+namespace mxnet {
+namespace op {
+namespace qk_common {
+
+enum SelectStatusTransformerQK {
+  kFail = 0,
+  kStart,
+  kFirstSwapAx,
+  kSecondSwapAx,
+  kFirstReshape,
+  kSecondReshape,
+  kSuccess
+};
+
+// /*
+// kStart ---> kFirstSwapAx ---> kSecondSwapAx ---> kFirstReshape ---> kSecondReshape ---> kSuccess
+// OR
+// kStart ---> kFirstSwapAx ---> kSecondSwapAx ---> kFirstReshape ---> kSuccess
+// Each status except kStart is connected with kFail
+// */
+
+enum mode { include_split = 0, without_split };
+
+inline bool CheckSwapAxisConditionsQK(const BiDirectedNode& input_node) {
+  if (input_node.outputs.size() != 1)
+    return false;
+  return CheckSwapAxisConditions(*input_node.node);
+}
+
+inline bool CheckReshapeConditionsQK(const BiDirectedNode& input_node, const index_t out_index) {
+  if (input_node.outputs.size() != 1)
+    return false;
+  return CheckReshapeConditions(*input_node.node, out_index);
+}
+
+inline bool CheckSplitConditions(const std::vector<const BiDirectedNode*>& matched_list,
+                                 const BiDirectedNode& node) {
+  const SplitParam& param = dmlc::get<SplitParam>(node.node->attrs.parsed);
+
+  if (param.axis != -1 || param.sections != 3 || param.squeeze_axis)
+    return false;
+
+  const auto first_reshape  = (*(matched_list.end() - 2))->node;
+  const auto second_reshape = (*(matched_list.end() - 1))->node;
+  if (first_reshape->op() != Op::Get("_npx_reshape") ||
+      second_reshape->op() != Op::Get("_npx_reshape")) {
+    return false;
+  }
+  // 3 sections - ensure that every output is used only once
+  if (node.outputs.size() == 3 && node.outputs.count(first_reshape) &&
+      node.outputs.count(second_reshape)) {
+    return true;
+  }
+
+  return false;
+}
+
+inline bool Select(SelectStatusTransformerQK* status,
+                   std::vector<const BiDirectedNode*>* matched_list,
+                   const BiDirectedNode& seed_node,
+                   const std::shared_ptr<NodeAttr>& node_attr) {
+  if (seed_node.node->op() == Op::Get("batch_dot")) {
+    *status = kStart;
+    matched_list->clear();
+    matched_list->push_back(&seed_node);
+    return true;
+  }
+  return false;
+}
+
+template <mode qk_mode>
+bool SelectInput(SelectStatusTransformerQK* status,
+                 std::vector<const BiDirectedNode*>* matched_list,
+                 const BiDirectedNode& n,
+                 const BiDirectedNode& input_node) {
+  if (*status == kFail || *status == kSuccess || input_node.node->is_variable())
+    return false;
+  const auto& raw_input_node = *input_node.node;
+  switch (*status) {
+    case kStart:
+      if (raw_input_node.op() == Op::Get("SwapAxis")) {
+        if (CheckSwapAxisConditionsQK(input_node)) {
+          *status = kFirstSwapAx;
+          matched_list->push_back(&input_node);
+        }
+        return true;
+      }
+      break;
+    case kFirstSwapAx:
+      if (raw_input_node.op() == Op::Get("SwapAxis")) {
+        if (CheckSwapAxisConditionsQK(input_node)) {
+          *status = kSecondSwapAx;
+          matched_list->push_back(&input_node);
+          return true;
+        }
+      }
+      break;
+    case kSecondSwapAx:
+      if (raw_input_node.op() == Op::Get("_npx_reshape")) {
+        // input to reshape must be first or second output from split
+        if (CheckReshapeConditionsQK(input_node, 0) || CheckReshapeConditionsQK(input_node, 1)) {
+          *status = kFirstReshape;
+          matched_list->push_back(&input_node);
+          return true;
+        }
+      }
+      break;
+    case kFirstReshape:
+      if (raw_input_node.op() == Op::Get("_npx_reshape")) {
+        if (CheckReshapeConditionsQK(input_node, 0) || CheckReshapeConditionsQK(input_node, 1)) {
+          if constexpr (qk_mode == mode::include_split) {
+            *status = kSecondReshape;
+          } else {
+            *status = kSuccess;
+          }
+          matched_list->push_back(&input_node);
+          return true;
+        }
+      }
+      break;
+    case kSecondReshape:
+      if (raw_input_node.op() == Op::Get("_split_v2") &&
+          CheckSplitConditions(*matched_list, input_node)) {
+        *status = kSuccess;
+        return true;
+      }
+      break;
+    default:
+      *status = kFail;
+      return false;
+  }
+  return false;
+}
+
+inline std::vector<BiDirectedNode*> Filter(const SelectStatusTransformerQK& status,
+                                           const std::vector<const BiDirectedNode*>& matched_list,
+                                           const std::vector<BiDirectedNode*>& candidates) {
+  if (status != kSuccess) {
+    return std::vector<BiDirectedNode*>(0);
+  } else {
+    std::vector<BiDirectedNode*> ret;
+    for (auto i : matched_list) {
+      auto non_const_i = const_cast<BiDirectedNode*>(i);
+      if (std::find(candidates.begin(), candidates.end(), non_const_i) != candidates.end()) {
+        ret.push_back(non_const_i);
+      }
+    }
+    return ret;

Review Comment:
   Why not just return candidates?
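
   I.e., assuming every node in matched_list is guaranteed to be among the candidates whenever status == kSuccess, the whole body could collapse to something like the sketch below (matched_list would then be unused):
   ```cpp
   // Simplification sketch - only valid under the assumption above.
   inline std::vector<BiDirectedNode*> Filter(const SelectStatusTransformerQK& status,
                                              const std::vector<const BiDirectedNode*>& matched_list,
                                              const std::vector<BiDirectedNode*>& candidates) {
     return status == kSuccess ? candidates : std::vector<BiDirectedNode*>();
   }
   ```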



##########
src/operator/subgraph/dnnl/dnnl_transformer.cc:
##########
@@ -37,46 +38,62 @@ namespace op {
 
 DMLC_REGISTER_PARAMETER(DNNLSelfAttParam);
 
+template <qk_common::mode qk_mode>
 static bool SgDNNLSelfAttShape(const NodeAttrs& attrs,
                                mxnet::ShapeVector* in_shape,
                                mxnet::ShapeVector* out_shape) {
   const auto& params = nnvm::get<DNNLSelfAttParam>(attrs.parsed);
-  auto qkv_shape     = in_shape->at(0);
-  CHECK_EQ(qkv_shape.ndim(), 3U)
+  uint in_shape_num  = 1;
+  auto input_shape   = in_shape->at(0);
+  CHECK_EQ(input_shape.ndim(), 3U)
       << "Input queries_keys_values should be 3D in batch-seq_length-proj_dim, "
-      << "but the given tensor is " << qkv_shape.ndim() << "D";
+      << "but the given tensor is " << input_shape.ndim() << "D";
 
-  if (params.quantized) {
-    CHECK_EQ(in_shape->size(), 3U) << "Input: [queries_keys_values, min_qkv, max_qkv] "
-                                   << "- currently have " << in_shape->size() << " inputs";
+  if constexpr (qk_mode == qk_common::mode::without_split) {
+    CHECK_EQ(in_shape->at(0), in_shape->at(1));
+    in_shape_num = 2;
+  }
 
-    SHAPE_ASSIGN_CHECK(*in_shape, 1, mxnet::TShape({1}));
+  if (params.quantized) {
+    CHECK_EQ(in_shape->size(), 3 * in_shape_num)
+        << "Input: [queries_keys_values, min_qkv, max_qkv] "
+        << "- currently have " << in_shape->size() << " inputs";
     SHAPE_ASSIGN_CHECK(*in_shape, 2, mxnet::TShape({1}));
+    if constexpr (qk_mode == qk_common::mode::without_split) {
+      SHAPE_ASSIGN_CHECK(*in_shape, 3, mxnet::TShape({1}));
+      SHAPE_ASSIGN_CHECK(*in_shape, 4, mxnet::TShape({1}));
+      SHAPE_ASSIGN_CHECK(*in_shape, 5, mxnet::TShape({1}));
+    } else {
+      SHAPE_ASSIGN_CHECK(*in_shape, 1, mxnet::TShape({1}));
+    }
 
     out_shape->resize(3);
-    SHAPE_ASSIGN_CHECK(
-        *out_shape, 0, mxnet::TShape({qkv_shape[0], params.heads, qkv_shape[1], qkv_shape[1]}));
     if (!params.enabled_float_output.has_value()) {
       SHAPE_ASSIGN_CHECK(*out_shape, 1, mxnet::TShape({1}));  // min output
       SHAPE_ASSIGN_CHECK(*out_shape, 2, mxnet::TShape({1}));  // max output
     }
   } else {
-    CHECK_EQ(in_shape->size(), 1U)
+    CHECK_EQ(in_shape->size(), in_shape_num)
         << "Input:[queries_keys_values] - currently have " << in_shape->size() << " inputs";
     out_shape->resize(1);
-    SHAPE_ASSIGN_CHECK(
-        *out_shape, 0, mxnet::TShape({qkv_shape[0], params.heads, qkv_shape[1], qkv_shape[1]}));
   }
 
+  SHAPE_ASSIGN_CHECK(
+      *out_shape, 0, mxnet::TShape({input_shape[0], params.heads, input_shape[1], input_shape[1]}));
   return true;
 }
 
+template <qk_common::mode qk_mode>
 static bool SgDNNLSelfAttQKInferType(const nnvm::NodeAttrs& attrs,
                                      std::vector<int>* in_types,
                                      std::vector<int>* out_types) {
   const auto& params = nnvm::get<DNNLSelfAttParam>(attrs.parsed);
+  uint in_shape_num  = 1;
+  if constexpr (qk_mode == qk_common::mode::without_split) {
+    in_shape_num = 2;
+  }

Review Comment:
   ```suggestion
     const uint in_shape_num  = qk_mode == qk_common::mode::without_split ? 2 : 1;
   ```



##########
src/operator/subgraph/dnnl/dnnl_transformer_qk_common.h:
##########
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_TRANSFORMER_QK_COMMON_H_
+#define MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_TRANSFORMER_QK_COMMON_H_
+
+#if MXNET_USE_ONEDNN == 1
+
+#include <string>
+#include <vector>
+
+#include "operator/contrib/transformer-inl.h"
+#include "operator/numpy/np_matrix_op-inl.h"
+#include "operator/tensor/matrix_op-inl.h"
+#include "operator/subgraph/common.h"
+#include "dnnl_common.h"
+#include "dnnl_subgraph_base-inl.h"
+#include "dnnl_transformer-inl.h"
+
+namespace mxnet {
+namespace op {
+namespace qk_common {
+
+enum SelectStatusTransformerQK {
+  kFail = 0,
+  kStart,
+  kFirstSwapAx,
+  kSecondSwapAx,
+  kFirstReshape,
+  kSecondReshape,
+  kSuccess
+};
+
+// /*
+// kStart ---> kFirstSwapAx ---> kSecondSwapAx ---> kFirstReshape ---> kSecondReshape ---> kSuccess
+// OR
+// kStart ---> kFirstSwapAx ---> kSecondSwapAx ---> kFirstReshape ---> kSuccess
+// Each status except kStart is connected with kFail

Review Comment:
   ```suggestion
   // Each status except kStart is connected with kFail.
   ```



##########
src/operator/subgraph/dnnl/dnnl_transformer.cc:
##########
@@ -28,6 +28,7 @@
 #include "operator/tensor/elemwise_unary_op.h"
 #include "operator/subgraph/common.h"
 #include "dnnl_transformer-inl.h"
+#include "dnnl_transformer_qk_common.h"
 
 // 3 tensors within one (queries key values) =

Review Comment:
   ```suggestion
   // 3 tensors within one (queries key values)
   ```



##########
src/operator/subgraph/dnnl/dnnl_transformer.cc:
##########
@@ -195,24 +233,28 @@ static bool SgDNNLSelfAttStorageType(const nnvm::NodeAttrs& attrs,
   return DNNLStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, out_attrs);
 }
 
+template <qk_common::mode qk_mode>
 void SgDNNLSelfAttQKOp::Initialize(const OpContext& ctx,
                                    const std::vector<NDArray>& inputs,
                                    const std::vector<OpReqType>& req,
                                    const std::vector<NDArray>& outputs) {
   using namespace dnnl;
 
-  const auto qkv_tensor = inputs[0];
-  const auto out_tensor = outputs[0];
+  const auto input_tensor = inputs[0];
+  const auto out_tensor   = outputs[0];
 
-  const auto qkv_dtype = get_dnnl_type(qkv_tensor.dtype());
+  const auto qkv_dtype = get_dnnl_type(input_tensor.dtype());
 
   const memory::dim heads          = param_.heads;
   const memory::dim sequences      = inputs[0].shape()[0];
   const memory::dim qkv_seq_len    = inputs[0].shape()[1];
   const memory::dim output_lin_dim = inputs[0].shape()[2];

Review Comment:
   ```suggestion
     const memory::dim sequences      = input_tensor.shape()[0];
     const memory::dim qkv_seq_len    = input_tensor.shape()[1];
     const memory::dim output_lin_dim = input_tensor.shape()[2];
   ```



##########
src/operator/subgraph/dnnl/dnnl_transformer.cc:
##########
@@ -86,8 +103,20 @@ static bool SgDNNLSelfAttQKInferType(const nnvm::NodeAttrs& attrs,
         << "QuantizedSelfAttentionQK only supports int8 input, while " << in_types->at(0)
         << " is given.";
 
-    TYPE_ASSIGN_CHECK(*in_types, 1, mshadow::kFloat32);
     TYPE_ASSIGN_CHECK(*in_types, 2, mshadow::kFloat32);
+    if constexpr (qk_mode == qk_common::mode::without_split) {
+      if (in_types->at(1) == mshadow::kBfloat16) {
+        return false;
+      }
+      CHECK(in_types->at(1) == mshadow::kInt8)
+          << "QuantizedSelfAttentionQK only supports int8 input, while " << in_types->at(0)

Review Comment:
   ```suggestion
             << "QuantizedSelfAttentionQK only supports int8 input, while " << in_types->at(1)
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@mxnet.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org