Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/08/21 15:06:46 UTC

[GitHub] anirudh2290 closed pull request #11831: [MXNET-484] MKLDNN C++ test for LRN operator

anirudh2290 closed pull request #11831: [MXNET-484] MKLDNN C++ test for LRN operator
URL: https://github.com/apache/incubator-mxnet/pull/11831
 
 
   

This is a PR merged from a forked repository. Because GitHub hides the original diff once a pull request from a fork is merged, the diff is reproduced below for the sake of provenance:

diff --git a/src/operator/nn/lrn-inl.h b/src/operator/nn/lrn-inl.h
index cb441de9927..63044959812 100644
--- a/src/operator/nn/lrn-inl.h
+++ b/src/operator/nn/lrn-inl.h
@@ -114,6 +114,7 @@ void LRNBackward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
                  const TBlob &out_grad, const TBlob &in_data,
                  const TBlob &out_norm, const OpReqType &req,
                  const TBlob &in_grad) {
+  // LRNBackward does not support kAddTo or kWriteInplace
   using namespace mshadow;
   using namespace mshadow::expr;
   const LRNParam& param_ = nnvm::get<LRNParam>(attrs.parsed);
diff --git a/src/operator/nn/mkldnn/mkldnn_lrn-inl.h b/src/operator/nn/mkldnn/mkldnn_lrn-inl.h
index adb72a2a9c4..4b179a7fbc9 100644
--- a/src/operator/nn/mkldnn/mkldnn_lrn-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_lrn-inl.h
@@ -61,7 +61,7 @@ inline lrn_forward::primitive_desc GetLRNFwdDesc(const LRNParam &param,
 
 inline mkldnn::lrn_backward::primitive_desc
 GetLRNBwd(const LRNParam &param,
-          const mkldnn::memory::desc &diff_in_md,
+          const mkldnn::memory::desc &data_in_md,
           const mkldnn::memory::desc &diff_md,
           const lrn_forward::primitive_desc &lrnFwd_desc) {
   mkldnn::engine &engine = CpuEngine::Get()->get_engine();
@@ -71,7 +71,7 @@ GetLRNBwd(const LRNParam &param,
   const int nsize = param.nsize;
   const float k = param.knorm;
 
-  lrn_backward::desc lrnBwd_desc(alg, diff_in_md,
+  lrn_backward::desc lrnBwd_desc(alg, data_in_md,
                 diff_md, nsize, alpha, beta, k);
   return mkldnn::lrn_backward::primitive_desc(lrnBwd_desc,
                                engine, lrnFwd_desc);
@@ -92,16 +92,18 @@ class MKLDNNLRNFwd {
 
   ~MKLDNNLRNFwd() {}
 
-  void SetDataHandle(const NDArray &data,
-                     const NDArray &output);
+  void SetNewMem(const NDArray &data,
+                 const NDArray &output,
+                 const OpReqType req);
 
-  void Execute();
+  void Execute(const NDArray &out_data);
 
  private:
   std::shared_ptr<mkldnn::lrn_forward> fwd;
   std::shared_ptr<mkldnn::memory> in_mem;
   std::shared_ptr<mkldnn::memory> out_mem;
   std::shared_ptr<mkldnn::memory> ws_mem;
+  mkldnn_output_t output_mem_t;
   bool is_train;
 
  private:
@@ -131,17 +133,18 @@ void MKLDNNLRNFwd::_Init(const LRNParam &param,
   }
 }
 
-void MKLDNNLRNFwd::SetDataHandle(const NDArray &in_data,
-                                 const NDArray &out_data) {
-  const mkldnn::memory *in_data_mem   = in_data.GetMKLDNNData();
-  mkldnn::memory *out_data_mem  = const_cast<NDArray&>(out_data).CreateMKLDNNData(
-                       this->out_mem->get_primitive_desc());
+void MKLDNNLRNFwd::SetNewMem(const NDArray &in_data,
+                             const NDArray &out_data,
+                             const OpReqType req) {
+  const mkldnn::memory *in_data_mem = in_data.GetMKLDNNData();
+  output_mem_t = CreateMKLDNNMem(out_data, this->out_mem->get_primitive_desc(), req);
   this->in_mem->set_data_handle(in_data_mem->get_data_handle());
-  this->out_mem->set_data_handle(out_data_mem->get_data_handle());
+  this->out_mem->set_data_handle(output_mem_t.second->get_data_handle());
 }
 
-void MKLDNNLRNFwd::Execute() {
+void MKLDNNLRNFwd::Execute(const NDArray &out_data) {
   MKLDNNStream::Get()->RegisterPrim(*(this->fwd));
+  CommitOutput(out_data, output_mem_t);
   MKLDNNStream::Get()->Submit();
 }
 // End of LRN Class and its functions
@@ -187,9 +190,12 @@ void MKLDNNLRNForward(const OpContext &ctx,
                       const NDArray &in_data,
                       const OpReqType req,
                       const NDArray &out_data) {
-  MKLDNNLRNFwd fwd = GetLRNFwd(param, ctx, in_data);
-  fwd.SetDataHandle(in_data, out_data);
-  fwd.Execute();
+  auto in_buffer = in_data;
+  if (in_buffer.IsView() && in_buffer.IsMKLDNNData())
+    in_buffer = in_buffer.Reorder2Default();
+  MKLDNNLRNFwd fwd = GetLRNFwd(param, ctx, in_buffer);
+  fwd.SetNewMem(in_buffer, out_data, req);
+  fwd.Execute(out_data);
 }
 
 void MKLDNNLRNBackward(const OpContext &ctx, const LRNParam &param,
@@ -200,8 +206,15 @@ void MKLDNNLRNBackward(const OpContext &ctx, const LRNParam &param,
   if (req == kNullOp) {
     return;
   }
+
+  // TODO(alex): (MXNET-846) figure out why in_grad output is incorrect when in_data is nchw8c
+  auto in_buffer = in_data;
+  if (in_buffer.IsMKLDNNData()) {
+    in_buffer = in_data.Reorder2Default();
+  }
+
   // Repeat FW for getting workspace
-  const mkldnn::memory *data_mem = in_data.GetMKLDNNData();
+  const mkldnn::memory *data_mem = in_buffer.GetMKLDNNData();
   const mkldnn::memory::desc data_md = data_mem->get_primitive_desc().desc();
   const lrn_forward::primitive_desc pdesc_fwd = GetLRNFwdDesc(param, ctx.is_train,
                                                               data_md);
@@ -218,10 +231,9 @@ void MKLDNNLRNBackward(const OpContext &ctx, const LRNParam &param,
           lrn_forward(pdesc_fwd, mkldnn::primitive::at(*data_mem),
           *ws_mem, *dst_temp));
 
-  const mkldnn::memory::desc data_in_md = pdesc_fwd.src_primitive_desc().desc();
   const mkldnn::memory *diff_mem = out_grad.GetMKLDNNData();
   const mkldnn::memory::desc diff_md = diff_mem->get_primitive_desc().desc();
-  const mkldnn::lrn_backward::primitive_desc pdesc_bwd = GetLRNBwd(param, data_in_md,
+  const mkldnn::lrn_backward::primitive_desc pdesc_bwd = GetLRNBwd(param, data_md,
                                                                    diff_md, pdesc_fwd);
   mkldnn_output_t diff_src_mem = CreateMKLDNNMem(in_grad,
                                                  pdesc_bwd.diff_src_primitive_desc(), req);
@@ -229,6 +241,7 @@ void MKLDNNLRNBackward(const OpContext &ctx, const LRNParam &param,
   MKLDNNStream::Get()->RegisterPrim(
         lrn_backward(pdesc_bwd, mkldnn::primitive::at(*data_mem),
         mkldnn::primitive::at(*diff_mem), *ws_mem, *diff_src_mem.second));
+  CommitOutput(in_grad, diff_src_mem);
   MKLDNNStream::Get()->Submit();
 }
 }  // namespace op
diff --git a/tests/cpp/operator/mkldnn.cc b/tests/cpp/operator/mkldnn.cc
index 59bd3a547b7..14578bec561 100644
--- a/tests/cpp/operator/mkldnn.cc
+++ b/tests/cpp/operator/mkldnn.cc
@@ -105,8 +105,7 @@ static void InitDefaultArray(NDArray *arr, bool is_rand = false) {
     if (is_rand) {
       data[i] = (std::rand() % 100) - 50;
     } else {
-      int shift = size >> 1;
-      data[i] = i - shift;
+      data[i] = i % 100 - 50;
     }
 }
 
@@ -127,9 +126,8 @@ static void VerifyDefMem(const mkldnn::memory &mem) {
       = static_cast<mshadow::default_real_t *>(mem.get_data_handle());
   size_t size = pd.get_size() / sizeof(mshadow::default_real_t);
   size_t num_same = 0;
-  int shift = size >> 1;
   for (int i = 0; i < size; i++)
-    num_same += data[i] == static_cast<mshadow::default_real_t>(i - shift);
+    num_same += data[i] == static_cast<mshadow::default_real_t>(i % 100 - 50);
   EXPECT_EQ(num_same, size);
 }
 
@@ -155,6 +153,13 @@ static void VerifyMem(const mkldnn::memory &mem) {
   }
 }
 
+static bool IsSameShape(mkldnn::memory::primitive_desc pd, TShape shape) {
+  if (pd.desc().data.ndims != shape.ndim()) return false;
+  for (size_t i = 0; i < shape.ndim(); i++)
+    if (pd.desc().data.dims[i] != shape[i]) return false;
+  return true;
+}
+
 static mkldnn::memory::primitive_desc GetMemPD(const TShape s, int dtype,
                                                mkldnn::memory::format format) {
   mkldnn::memory::dims dims(s.ndim());
@@ -370,6 +375,25 @@ struct OpAttrs {
   std::set<OpReqType> requests;
   int num_inputs;
   int num_outputs;
+  int input_types;
+  int output_types;
+};
+
+enum ArrayTypes {
+  Normal = 1,
+  MKLDNN = 2,
+  MKLDNNDiffShape = 4,
+  MKLDNNDiffDim = 8,
+  NormalReshaped = 16,
+  MKLDNNReshaped = 32,
+  MKLDNNReshapedDiffShape = 64,
+  MKLDNNReshapedDiffDim = 128,
+  NormalReused = 256,
+  MKLDNNReused = 512,
+  MKLDNNReusedDiffDim = 1024,
+  NormalReshapedReused = 2048,
+  NormalReusedDiffDtype = 4096,
+  All = 8191,
 };
 
 OpAttrs GetCopyOp() {
@@ -535,6 +559,38 @@ void PrintVerifyMsg(const NDArrayAttrs &arr1, const NDArrayAttrs &arr2) {
      t1 << " with " << arr2.desc.c_str() << " " << t2 << "\n";
 }
 
+OpAttrs GetLRNOp() {
+  OpAttrs attrs;
+  attrs.attrs.op = Op::Get("LRN");
+  attrs.num_inputs = 1;
+  attrs.num_outputs = 2;
+  attrs.attrs.dict.insert({"nsize" , "3"});
+  attrs.attrs.op->attr_parser(&attrs.attrs);
+  attrs.dispatches.resize(2);
+  attrs.requests.insert(OpReqType::kWriteTo);
+  attrs.input_types = ArrayTypes::Normal |
+      ArrayTypes::MKLDNN |
+      ArrayTypes::NormalReshaped |
+      ArrayTypes::MKLDNNReshaped;
+  attrs.output_types = ArrayTypes::Normal |
+      ArrayTypes::MKLDNN |
+      ArrayTypes::NormalReshaped |
+      ArrayTypes::MKLDNNReshaped;
+  return attrs;
+}
+
+OpAttrs GetLRNBackwardsOp() {
+  OpAttrs attrs;
+  attrs.attrs.op = Op::Get("_backward_LRN");
+  attrs.num_inputs = 3;
+  attrs.num_outputs = 1;
+  attrs.attrs.dict.insert({"nsize" , "3"});
+  attrs.attrs.op->attr_parser(&attrs.attrs);
+  attrs.dispatches.resize(2);
+  attrs.requests.insert(OpReqType::kWriteTo);
+  return attrs;
+}
+
 /*
  * We want to get a few types of NDArrays for testing:
  * 1. Normal NDArray
@@ -557,7 +613,9 @@ void PrintVerifyMsg(const NDArrayAttrs &arr1, const NDArrayAttrs &arr2) {
  *
  *  num_inputs / dim arguments used to scale shape (used for concat backwards to enlarge input shapes)
  */
-std::vector<NDArrayAttrs> GetTestInputArrays(bool rand = false, int num_inputs = 1, int dim = 0) {
+std::vector<NDArrayAttrs> GetTestInputArrays(
+    int types = ArrayTypes::All, bool rand = false,
+    int num_inputs = 1, int dim = 0) {
   TestArrayShapes tas = GetTestArrayShapes();
   std::vector<nnvm::TShape> shapes = tas.shapes;
   std::vector<mkldnn::memory::primitive_desc> pds = tas.pds;
@@ -575,8 +633,20 @@ std::vector<NDArrayAttrs> GetTestInputArrays(bool rand = false, int num_inputs =
 
     // Type 1.
     NDArray arr(shape, Context());
-    in_arrs.emplace_back(arr, "Normal NDArray");
-    InitDefaultArray(&in_arrs.back().arr, rand);
+    if (types & ArrayTypes::Normal) {
+      InitDefaultArray(&arr, rand);
+      in_arrs.emplace_back(arr, "Normal NDArray");
+    }
+
+    // Type 4
+    arr = NDArray(shape, Context());
+    if (types & ArrayTypes::NormalReshaped) {
+        InitDefaultArray(&arr, rand);
+        in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount),
+                "Reshaped Normal NDArray");
+    }
+
+
     for (auto pd : pds) {
       if (num_inputs > 1) {
         // preserve if matching layout else just expand on 0 dim
@@ -591,27 +661,47 @@ std::vector<NDArrayAttrs> GetTestInputArrays(bool rand = false, int num_inputs =
 
       // Type 2, 3.
       arr = NDArray(shape, Context());
-      desc = "MKLDNN NDArray";
-      if (shape.ndim() != pd.desc().data.ndims) {
+      if (shape.ndim() == pd.desc().data.ndims && IsSameShape(pd, shape)
+          && types & ArrayTypes::MKLDNN) {
+        desc = "MKLDNN NDArray";
+        InitMKLDNNArray(&arr, pd, rand);
+        in_arrs.emplace_back(arr, desc);
+      } else if (shape.ndim() == pd.desc().data.ndims && !IsSameShape(pd, shape)
+          && types & ArrayTypes::MKLDNNDiffShape) {
+        desc = "MKLDNN NDArray with different shape";
+        InitMKLDNNArray(&arr, pd, rand);
+        in_arrs.emplace_back(arr, desc);
+      } else if (shape.ndim() != pd.desc().data.ndims && types & ArrayTypes::MKLDNNDiffDim) {
         std::stringstream ss;
-        ss << "MKLDNN NDArray with different memory layout " <<
+        ss << "MKLDNN NDArray with different dim " <<
            shape.ndim() << "/" << pd.desc().data.ndims;
         desc = ss.str();
+        InitMKLDNNArray(&arr, pd, rand);
+        in_arrs.emplace_back(arr, desc);
       }
-      InitMKLDNNArray(&arr, pd);
-      in_arrs.emplace_back(arr, desc);
 
-      // Type 4, 5, 6.
+
+      // Type 5, 6.
       arr = NDArray(shape, Context());
-      desc = "Reshaped MKLDNN NDArray";
-      if (shape.ndim() != pd.desc().data.ndims) {
+      if (shape.ndim() == pd.desc().data.ndims && IsSameShape(pd, shape)
+          && types & ArrayTypes::MKLDNNReshaped) {
+        desc = "Reshaped MKLDNN NDArray";
+        InitMKLDNNArray(&arr, pd, rand);
+        in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc);
+      } else if (shape.ndim() == pd.desc().data.ndims && !IsSameShape(pd, shape)
+          && types & ArrayTypes::MKLDNNReshapedDiffShape) {
+        desc = "Reshaped MKLDNN NDArray with different shape";
+        InitMKLDNNArray(&arr, pd, rand);
+        in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc);
+      } else if (shape.ndim() != pd.desc().data.ndims
+          && types & ArrayTypes::MKLDNNReshapedDiffDim) {
         std::stringstream ss;
-        ss << "Reshaped MKLDNN NDArray with different memory layout "
-           << shape.ndim() << "/" << pd.desc().data.ndims;
+        ss << "Reshaped MKLDNN NDArray with different dim " <<
+           shape.ndim() << "/" << pd.desc().data.ndims;
         desc = ss.str();
+        InitMKLDNNArray(&arr, pd, rand);
+        in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc);
       }
-      InitMKLDNNArray(&arr, pd);
-      in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc);
     }
   }
   return in_arrs;
@@ -640,7 +730,7 @@ std::vector<NDArrayAttrs> GetTestInputArrays(bool rand = false, int num_inputs =
 std::vector<NDArrayAttrs> GetTestOutputArrays(
     const TShape &shp,
     const std::vector<mkldnn::memory::primitive_desc> &pds,
-    std::vector<float>scale = {1}) {
+    std::vector<float>scale = {1}, bool rand = true, int types = ArrayTypes::All) {
   TShape shape = shp;
 
   for (int dim = 0; dim < scale.size(); dim++)
@@ -650,39 +740,50 @@ std::vector<NDArrayAttrs> GetTestOutputArrays(
   std::string desc;
   // Type 1.
   NDArray arr(shape, Context());
-  in_arrs.emplace_back(arr, "Normal NDArray");
-  InitDefaultArray(&in_arrs.back().arr, true);
 
-  // Type 4.
+  if (types & ArrayTypes::Normal) {
+    in_arrs.emplace_back(arr, "Normal NDArray");
+    InitDefaultArray(&in_arrs.back().arr, rand);
+  }
+
   TShape tmp_shape = shape;
-  tmp_shape[0] = shape[0] * 2;
-  NDArray arr0(tmp_shape, Context());
-  InitDefaultArray(&arr0, true);
-  in_arrs.emplace_back(arr0.Slice(1, shape[0] + 1), "Reshaped NDArray");
+  if (types & ArrayTypes::NormalReshaped) {
+    // Type 4.
+    tmp_shape[0] = shape[0] * 2;
+    NDArray arr0(tmp_shape, Context());
+    InitDefaultArray(&arr0, rand);
+    in_arrs.emplace_back(arr0.Slice(1, shape[0] + 1), "Reshaped NDArray");
+  }
 
-  // Type 5.
-  // Get a reused version.
   nnvm::TShape s(1);
-  s[0] = shape.Size();
-  NDArray arr1(s, Context());
-  arr1 = arr1.AsArray(shape, arr1.dtype());
-  InitDefaultArray(&arr1, true);
-  in_arrs.emplace_back(arr1, "Reused NDArray");
-
-  // Type 6.
-  s[0] = shape.Size() * GetTypeSize(mshadow::default_type_flag);
-  NDArray arr2(s, Context(), true, mshadow::kUint8);
-  arr2 = arr2.AsArray(shape, mshadow::default_type_flag);
-  InitDefaultArray(&arr2, true);
-  in_arrs.emplace_back(arr2, "Reused NDArray with diff data type");
-
-  // Type 7
-  s[0] = shape.Size() * GetTypeSize(mshadow::default_type_flag) * 2;
-  NDArray arr3(s, Context(), true, mshadow::kUint8);
-  tmp_shape[0] = shape[0] * 2;
-  arr3 = arr3.AsArray(tmp_shape, mshadow::default_type_flag);
-  InitDefaultArray(&arr3, true);
-  in_arrs.emplace_back(arr3.Slice(1, shape[0] + 1), "Reused+Reshaped NDArray");
+  if (types & ArrayTypes::NormalReused) {
+    // Type 5.
+    // Get a reused version.
+    s[0] = shape.Size();
+    NDArray arr1(s, Context());
+    arr1 = arr1.AsArray(shape, arr1.dtype());
+    InitDefaultArray(&arr1, rand);
+    in_arrs.emplace_back(arr1, "Reused NDArray");
+  }
+
+  if (types & ArrayTypes::NormalReusedDiffDtype) {
+    // Type 6.
+    s[0] = shape.Size() * GetTypeSize(mshadow::default_type_flag);
+    NDArray arr2(s, Context(), true, mshadow::kUint8);
+    arr2 = arr2.AsArray(shape, mshadow::default_type_flag);
+    InitDefaultArray(&arr2, rand);
+    in_arrs.emplace_back(arr2, "Reused NDArray with diff data type");
+  }
+
+  if (types & ArrayTypes::NormalReshapedReused) {
+    // Type 7
+    s[0] = shape.Size() * GetTypeSize(mshadow::default_type_flag) * 2;
+    NDArray arr3(s, Context(), true, mshadow::kUint8);
+    tmp_shape[0] = shape[0] * 2;
+    arr3 = arr3.AsArray(tmp_shape, mshadow::default_type_flag);
+    InitDefaultArray(&arr3, rand);
+    in_arrs.emplace_back(arr3.Slice(1, shape[0] + 1), "Reused+Reshaped NDArray");
+  }
 
   for (auto pd : pds) {
     if (shape.Size() != pd.get_size() / sizeof(mshadow::default_real_t))
@@ -703,8 +804,12 @@ std::vector<NDArrayAttrs> GetTestOutputArrays(
          << shape.ndim() << "/" << pd.desc().data.ndims;
       desc = ss.str();
     }
-    in_arrs.emplace_back(arr, desc);
-    InitMKLDNNArray(&in_arrs.back().arr, pd, true);
+
+    if ((types & ArrayTypes::MKLDNN && shape.ndim() == pd.desc().data.ndims) ||
+        (types & ArrayTypes::MKLDNNDiffDim && shape.ndim() != pd.desc().data.ndims)) {
+      in_arrs.emplace_back(arr, desc);
+      InitMKLDNNArray(&in_arrs.back().arr, pd, rand);
+    }
 
     // Type 8, 9.
     // Get a reused version.
@@ -712,7 +817,7 @@ std::vector<NDArrayAttrs> GetTestOutputArrays(
     s[0] = shape.Size();
     NDArray arr = NDArray(s, Context());
     arr = arr.AsArray(shape, arr.dtype());
-    InitMKLDNNArray(&arr, pd, true);
+    InitMKLDNNArray(&arr, pd, rand);
     desc = "Reused MKLDNN NDArray";
     if (shape.ndim() != pd.desc().data.ndims) {
       std::stringstream ss;
@@ -720,7 +825,11 @@ std::vector<NDArrayAttrs> GetTestOutputArrays(
          << shape.ndim() << "/" << pd.desc().data.ndims;
       desc = ss.str();
     }
-    in_arrs.emplace_back(arr, desc);
+
+    if ((types & ArrayTypes::MKLDNNReused && shape.ndim() == pd.desc().data.ndims) ||
+        (types & ArrayTypes::MKLDNNReusedDiffDim && shape.ndim() != pd.desc().data.ndims)) {
+      in_arrs.emplace_back(arr, desc);
+    }
   }
   return in_arrs;
 }
@@ -729,7 +838,8 @@ TEST(MKLDNN_NDArray, GetTestInputArraysConcat) {
   auto in_arrs = GetTestInputArrays();
   for (int dim = 0; dim < 5; dim++) {
     for (int num_inputs = 2; num_inputs < 5; num_inputs++) {
-      std::vector<NDArrayAttrs> expanded_arrs = GetTestInputArrays(false, num_inputs, dim);
+      std::vector<NDArrayAttrs> expanded_arrs = GetTestInputArrays(
+          ArrayTypes::All, false, num_inputs, dim);
       int i = 0;
       for (auto &arr : in_arrs) {
         if (dim >= arr.arr.shape().ndim())
@@ -781,6 +891,19 @@ void VerifyCopyResult(const std::vector<NDArray *> &in_arrs,
                    tmp1.shape().Size() * sizeof(mshadow::default_real_t)), 0);
 }
 
+void AssertEqual(const std::vector<NDArray *> &in_arrs,
+                      const std::vector<NDArray *> &out_arrs) {
+  NDArray tmp1 = in_arrs[0]->Reorder2Default();
+  NDArray tmp2 = out_arrs[0]->Reorder2Default();
+  EXPECT_EQ(tmp1.shape().Size(), tmp2.shape().Size());
+  TBlob blob1 = tmp1.data();
+  TBlob blob2 = tmp2.data();
+  mshadow::default_real_t *d1 = static_cast<mshadow::default_real_t*>(blob1.dptr_);
+  mshadow::default_real_t *d2 = static_cast<mshadow::default_real_t*>(blob2.dptr_);
+  for (int i = 0; i < tmp1.shape().Size(); i++)
+    ASSERT_FLOAT_EQ(d1[i], d2[i]);
+}
+
 void VerifyActResult(const std::vector<NDArray *> &in_arrs,
                      const std::vector<NDArray *> &out_arrs) {
   NDArray tmp1 = in_arrs[0]->Reorder2Default();
@@ -1055,7 +1178,7 @@ void TestConcatOp(const OpAttrs &attrs, VerifyFunc verify_fn,
   if (backwards) {
     std::string str_dim = const_cast<OpAttrs&>(attrs).attrs.dict["dim"];
     int dim = std::stoi(str_dim);
-    in_arrs = GetTestInputArrays(false, attrs.num_outputs, dim);
+    in_arrs = GetTestInputArrays(ArrayTypes::All, false, attrs.num_outputs, dim);
   }
 
   for (auto &in_arr : in_arrs) {
@@ -1094,6 +1217,95 @@ void TestConcatOp(const OpAttrs &attrs, VerifyFunc verify_fn,
   }
 }
 
+// Compares the output of FCompute with FComputeEx
+void TestOpEx(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs) {
+  std::vector<NDArray*> inputs(forward_attrs.num_inputs);
+  std::vector<NDArray*> outputs(forward_attrs.num_outputs);
+  std::vector<NDArray*> ex_outputs(forward_attrs.num_outputs);
+
+  std::vector<NDArray*> backwards_input(backwards_attrs.num_inputs);
+  std::vector<NDArray*> backwards_outputs(backwards_attrs.num_outputs);
+  std::vector<NDArray*> backwards_ex_outputs(backwards_attrs.num_outputs);
+
+
+  std::vector<OpReqType> req(forward_attrs.num_outputs);
+  std::vector<OpReqType> back_req(backwards_attrs.num_outputs);
+
+  TestArrayShapes tas = GetTestArrayShapes();
+  std::vector<mkldnn::memory::primitive_desc> pds = tas.pds;
+
+  std::vector<NDArrayAttrs> in_arrs = GetTestInputArrays(forward_attrs.input_types, true);
+  std::vector<std::vector<NDArrayAttrs>> out_arrs(forward_attrs.num_outputs);
+  std::vector<std::vector<NDArrayAttrs>> ex_out_arrs(forward_attrs.num_outputs);
+
+  if (forward_attrs.requests.find(OpReqType::kWriteTo) != forward_attrs.requests.end()) {
+    for (int i1 = 0; i1 < in_arrs.size(); i1++) {
+      auto in_arr = in_arrs[i1];
+
+      // TODO(alex): (MXNET-845) Remove when MKLDNN supports other dims
+      if (in_arr.arr.shape().ndim() != 4)
+        continue;
+
+      for (int i = 0; i < forward_attrs.num_outputs; i++) {
+        out_arrs[i] =
+            GetTestOutputArrays(in_arr.arr.shape(), pds, {1}, true, forward_attrs.output_types);
+        ex_out_arrs[i] =
+            GetTestOutputArrays(in_arr.arr.shape(), pds, {1}, true, forward_attrs.output_types);
+      }
+
+      for (int i = 0; i < forward_attrs.num_inputs; i++)
+        inputs[i] = &in_arr.arr;
+
+      for (size_t output_i = 0; output_i < out_arrs[0].size(); output_i++) {
+        if (out_arrs[0][output_i].arr.IsMKLDNNData())
+          continue;
+
+        for (int i = 0; i < forward_attrs.num_outputs; i++) {
+          req[i] = kWriteTo;
+          outputs[i] = &out_arrs[i][output_i].arr;
+          ex_outputs[i] = &ex_out_arrs[i][output_i].arr;
+        }
+        Imperative::Get()->set_is_training(true);
+
+        PrintVerifyMsg(in_arr, out_arrs[0][output_i]);
+        Imperative::Get()->InvokeOp(
+            Context(), forward_attrs.attrs, inputs, outputs, req,
+            DispatchMode::kFCompute, mxnet::OpStatePtr());
+        Imperative::Get()->InvokeOp(
+            Context(), forward_attrs.attrs, inputs, ex_outputs, req,
+            DispatchMode::kFComputeEx, mxnet::OpStatePtr());
+        Engine::Get()->WaitForAll();
+        AssertEqual(outputs, ex_outputs);
+
+        // backwards test performed at the same time since it needs the forward outputs
+        backwards_input[0] = outputs[0];  // output grad
+        backwards_input[1] = inputs[0];  // input
+        backwards_input[2] = outputs[1];  // out norm
+
+        auto tmp_output = GetTestInputArrays(forward_attrs.input_types, true)[i1];
+        backwards_outputs[0] = &tmp_output.arr;
+
+        auto tmp_output2 = GetTestInputArrays(forward_attrs.input_types, true)[i1];
+        backwards_ex_outputs[0] = &tmp_output2.arr;
+
+        for (int i = 0; i < backwards_attrs.num_outputs; i++)
+          back_req[i] = kWriteTo;
+
+        std::cout << "Backwards: ";
+        PrintVerifyMsg(out_arrs[0][output_i], tmp_output);
+        Imperative::Get()->InvokeOp(
+            Context(), backwards_attrs.attrs, backwards_input, backwards_outputs,
+            back_req, DispatchMode::kFCompute, mxnet::OpStatePtr());
+        Imperative::Get()->InvokeOp(
+            Context(), backwards_attrs.attrs, backwards_input, backwards_ex_outputs,
+            back_req, DispatchMode::kFComputeEx, mxnet::OpStatePtr());
+        Engine::Get()->WaitForAll();
+        AssertEqual(backwards_outputs, backwards_ex_outputs);
+      }
+    }
+  }
+}
+
 int CalculateWidthPoolOutput(int width, int kernel, int padding, int stride) {
   return (width - kernel + 2 * padding) / stride  + 1;
 }
@@ -1252,6 +1464,12 @@ TEST(IMPERATIVE, ConcatBackwardsOp) {
   }
 }
 
+TEST(IMPERATIVE, LRNOp) {
+  OpAttrs forward_attrs = GetLRNOp();
+  OpAttrs backwards_attrs = GetLRNBackwardsOp();
+  TestOpEx(forward_attrs, backwards_attrs);
+}
+
 TEST(IMPERATIVE, PoolingOp) {
   for (int dim = 2; dim < 4; dim++) {
     for (int kernel = 1; kernel < 4; kernel++) {
@@ -1270,7 +1488,7 @@ TEST(IMPERATIVE, PoolingOp) {
 
 TEST(MKLDNN_BASE, MKLDNNSum) {
   std::vector<NDArrayAttrs> in_arrs = GetTestInputArrays();
-  std::vector<NDArrayAttrs> in_arrs2 = GetTestInputArrays(true);
+  std::vector<NDArrayAttrs> in_arrs2 = GetTestInputArrays(ArrayTypes::All, true);
   TestArrayShapes tas = GetTestArrayShapes();
   std::vector<mkldnn::memory::primitive_desc> pds = tas.pds;
 
@@ -1320,7 +1538,7 @@ TEST(MKLDNN_BASE, MKLDNNSum) {
 
 TEST(MKLDNN_BASE, CreateMKLDNNMem) {
   std::vector<NDArrayAttrs> in_arrs = GetTestInputArrays();
-  std::vector<NDArrayAttrs> in_arrs2 = GetTestInputArrays(true);
+  std::vector<NDArrayAttrs> in_arrs2 = GetTestInputArrays(ArrayTypes::All, true);
   TestArrayShapes tas = GetTestArrayShapes();
   std::vector<mkldnn::memory::primitive_desc> pds = tas.pds;
   MKLDNNStream *stream = MKLDNNStream::Get();


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services