Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2020/01/15 11:51:35 UTC

[GitHub] [incubator-mxnet] hanke580 opened a new pull request #17323: [Numpy] Kron operator

hanke580 opened a new pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323
 
 
   ## Description ##
   * Add Kron operator (NumPy-compatible `np.kron`; a reference sketch follows below)
   * Includes both forward and backward computation
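   For reference, the operator follows `numpy.kron` semantics. A minimal plain-NumPy sketch of the expected behavior (shapes chosen arbitrarily for illustration):
   ```python
   import numpy as np

   a = np.array([[1, 2],
                 [3, 4]])
   b = np.array([[0, 10],
                 [20, 30]])

   # Each element of `a` scales a full copy of `b`; the scaled copies are tiled:
   #   out[i*P + p, j*Q + q] = a[i, j] * b[p, q]   for b of shape (P, Q)
   out = np.kron(a, b)
   print(out.shape)  # (4, 4)
   ```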
   
   ## Checklist ##
   ### Essentials ###
   Please feel free to remove inapplicable items for your PR.
   - [ ] The PR title starts with [MXNET-$JIRA_ID], where $JIRA_ID refers to the relevant [JIRA issue](https://issues.apache.org/jira/projects/MXNET/issues) created (except PRs with tiny changes)
   - [ ] Changes are complete (i.e. I finished coding on this PR)
   - [ ] All changes have test coverage:
   - Unit tests are added for small changes to verify correctness (e.g. adding a new operator)
   - Nightly tests are added for complicated/long-running ones (e.g. changing distributed kvstore)
   - Build tests will be added for build configuration changes (e.g. adding a new build option with NCCL)
   - [ ] Code is well-documented: 
   - For user-facing API changes, API doc string has been updated. 
   - For new C++ functions in header files, their functionalities and arguments are documented. 
   - For new examples, README.md is added to explain what the example does, the source of the dataset, expected performance on the test set, and a reference to the original paper if applicable
   - Check the API doc at https://mxnet-ci-doc.s3-accelerate.dualstack.amazonaws.com/PR-$PR_ID/$BUILD_ID/index.html
   - [ ] To the best of my knowledge, examples are either not affected by this change, or have been fixed to be compatible with this change
   
   ### Changes ###
   - [ ] Feature1, tests, (and when applicable, API doc)
   - [ ] Feature2, tests, (and when applicable, API doc)
   
   ## Comments ##
   - If this change is backward incompatible, explain why it must be made.
   - Note any interesting edge cases here.
   


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
haojin2 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r367134732
 
 

 ##########
 File path: src/operator/numpy/np_kron-inl.h
 ##########
 @@ -0,0 +1,261 @@
+template<int ndim, int req>
+struct kron_back_b{
 
 Review comment:
   `struct kron_back_b {`


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
haojin2 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r367134690
 
 

 ##########
 File path: src/operator/numpy/np_kron-inl.h
 ##########
 @@ -0,0 +1,261 @@
+template<int ndim, int req>
+struct kron_back_a{
 
 Review comment:
   `struct kron_back_a {`


[GitHub] [incubator-mxnet] hanke580 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
hanke580 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r368223778
 
 

 ##########
 File path: src/operator/numpy/np_kron-inl.h
 ##########
 @@ -0,0 +1,261 @@
+template<typename xpu>
+void KronOpForwardImpl(const OpContext& ctx,
 
 Review comment:
   Fixed, thx


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
haojin2 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r367132128
 
 

 ##########
 File path: src/operator/numpy/np_kron-inl.h
 ##########
 @@ -0,0 +1,261 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file np_kron-inl.h
+ * \brief Function definition of matrix numpy-compatible kron operator
+ */
+#ifndef MXNET_OPERATOR_NUMPY_NP_KRON_INL_H_
+#define MXNET_OPERATOR_NUMPY_NP_KRON_INL_H_
+
+#include <vector>
+#include "np_tensordot_op-inl.h"
+#include "../mxnet_op.h"
+
+namespace mxnet {
+namespace op {
+
+template<int ndim, int req>
+struct kron{
+  template<typename DType>
+  MSHADOW_XINLINE static void Map(index_t i, DType* out,
+                                  const DType* a, const DType* b,
+                                  mshadow::Shape<ndim> ashape,
+                                  mshadow::Shape<ndim> bshape,
+                                  mshadow::Shape<ndim> oshape) {
+    using namespace mxnet_op;
+
+    auto k = unravel(i, oshape);
+    Shape<ndim> ia;
+    Shape<ndim> jb;
+    for(int q = 0; q < ndim; q++){
+      ia[q] = int(k[q] / bshape[q]);
+      jb[q] = k[q] % bshape[q];
+    }
+    auto idx_a = ravel(ia, ashape);
+    auto idx_b = ravel(jb, bshape);
+
+    KERNEL_ASSIGN(out[i], req, a[idx_a] * b[idx_b]);
+  }
+};
+
+template<int ndim, int req>
+struct kron_back_a{
+  template<typename DType>
+  MSHADOW_XINLINE static void Map(index_t i, DType* agrad,
+                                  const DType* b, const DType* ograd,
+                                  mshadow::Shape<ndim> ashape,
+                                  mshadow::Shape<ndim> bshape,
+                                  mshadow::Shape<ndim> oshape) {
+    using namespace mxnet_op;
+
+    auto ia = unravel(i, ashape);
+    Shape<ndim> k;
+    DType temp_agrad = 0;
+
+    for(int idx_b = 0; idx_b < bshape.Size(); idx_b++){
+      auto jb = unravel(idx_b, bshape);
+      for(int q = 0;q < ndim; q++){
+        k[q] = ia[q]*bshape[q] + jb[q];
+      }
+      auto idx_o = ravel(k, oshape);
+      temp_agrad += b[idx_b]*ograd[idx_o];
+    }
+    KERNEL_ASSIGN(agrad[i], req, temp_agrad);
+
+  }
+};
+
+template<int ndim, int req>
+struct kron_back_b{
+  template<typename DType>
+  MSHADOW_XINLINE static void Map(index_t i, const DType* a,
+                                  DType* bgrad, const DType* ograd,
+                                  mshadow::Shape<ndim> ashape,
+                                  mshadow::Shape<ndim> bshape,
+                                  mshadow::Shape<ndim> oshape) {
+    using namespace mxnet_op;
+
+    auto jb = unravel(i, bshape);
+    Shape<ndim> k;
+    DType temp_bgrad = 0;
+
+    for(int idx_a = 0; idx_a < ashape.Size(); idx_a++){
+      auto ia = unravel(idx_a, ashape);
+      for(int q = 0;q < ndim; q++){
+        k[q] = ia[q] * bshape[q] + jb[q];
+      }
+      auto idx_o = ravel(k, oshape);
+      temp_bgrad += a[idx_a]*ograd[idx_o];
+    }
+    KERNEL_ASSIGN(bgrad[i], req, temp_bgrad);
+  }
+};
+
+template<typename xpu>
+void KronOpForwardImpl(const OpContext& ctx,
+                const std::vector<OpReqType>& req,
+                const TBlob& a,
+                const TBlob& b,
+                const TBlob& out
+                ){
+  using namespace mshadow;
+
+  const mxnet::TShape& ashape = a.shape_;
+  const mxnet::TShape& bshape = b.shape_;
+  const mxnet::TShape& oshape = out.shape_;
+  MXNET_NDIM_SWITCH(oshape.ndim(), ndim, {
+
+    Shape<ndim> ashape_;
+    Shape<ndim> bshape_;
+    Shape<ndim> oshape_;
+    int temp = ashape.ndim()-bshape.ndim();
+    int s_dim = temp>0?bshape.ndim():ashape.ndim();
+    for (int i = 0; i<s_dim; i++){
+      ashape_[ndim - i - 1] = ashape[ashape.ndim() - i - 1];
+      bshape_[ndim - i - 1] = bshape[bshape.ndim() - i - 1];
+      oshape_[ndim - i - 1] = oshape[oshape.ndim() - i - 1];
+    }
+    if (temp > 0) {
+      for (int i = s_dim; i<ndim; i++) {
+        ashape_[ndim - i - 1] = ashape[ashape.ndim() - i - 1];
+        bshape_[ndim - i - 1] = 1;
+        oshape_[ndim - i - 1] = oshape[oshape.ndim() - i - 1];
+      }
+    } else {
+      for (int i = s_dim; i<ndim; i++) {
+        ashape_[ndim - i - 1] = 1;
+        bshape_[ndim - i - 1] = bshape[bshape.ndim() - i - 1];
+        oshape_[ndim - i - 1] = oshape[oshape.ndim() - i - 1];
+      }
+    }
+
+    // TensordotIntAxesImpl<xpu>(0, ctx, a, b, out, req[0]);
+    Stream<xpu> *s = ctx.get_stream<xpu>();
+    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
+      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
+        mxnet_op::Kernel<kron<ndim, req_type>, xpu>::Launch(
+          s, out.Size(), out.dptr<DType>(), a.dptr<DType>(), b.dptr<DType>(),
+          ashape_, bshape_, oshape_);
+      });
+    });
+  });
+};
+
+template<typename xpu>
+void KronOpBackwardImpl(const OpContext& ctx,
+                        const std::vector<OpReqType>& req,
+                        const TBlob& a,
+                        const TBlob& b,
+                        const TBlob& ograd,
+                        const TBlob& agrad,
+                        const TBlob& bgrad){
+  const mxnet::TShape& ashape = a.shape_;
+  const mxnet::TShape& bshape = b.shape_;
+  const mxnet::TShape& oshape = ograd.shape_;
+  MXNET_NDIM_SWITCH(oshape.ndim(), ndim, {
+
+    Shape<ndim> ashape_;
+    Shape<ndim> bshape_;
+    Shape<ndim> oshape_;
+    int temp = ashape.ndim()-bshape.ndim();
+    int s_dim = temp>0?bshape.ndim():ashape.ndim();
+    for (int i = 0; i<s_dim; i++){
+      ashape_[ndim - i - 1] = ashape[ashape.ndim() - i - 1];
+      bshape_[ndim - i - 1] = bshape[bshape.ndim() - i - 1];
+      oshape_[ndim - i - 1] = oshape[oshape.ndim() - i - 1];
+    }
+    if (temp > 0) {
+      for (int i = s_dim; i<ndim; i++) {
+        ashape_[ndim - i - 1] = ashape[ashape.ndim() - i - 1];
+        bshape_[ndim - i - 1] = 1;
+        oshape_[ndim - i - 1] = oshape[oshape.ndim() - i - 1];
+      }
+    } else {
+      for (int i = s_dim; i<ndim; i++) {
+        ashape_[ndim - i - 1] = 1;
+        bshape_[ndim - i - 1] = bshape[bshape.ndim() - i - 1];
+        oshape_[ndim - i - 1] = oshape[oshape.ndim() - i - 1];
+      }
+    }
+
+    Stream<xpu> *s = ctx.get_stream<xpu>();
+    MSHADOW_TYPE_SWITCH(agrad.type_flag_, DType, {
+      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
+        mxnet_op::Kernel<kron_back_a<ndim, req_type>, xpu>::Launch(
+          s, agrad.Size(), agrad.dptr<DType>(), b.dptr<DType>(), ograd.dptr<DType>(),
+          ashape_, bshape_, oshape_);
+      });
+    });
+
+    MSHADOW_TYPE_SWITCH(bgrad.type_flag_, DType, {
+      MXNET_ASSIGN_REQ_SWITCH(req[1], req_type, {
+        mxnet_op::Kernel<kron_back_b<ndim, req_type>, xpu>::Launch(
+          s, bgrad.Size(), a.dptr<DType>(), bgrad.dptr<DType>(), ograd.dptr<DType>(),
+          ashape_, bshape_, oshape_);
+      });
+    });
+  });
+}
+
+template<typename xpu>
+inline void KronOpForward(const nnvm::NodeAttrs& attrs,
+                          const OpContext& ctx,
+                          const std::vector<TBlob>& inputs,
+                          const std::vector<OpReqType>& req,
+                          const std::vector<TBlob>& outputs) {
+  using namespace mshadow;
+
+  CHECK_EQ(inputs.size(), 2U);
+  CHECK_EQ(outputs.size(), 1U);
+
+  const TBlob& a = inputs[0];
+  const TBlob& b = inputs[1];
+  const TBlob& out = outputs[0];
+
+  KronOpForwardImpl<xpu>(ctx, req, a, b, out);
+}
+
+
+template<typename xpu>
+inline void KronOpBackward(const nnvm::NodeAttrs& attrs,
+                           const OpContext& ctx,
+                           const std::vector<TBlob>& inputs,
+                           const std::vector<OpReqType>& req,
+                           const std::vector<TBlob>& outputs) {
+  using namespace mxnet_op;
+  using namespace mshadow;
+
+  CHECK_EQ(inputs.size(), 3U);
+  CHECK_EQ(outputs.size(), 2U);
+
+  const TBlob& ograd = inputs[0];
+  const TBlob& a = inputs[1];
+  const TBlob& b = inputs[2];
+  const TBlob& grad_a = outputs[0];
+  const TBlob& grad_b = outputs[1];
+
+  KronOpBackwardImpl<xpu>(ctx, req, a, b, ograd, grad_a, grad_b);
+}
+
+}  // namespace op
+}  // namespace mxnet
+
+#endif  // MXNET_OPERATOR_NUMPY_NP_KRON_INL_H_
+
 
 Review comment:
   No need for this blank line.
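Aside: the forward `kron` kernel quoted above maps each flat output index `i` to per-axis coordinates `k = unravel(i, oshape)`, then splits every coordinate into `ia[q] = k[q] / bshape[q]` (the index into `a`) and `jb[q] = k[q] % bshape[q]` (the index into `b`), so that `out[k] = a[ia] * b[jb]`. A small NumPy sketch of the same index arithmetic, with `np.unravel_index` standing in for mshadow's `unravel` (shapes picked arbitrarily):

```python
import numpy as np

ashape, bshape = (2, 3), (4, 5)
oshape = tuple(s * t for s, t in zip(ashape, bshape))
a = np.random.rand(*ashape)
b = np.random.rand(*bshape)

out = np.empty(oshape)
for i in range(out.size):
    k = np.unravel_index(i, oshape)
    ia = tuple(kq // bq for kq, bq in zip(k, bshape))  # index into a
    jb = tuple(kq % bq for kq, bq in zip(k, bshape))   # index into b
    out[k] = a[ia] * b[jb]

assert np.allclose(out, np.kron(a, b))
```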


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
haojin2 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r367132285
 
 

 ##########
 File path: src/operator/numpy/np_kron-inl.h
 ##########
 @@ -0,0 +1,261 @@
+template<typename xpu>
+void KronOpBackwardImpl(const OpContext& ctx,
+                        const std::vector<OpReqType>& req,
+                        const TBlob& a,
+                        const TBlob& b,
+                        const TBlob& ograd,
+                        const TBlob& agrad,
+                        const TBlob& bgrad){
+  const mxnet::TShape& ashape = a.shape_;
+  const mxnet::TShape& bshape = b.shape_;
+  const mxnet::TShape& oshape = ograd.shape_;
+  MXNET_NDIM_SWITCH(oshape.ndim(), ndim, {
+
 
 Review comment:
   get rid of this blank line.
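Context for the blank-line nit above: the shape-normalization loops in `KronOpBackwardImpl` (identical to those in `KronOpForwardImpl`, quoted in full earlier) left-pad the lower-rank operand's shape with 1s so that `a`, `b`, and the output all share the output's rank, mirroring how `np.kron` treats inputs of different rank. A small sketch of that padding rule, assuming only what the quoted loops do:

```python
def pad_shapes(ashape, bshape):
    """Left-pad the lower-rank shape with 1s, as the quoted impl does."""
    ndim = max(len(ashape), len(bshape))
    a_ = (1,) * (ndim - len(ashape)) + tuple(ashape)
    b_ = (1,) * (ndim - len(bshape)) + tuple(bshape)
    oshape = tuple(x * y for x, y in zip(a_, b_))  # kron output shape
    return a_, b_, oshape

print(pad_shapes((2, 3, 4), (2,)))  # ((2, 3, 4), (1, 1, 2), (2, 3, 8))
```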


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
haojin2 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r367134864
 
 

 ##########
 File path: src/operator/numpy/np_kron-inl.h
 ##########
 @@ -0,0 +1,261 @@
+template<typename xpu>
+void KronOpBackwardImpl(const OpContext& ctx,
+                        const std::vector<OpReqType>& req,
+                        const TBlob& a,
+                        const TBlob& b,
+                        const TBlob& ograd,
+                        const TBlob& agrad,
+                        const TBlob& bgrad){
 
 Review comment:
   `const TBlob& bgrad) {`


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
haojin2 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r367131846
 
 

 ##########
 File path: src/operator/numpy/np_kron.cu
 ##########
 @@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file np_kron.cu
+ * \brief GPU Implementation of numpy-compatible Kronecker product
+ */
+
+ #include "./np_kron-inl.h"
+
+ namespace mxnet {
+ namespace op {
+
+ NNVM_REGISTER_OP(_npi_kron)
+ .set_attr<FCompute>("FCompute<gpu>", KronOpForward<gpu>);
+
+ NNVM_REGISTER_OP(_backward_npi_kron)
+ .set_attr<FCompute>("FCompute<gpu>", KronOpBackward<gpu>);
+
+ }  // namespace op
+ }  // namespace mxnet
+
 
 Review comment:
   No need for this blank line.
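For context: this `.cu` file only binds the operator's GPU `FCompute` to the kernels defined in `np_kron-inl.h`; a matching CPU registration is expected elsewhere (presumably an `np_kron.cc`, not quoted in this thread). From the Python front end the operator is reached as in the unit test later in this thread, roughly:

```python
import mxnet as mx
from mxnet import np, npx
npx.set_np()  # enable NumPy semantics, matching @use_np in the tests

a = np.ones((2, 3))
b = np.ones((3,))
print(np.kron(a, b).shape)  # (2, 9)
```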


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
haojin2 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r367134565
 
 

 ##########
 File path: src/operator/numpy/np_kron-inl.h
 ##########
 @@ -0,0 +1,261 @@
+template<int ndim, int req>
+struct kron{
 
 Review comment:
   `struct kron {`


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
haojin2 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r367133643
 
 

 ##########
 File path: src/operator/numpy/np_kron-inl.h
 ##########
 @@ -0,0 +1,261 @@
+template<typename xpu>
+void KronOpForwardImpl(const OpContext& ctx,
 
 Review comment:
   alignment:
   ```c++
   void KronOpForwardImpl(const OpContext& ctx,
                          const TBlob& a,
                          const TBlob& b,
                          const TBlob& out,
                          const OpReqType req) {
   ```
   Here, since you're only using `req[0]` in this function, I would suggest changing your function signature to look like this and calling it as `KronOpForwardImpl(ctx, a, b, out, req[0]);` below.


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
haojin2 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r367131577
 
 

 ##########
 File path: tests/python/unittest/test_numpy_op.py
 ##########
 @@ -418,6 +418,55 @@ def hybrid_forward(self, F, a, b):
                                        rtol=1e-1, atol=1e-1, dtype=dtype)
 
 
+@with_seed()
+@use_np
+def test_np_kron():
+    class TestKron(HybridBlock):
+        def __init__(self):
+            super(TestKron, self).__init__()
+
+        def hybrid_forward(self, F, a, b):
+            return F.np.kron(a, b)
+
+    # test input
+    tensor_shapes = [
+        ((3,), (3,)),
+        ((2, 3), (3,)),
+        ((2, 3, 4), (2,)),
+        ((3, 2), ())
+    ]
+
+    for hybridize in [True, False]:
+        for a_shape, b_shape in tensor_shapes:
+            for dtype in [_np.float32, _np.float64]:
+                test_kron = TestKron()
+                if hybridize:
+                    test_kron.hybridize()
+                a = rand_ndarray(shape=a_shape, dtype=dtype).as_np_ndarray()
+                b = rand_ndarray(shape=b_shape, dtype=dtype).as_np_ndarray()
+                a.attach_grad()
+                b.attach_grad()
+
+                np_out = _np.kron(a.asnumpy(), b.asnumpy())
+                with mx.autograd.record():
+                    mx_out = test_kron(a, b)
+                assert mx_out.shape == np_out.shape
+                assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5, use_broadcast=False)
+                mx_out.backward()
 
 Review comment:
   Are you checking `a.grad` and `b.grad`?
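A minimal sketch of such a check, reusing only names from the quoted test: `mx_out.backward()` uses an all-ones head gradient, and each element of `a` multiplies every element of `b` exactly once in `kron(a, b)` (and vice versa), so the expected gradients are constant arrays:

```python
# d(sum(kron(a, b)))/da = b.sum() * ones_like(a)
# d(sum(kron(a, b)))/db = a.sum() * ones_like(b)
expected_grad_a = _np.full(a_shape, b.asnumpy().sum(), dtype=dtype)
expected_grad_b = _np.full(b_shape, a.asnumpy().sum(), dtype=dtype)
assert_almost_equal(a.grad.asnumpy(), expected_grad_a, rtol=1e-3, atol=1e-5)
assert_almost_equal(b.grad.asnumpy(), expected_grad_b, rtol=1e-3, atol=1e-5)
```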


[GitHub] [incubator-mxnet] haojin2 merged pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
haojin2 merged pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323
 
 
   


[GitHub] [incubator-mxnet] hanke580 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
hanke580 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r367234104
 
 

 ##########
 File path: src/operator/numpy/np_kron-inl.h
 ##########
 @@ -0,0 +1,261 @@
+template<typename xpu>
+void KronOpBackwardImpl(const OpContext& ctx,
+                        const std::vector<OpReqType>& req,
+                        const TBlob& a,
+                        const TBlob& b,
+                        const TBlob& ograd,
+                        const TBlob& agrad,
+                        const TBlob& bgrad){
+  const mxnet::TShape& ashape = a.shape_;
+  const mxnet::TShape& bshape = b.shape_;
+  const mxnet::TShape& oshape = ograd.shape_;
+  MXNET_NDIM_SWITCH(oshape.ndim(), ndim, {
+
 
 Review comment:
   Fixed, thx (Forgot to run cpplint)


[GitHub] [incubator-mxnet] hanke580 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
hanke580 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r394106448
 
 

 ##########
 File path: src/operator/numpy/np_kron-inl.h
 ##########
 @@ -0,0 +1,261 @@
+template<typename xpu>
+void KronOpForwardImpl(const OpContext& ctx,
 
 Review comment:
   Sorry for missing the alignment, already fixed!
   Thx


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #17323: [Numpy] Kron operator

Posted by GitBox <gi...@apache.org>.
haojin2 commented on a change in pull request #17323: [Numpy] Kron operator
URL: https://github.com/apache/incubator-mxnet/pull/17323#discussion_r391946230
 
 

 ##########
 File path: src/operator/numpy/np_kron-inl.h
 ##########
 @@ -0,0 +1,261 @@
+template<typename xpu>
+void KronOpForwardImpl(const OpContext& ctx,
 
 Review comment:
   Seems like it's still not fixed in this version.
