Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/01/31 20:53:06 UTC

[GitHub] piiswrong closed pull request #9369: UT fix for Windows

URL: https://github.com/apache/incubator-mxnet/pull/9369

This is a PR merged from a forked repository. Because GitHub hides the
original diff once a fork-based pull request is merged, the diff is
reproduced below for the sake of provenance:
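
The common thread in the diff below is the MXNET_API annotation, which
exports the marked classes and factory functions from the MXNet DLL on
Windows so that the C++ unit tests can link against them. The macro's
definition is not part of this diff; a typical export macro of this kind
looks roughly like the following sketch, where the MXNET_EXPORTS guard
name is illustrative rather than taken from MXNet's headers:

    // Sketch of a conventional Windows DLL export/import macro in the
    // style of MXNET_API. "MXNET_EXPORTS" stands in for whatever symbol
    // the library's own build would define; it is an assumption here.
    #ifdef _MSC_VER
      #ifdef MXNET_EXPORTS   // building the DLL itself
        #define MXNET_API __declspec(dllexport)
      #else                  // consuming the DLL (e.g. the unit tests)
        #define MXNET_API __declspec(dllimport)
      #endif
    #else
      #define MXNET_API      // no-op on non-MSVC toolchains
    #endif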

diff --git a/include/mxnet/resource.h b/include/mxnet/resource.h
index 385573259f..bb2d263414 100644
--- a/include/mxnet/resource.h
+++ b/include/mxnet/resource.h
@@ -188,7 +188,7 @@ struct Resource {
 };
 
 /*! \brief Global resource manager */
-class ResourceManager {
+class MXNET_API ResourceManager {
  public:
   /*!
    * \brief Get resource of requested type.
diff --git a/include/mxnet/storage.h b/include/mxnet/storage.h
index a8481c1d36..78dee75358 100644
--- a/include/mxnet/storage.h
+++ b/include/mxnet/storage.h
@@ -33,7 +33,7 @@ namespace mxnet {
 /*!
  * \brief Storage manager across multiple devices.
  */
-class Storage {
+class MXNET_API Storage {
  public:
   /*!
    * \brief Storage handle.
diff --git a/nnvm b/nnvm
index 7a052d6784..0c08e4edb5 160000
--- a/nnvm
+++ b/nnvm
@@ -1 +1 @@
-Subproject commit 7a052d678455f1c96538c1cc5a25f11115363558
+Subproject commit 0c08e4edb5c40daa7bd484a5351e4c769f4d179a
diff --git a/src/engine/engine_impl.h b/src/engine/engine_impl.h
index b3ec34dc85..b773566330 100644
--- a/src/engine/engine_impl.h
+++ b/src/engine/engine_impl.h
@@ -89,12 +89,12 @@ static constexpr std::size_t kMaxNumGPUs = 16;
 
 // predeclare factory function for each type of engine
 /*! \return NaiveEngine instance */
-Engine *CreateNaiveEngine();
+MXNET_API Engine *CreateNaiveEngine();
 #if MXNET_PREDICT_ONLY == 0
 /*! \return ThreadedEnginePooled instance */
-Engine *CreateThreadedEnginePooled();
+MXNET_API Engine *CreateThreadedEnginePooled();
 /*! \return ThreadedEnginePerDevice instance */
-Engine *CreateThreadedEnginePerDevice();
+MXNET_API Engine *CreateThreadedEnginePerDevice();
 #endif
 }  // namespace engine
 }  // namespace mxnet
diff --git a/src/executor/exec_pass.h b/src/executor/exec_pass.h
index bf4b14771d..355633ce50 100644
--- a/src/executor/exec_pass.h
+++ b/src/executor/exec_pass.h
@@ -183,7 +183,7 @@ Graph InferStorageType(Graph&& graph,
  *         are kDefaultStorage, DispatchMode::kFCompute is assigned to dispatch_mode. Otherwise,
  *         DispatchMode::kFComputeFallback is assigned to dispatch_mode.
  */
-bool DefaultStorageType(const nnvm::NodeAttrs& attrs,
+MXNET_API bool DefaultStorageType(const nnvm::NodeAttrs& attrs,
                         const int dev_mask,
                         DispatchMode* dispatch_mode,
                         std::vector<int> *iattr,
diff --git a/src/operator/batch_norm_v1-inl.h b/src/operator/batch_norm_v1-inl.h
index 329d66d06d..1b347e92b9 100644
--- a/src/operator/batch_norm_v1-inl.h
+++ b/src/operator/batch_norm_v1-inl.h
@@ -46,7 +46,7 @@ enum BatchNormOpAuxiliary {kMovingMean, kMovingVar};
 enum BatchNormBackResource {kTempSpace};
 }  // namespace batchnorm_v1
 
-struct BatchNormV1Param : public dmlc::Parameter<BatchNormV1Param> {
+struct MXNET_API BatchNormV1Param : public dmlc::Parameter<BatchNormV1Param> {
   float eps;
   float momentum;
   bool fix_gamma;
@@ -245,7 +245,7 @@ Operator *CreateOp(BatchNormV1Param param, int dtype);
 
 
 #if DMLC_USE_CXX11
-class BatchNormV1Prop : public OperatorProperty {
+class MXNET_API BatchNormV1Prop : public OperatorProperty {
  public:
   void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {
     param_.Init(kwargs);
diff --git a/src/operator/nn/activation-inl.h b/src/operator/nn/activation-inl.h
index ac8b747f0f..59b60f49d4 100644
--- a/src/operator/nn/activation-inl.h
+++ b/src/operator/nn/activation-inl.h
@@ -48,7 +48,7 @@ enum ActivationOpOutputs {kOut};
 enum ActivationOpType {kReLU, kSigmoid, kTanh, kSoftReLU};
 }  // activation
 
-struct ActivationParam : public dmlc::Parameter<ActivationParam> {
+struct MXNET_API ActivationParam : public dmlc::Parameter<ActivationParam> {
   // use int for enumeration
   int act_type;
   DMLC_DECLARE_PARAMETER(ActivationParam) {
@@ -125,7 +125,7 @@ template<typename xpu>
 Operator* CreateOp(ActivationParam type, int dtype, const TShape& dshape);
 
 #if DMLC_USE_CXX11
-class ActivationProp : public OperatorProperty {
+class MXNET_API ActivationProp : public OperatorProperty {
  public:
   void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {
     param_.Init(kwargs);
diff --git a/src/operator/nn/batch_norm-imp.h b/src/operator/nn/batch_norm-imp.h
new file mode 100644
index 0000000000..1b3024a493
--- /dev/null
+++ b/src/operator/nn/batch_norm-imp.h
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2017 by Contributors
+ * \file batch_norm-imp.h
+ * \brief Global disable flag for the batchnorm MKL operator, used by unit tests
+ * \author Bing Xu, Chris Olivier, Hector Li
+ */
+#ifndef MXNET_OPERATOR_NN_BATCH_NORM_IMP_H_
+#define MXNET_OPERATOR_NN_BATCH_NORM_IMP_H_
+
+#include "batch_norm-inl.h"
+
+namespace mxnet {
+namespace op {
+
+namespace batchnorm {
+
+    volatile bool disable_mkl = false;
+
+}  // namespace batchnorm
+
+}  // namespace op
+}  // namespace mxnet
+
+
+
+#endif  // MXNET_OPERATOR_NN_BATCH_NORM_IMP_H_
+
diff --git a/src/operator/nn/batch_norm-inl.h b/src/operator/nn/batch_norm-inl.h
index 2a9dee2cf8..80da9a6094 100644
--- a/src/operator/nn/batch_norm-inl.h
+++ b/src/operator/nn/batch_norm-inl.h
@@ -56,7 +56,7 @@ constexpr int DEFAULT_AXIS = 1;
 }  // namespace batchnorm
 
 /*! \brief Parameters for BatchNorm operator */
-struct BatchNormParam : public dmlc::Parameter<BatchNormParam> {
+struct MXNET_API BatchNormParam : public dmlc::Parameter<BatchNormParam> {
   double eps;
   float momentum;
   bool fix_gamma;
@@ -216,7 +216,7 @@ template<typename xpu>
 Operator *CreateOp(BatchNormParam param, const int dtype, const TShape& shape);
 
 #if DMLC_USE_CXX11
-class BatchNormProp : public OperatorProperty {
+class MXNET_API BatchNormProp : public OperatorProperty {
  public:
   void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {
     param_.Init(kwargs);
diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index ca28832394..b21b5eeda9 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -25,6 +25,7 @@
 */
 
 #include "batch_norm-inl.h"
+#include "batch_norm-imp.h"
 #include <nnvm/op_attr_types.h>
 #if MXNET_USE_MKL2017 == 1
 #include <mkl_memory.h>
@@ -40,9 +41,6 @@ namespace mxnet {
 namespace op {
 namespace batchnorm {
 
-/*! \brief Global disable of batchnorm mkl operator for unit testing */
-volatile bool disable_mkl = false;
-
 /*! \brief Fast-foreach when you don't care about the position other than channel */
 template<typename DType, typename OnData>
 static inline void ForEachFast(const BNTensor3<DType> &tensor,
diff --git a/src/operator/nn/fully_connected-inl.h b/src/operator/nn/fully_connected-inl.h
index 9f3deec244..34ca349d28 100644
--- a/src/operator/nn/fully_connected-inl.h
+++ b/src/operator/nn/fully_connected-inl.h
@@ -46,7 +46,7 @@ enum FullyConnectedOpInputs {kData, kWeight, kBias};
 enum FullyConnectedOpOutputs {kOut};
 }  // fullc
 
-struct FullyConnectedParam : public dmlc::Parameter<FullyConnectedParam> {
+struct MXNET_API FullyConnectedParam : public dmlc::Parameter<FullyConnectedParam> {
   int num_hidden;
   bool no_bias;
   bool flatten;
@@ -189,7 +189,7 @@ Operator* CreateOp(FullyConnectedParam param, int dtype,
                    Context ctx);
 
 #if DMLC_USE_CXX11
-class FullyConnectedProp : public OperatorProperty {
+class MXNET_API FullyConnectedProp : public OperatorProperty {
  public:
   std::vector<std::string> ListArguments() const override {
     if (!param_.no_bias) {
diff --git a/src/operator/operator_tune-inl.h b/src/operator/operator_tune-inl.h
index d4eec999f9..849f751d19 100644
--- a/src/operator/operator_tune-inl.h
+++ b/src/operator/operator_tune-inl.h
@@ -519,6 +519,32 @@ class OperatorTune : public OperatorTuneByType<DType> {
   static bool output_tuning_data_;
 };
 
+/*!
+ * \brief Instantiate static variables for OperatorTune<DType>, where 'DType' is specified
+ */
+#define IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(__typ$) \
+  template<> bool OperatorTune<__typ$>::initialized_ = false; \
+  template<> std::vector<__typ$> OperatorTune<__typ$>::data_set_ = {}; \
+  template<> volatile tune::TuningMode OperatorTuneByType<__typ$>::tuning_mode_ = tune::kAuto; \
+  template<> volatile int OperatorTune<__typ$>::volatile_int_ = 9;  /* arbitrary number */ \
+  template<> std::unordered_set<std::string> OperatorTune<__typ$>::operator_names_({}); \
+  template<> bool OperatorTune<__typ$>::output_tuning_data_ = false; \
+  template<> std::list<void (*)()> *OperatorTune<__typ$>::GetTuningList() { \
+    static std::list<void (*)()> ll; \
+    return &ll; \
+  }
+
+/*!
+ * \brief Static variables for different types (i.e. OperatorTune<float>, OperatorTune<double>, etc.)
+ */
+IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(float);
+IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(double);
+IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(mshadow::half::half_t);
+IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(int8_t);
+IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(uint8_t);
+IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(int32_t);
+IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(int64_t);
+
 /*!
  * \brief Class that tunes unary operators
  * \tparam DType Data type to be used when tuning the kernel operations
diff --git a/src/operator/operator_tune.cc b/src/operator/operator_tune.cc
index 7cdf7a2078..04cf272271 100644
--- a/src/operator/operator_tune.cc
+++ b/src/operator/operator_tune.cc
@@ -34,32 +34,6 @@ std::atomic<bool> OperatorTuneBase::calculated_(false);
 bool OperatorTuneBase::verbose_tuning_info_ = false;
 double OperatorTuneBase::tuning_weight_scale_ = 0.0;
 
-/*!
- * \brief Instantiate static variables for OperatorTune<DType>, where 'DType' is specified
- */
-#define IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(__typ$) \
-  template<> bool OperatorTune<__typ$>::initialized_ = false; \
-  template<> std::vector<__typ$> OperatorTune<__typ$>::data_set_ = {}; \
-  template<> volatile tune::TuningMode OperatorTuneByType<__typ$>::tuning_mode_ = tune::kAuto; \
-  template<> volatile int OperatorTune<__typ$>::volatile_int_ = 9;  /* arbitrary number */ \
-  template<> std::unordered_set<std::string> OperatorTune<__typ$>::operator_names_({}); \
-  template<> bool OperatorTune<__typ$>::output_tuning_data_ = false; \
-  template<> std::list<void (*)()> *OperatorTune<__typ$>::GetTuningList() { \
-    static std::list<void (*)()> ll; \
-    return &ll; \
-  }
-
-/*!
- * \brief Static variables for different types (ie OperatorTune<float>, OperatorTune<double>, etc.
- */
-IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(float);
-IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(double);
-IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(mshadow::half::half_t);
-IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(int8_t);
-IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(uint8_t);
-IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(int32_t);
-IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(int64_t);
-
 /*!
  * \brief Init variable used to facilitate registering a tunable operator during
  *        static initialization
diff --git a/src/operator/operator_tune.h b/src/operator/operator_tune.h
index 6e73ed3711..68a5b61e27 100644
--- a/src/operator/operator_tune.h
+++ b/src/operator/operator_tune.h
@@ -169,7 +169,7 @@ enum TuningMode {
 }  // namespace tune
 
 template<typename DType>
-class OperatorTuneByType : public OperatorTuneBase {
+class MXNET_API OperatorTuneByType : public OperatorTuneBase {
  public:
   /*!
    * \brief Set tuning mode
diff --git a/src/operator/slice_channel-inl.h b/src/operator/slice_channel-inl.h
index 3b14a26ea6..c7a8758c6a 100644
--- a/src/operator/slice_channel-inl.h
+++ b/src/operator/slice_channel-inl.h
@@ -44,7 +44,7 @@ namespace slice_enum {
 enum SliceChannelOpInputs {kData};
 }  // namespace slice_enum
 
-struct SliceChannelParam : public dmlc::Parameter<SliceChannelParam> {
+struct MXNET_API SliceChannelParam : public dmlc::Parameter<SliceChannelParam> {
   int num_outputs;
   int axis;
   bool squeeze_axis;
@@ -149,7 +149,7 @@ Operator *CreateOp(SliceChannelParam param, int dtype);
 
 
 #if DMLC_USE_CXX11
-class SliceChannelProp : public OperatorProperty {
+class MXNET_API SliceChannelProp : public OperatorProperty {
  public:
   void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {
     param_.Init(kwargs);
diff --git a/tests/cpp/operator/batchnorm_test.cc b/tests/cpp/operator/batchnorm_test.cc
index 179e42a383..ac188446b6 100644
--- a/tests/cpp/operator/batchnorm_test.cc
+++ b/tests/cpp/operator/batchnorm_test.cc
@@ -27,6 +27,7 @@
 #include <dmlc/logging.h>
 #include <mxnet/tensor_blob.h>
 #include "../../src/operator/nn/batch_norm-inl.h"
+#include "../../src/operator/nn/batch_norm-imp.h"
 #include "../../src/operator/batch_norm_v1-inl.h"
 #include "./test_legacy_op.h"
 #include "executor/exec_pass.h"
@@ -770,11 +771,11 @@ static void timingTest(const std::string& label,
   ss << "Timing: " << COUNT << " iterations";
 
   for (size_t i = 0; i < COUNT; ++i) {
-    index_t batchSize;
-    index_t channels;
-    index_t depth;
-    index_t height;
-    index_t width;
+    mxnet::index_t batchSize;
+    mxnet::index_t channels;
+    mxnet::index_t depth;
+    mxnet::index_t height;
+    mxnet::index_t width;
 
     do {
       batchSize = stochastic ? test::rangedRand(1U, BATCH_SIZE * 2U) : TIMING_BATCH_SIZE;
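
Besides the export annotations, the operator_tune change above moves the
explicit specializations of OperatorTune<DType>'s static members out of
operator_tune.cc and into operator_tune-inl.h, so the per-type
definitions are visible wherever the header is used. A minimal,
self-contained sketch of that pattern, with illustrative names rather
than MXNet's own, compilable as a single translation unit:

    // Per-type static state for a class template, defined through
    // explicit specializations, mirroring the
    // IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE macro in the diff above.
    #include <cstdio>
    #include <list>

    template <typename DType>
    class Tune {
     public:
      static bool initialized_;                 // per-type flag
      static std::list<void (*)()> *GetList();  // per-type registry
    };

    // One definition per type. These are not inline, so each binary
    // must compile them in exactly one translation unit.
    #define IMPLEMENT_TUNE_STATICS(T)                         \
      template <> bool Tune<T>::initialized_ = false;         \
      template <> std::list<void (*)()> *Tune<T>::GetList() { \
        static std::list<void (*)()> ll;                      \
        return &ll;                                           \
      }

    IMPLEMENT_TUNE_STATICS(float)
    IMPLEMENT_TUNE_STATICS(double)

    int main() {
      std::printf("entries for float: %zu\n", Tune<float>::GetList()->size());
      return 0;
    }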


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services