You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/11/07 18:13:22 UTC

[GitHub] szha closed pull request #13130: sample_like operators (#13034) v1.3.x

szha closed pull request #13130: sample_like operators (#13034) v1.3.x
URL: https://github.com/apache/incubator-mxnet/pull/13130
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git a/python/mxnet/base.py b/python/mxnet/base.py
index 2bfcdd62eda..11f06a4b58e 100644
--- a/python/mxnet/base.py
+++ b/python/mxnet/base.py
@@ -560,7 +560,7 @@ def _as_list(obj):
         return [obj]
 
 
-_OP_NAME_PREFIX_LIST = ['_contrib_', '_linalg_', '_sparse_', '_image_']
+_OP_NAME_PREFIX_LIST = ['_contrib_', '_linalg_', '_sparse_', '_image_', '_random_']
 
 
 def _get_op_name_prefix(op_name):
@@ -616,9 +616,13 @@ def _init_op_module(root_namespace, module_name, make_op_func):
         op_name_prefix = _get_op_name_prefix(name)
         module_name_local = module_name
         if len(op_name_prefix) > 0:
-            func_name = name[len(op_name_prefix):]
-            cur_module = submodule_dict[op_name_prefix]
-            module_name_local = "%s.%s.%s" % (root_namespace, module_name, op_name_prefix[1:-1])
+            if op_name_prefix != '_random_' or name.endswith('_like'):
+                func_name = name[len(op_name_prefix):]
+                cur_module = submodule_dict[op_name_prefix]
+                module_name_local = "%s.%s.%s" % (root_namespace, module_name, op_name_prefix[1:-1])
+            else:
+                func_name = name
+                cur_module = module_internal
         elif name.startswith('_'):
             func_name = name
             cur_module = module_internal
diff --git a/src/operator/random/sample_op.cc b/src/operator/random/sample_op.cc
index a2b332456fb..b18e70414a1 100644
--- a/src/operator/random/sample_op.cc
+++ b/src/operator/random/sample_op.cc
@@ -37,15 +37,44 @@ DMLC_REGISTER_PARAMETER(SamplePoissonParam);
 DMLC_REGISTER_PARAMETER(SampleNegBinomialParam);
 DMLC_REGISTER_PARAMETER(SampleGenNegBinomialParam);
 
-#define MXNET_OPERATOR_REGISTER_SAMPLE(name, ParamType)                 \
-  NNVM_REGISTER_OP(name)                                                \
-  .set_num_inputs(0)                                                    \
-  .set_num_outputs(1)                                                   \
-  .set_attr_parser(ParamParser<ParamType>)                              \
-  .set_attr<nnvm::FInferShape>("FInferShape", InitShape<ParamType>)     \
-  .set_attr<nnvm::FInferType>("FInferType", SampleOpType<ParamType>)    \
-  .set_attr<FResourceRequest>("FResourceRequest", SampleResource)       \
-  .add_arguments(ParamType::__FIELDS__())
+DMLC_REGISTER_PARAMETER(SampleUniformLikeParam);
+DMLC_REGISTER_PARAMETER(SampleNormalLikeParam);
+DMLC_REGISTER_PARAMETER(SampleGammaLikeParam);
+DMLC_REGISTER_PARAMETER(SampleExponentialLikeParam);
+DMLC_REGISTER_PARAMETER(SamplePoissonLikeParam);
+DMLC_REGISTER_PARAMETER(SampleNegBinomialLikeParam);
+DMLC_REGISTER_PARAMETER(SampleGenNegBinomialLikeParam);
+
+#define MXNET_OPERATOR_REGISTER_SAMPLE(name, ParamType)                                      \
+  NNVM_REGISTER_OP(name)                                                                     \
+  .set_num_inputs(0)                                                                         \
+  .set_num_outputs(1)                                                                        \
+  .set_attr_parser(ParamParser<ParamType>)                                                   \
+  .set_attr<nnvm::FInferShape>("FInferShape", InitShape<ParamType>)                          \
+  .set_attr<nnvm::FInferType>("FInferType", SampleOpType<ParamType>)                         \
+  .set_attr<FResourceRequest>("FResourceRequest", SampleResource)                            \
+  .add_arguments(ParamType::__FIELDS__())                                                    \
+  .set_attr<FInferStorageType>("FInferStorageType", InitStorageType<ParamType, true, false>) \
+  .set_attr<FCompute>("FCompute<cpu>", Sample_<cpu, ParamType>)                              \
+  .set_attr<FComputeEx>("FComputeEx<cpu>", SampleEx_<cpu, ParamType>)
+
+#define MXNET_OPERATOR_REGISTER_SAMPLE_LIKE(name, ParamType)                              \
+  NNVM_REGISTER_OP(name)                                                                  \
+  .set_num_inputs(1)                                                                      \
+  .set_num_outputs(1)                                                                     \
+  .set_attr_parser(ParamParser<ParamType>)                                                \
+  .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<1, 1>)                        \
+  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)                           \
+  .set_attr<FResourceRequest>("FResourceRequest", SampleResource)                         \
+  .set_attr<nnvm::FIgnoreInputs>("FIgnoreInputs",                                         \
+    [](const NodeAttrs& attrs) { return std::vector<uint32_t>(1, 0); })                   \
+  .set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes)                              \
+  .add_arguments(ParamType::__FIELDS__())                                                 \
+  .add_argument("data", "NDArray-or-Symbol", "The input")                                 \
+  .set_attr<FInferStorageType>("FInferStorageType",                                       \
+                               ElemwiseStorageType<1, 1, false, true, false>)             \
+  .set_attr<FCompute>("FCompute<cpu>", Sample_<cpu, ParamType>)                           \
+  .set_attr<FComputeEx>("FComputeEx<cpu>", SampleEx_<cpu, ParamType>)
 
 // Add "uniform" alias for backward compatibility
 MXNET_OPERATOR_REGISTER_SAMPLE(_random_uniform, SampleUniformParam)
@@ -63,10 +92,7 @@ Example::
    uniform(low=0, high=1, shape=(2,2)) = [[ 0.60276335,  0.85794562],
                                           [ 0.54488319,  0.84725171]]
 
-)code" ADD_FILELINE)
-.set_attr<FInferStorageType>("FInferStorageType", InitStorageType<SampleUniformParam, true, false>)
-.set_attr<FCompute>("FCompute<cpu>", Sample_<cpu, UniformSampler<cpu>>)
-.set_attr<FComputeEx>("FComputeEx<cpu>", SampleEx_<cpu, UniformSampler<cpu>>);
+)code" ADD_FILELINE);
 
 // Add "normal" alias for backward compatibility
 MXNET_OPERATOR_REGISTER_SAMPLE(_random_normal, SampleNormalParam)
@@ -76,16 +102,14 @@ MXNET_OPERATOR_REGISTER_SAMPLE(_random_normal, SampleNormalParam)
 
 .. note:: The existing alias ``normal`` is deprecated.
 
-Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* (standard deviation).
+Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale*
+(standard deviation).
 
 Example::
 
    normal(loc=0, scale=1, shape=(2,2)) = [[ 1.89171135, -1.16881478],
                                           [-1.23474145,  1.55807114]]
-)code" ADD_FILELINE)
-.set_attr<FInferStorageType>("FInferStorageType", InitStorageType<SampleNormalParam, true, false>)
-.set_attr<FCompute>("FCompute<cpu>", Sample_<cpu, NormalSampler<cpu>>)
-.set_attr<FComputeEx>("FComputeEx<cpu>", SampleEx_<cpu, NormalSampler<cpu>>);
+)code" ADD_FILELINE);
 
 MXNET_OPERATOR_REGISTER_SAMPLE(_random_gamma, SampleGammaParam)
 .add_alias("random_gamma")
@@ -97,10 +121,7 @@ Example::
 
    gamma(alpha=9, beta=0.5, shape=(2,2)) = [[ 7.10486984,  3.37695289],
                                             [ 3.91697288,  3.65933681]]
-)code" ADD_FILELINE)
-.set_attr<FInferStorageType>("FInferStorageType", InitStorageType<SampleGammaParam, true, false>)
-.set_attr<FCompute>("FCompute<cpu>", Sample_<cpu, GammaSampler<cpu>>)
-.set_attr<FComputeEx>("FComputeEx<cpu>", SampleEx_<cpu, GammaSampler<cpu>>);
+)code" ADD_FILELINE);
 
 MXNET_OPERATOR_REGISTER_SAMPLE(_random_exponential, SampleExponentialParam)
 .add_alias("random_exponential")
@@ -112,11 +133,7 @@ Example::
 
    exponential(lam=4, shape=(2,2)) = [[ 0.0097189 ,  0.08999364],
                                       [ 0.04146638,  0.31715935]]
-)code" ADD_FILELINE)
-.set_attr<FInferStorageType>("FInferStorageType",
-                             InitStorageType<SampleExponentialParam, true, false>)
-.set_attr<FCompute>("FCompute<cpu>", Sample_<cpu, ExponentialSampler<cpu>>)
-.set_attr<FComputeEx>("FComputeEx<cpu>", SampleEx_<cpu, ExponentialSampler<cpu>>);
+)code" ADD_FILELINE);
 
 MXNET_OPERATOR_REGISTER_SAMPLE(_random_poisson, SamplePoissonParam)
 .add_alias("random_poisson")
@@ -129,10 +146,7 @@ Example::
 
    poisson(lam=4, shape=(2,2)) = [[ 5.,  2.],
                                   [ 4.,  6.]]
-)code" ADD_FILELINE)
-.set_attr<FInferStorageType>("FInferStorageType", InitStorageType<SamplePoissonParam, true, false>)
-.set_attr<FCompute>("FCompute<cpu>", Sample_<cpu, PoissonSampler<cpu>>)
-.set_attr<FComputeEx>("FComputeEx<cpu>", SampleEx_<cpu, PoissonSampler<cpu>>);
+)code" ADD_FILELINE);
 
 MXNET_OPERATOR_REGISTER_SAMPLE(_random_negative_binomial, SampleNegBinomialParam)
 .add_alias("random_negative_binomial")
@@ -146,11 +160,7 @@ Example::
 
    negative_binomial(k=3, p=0.4, shape=(2,2)) = [[ 4.,  7.],
                                                  [ 2.,  5.]]
-)code" ADD_FILELINE)
-.set_attr<FInferStorageType>("FInferStorageType",
-                             InitStorageType<SampleNegBinomialParam, true, false>)
-.set_attr<FCompute>("FCompute<cpu>", Sample_<cpu, NegativeBinomialSampler<cpu>>)
-.set_attr<FComputeEx>("FComputeEx<cpu>", SampleEx_<cpu, NegativeBinomialSampler<cpu>>);
+)code" ADD_FILELINE);
 
 MXNET_OPERATOR_REGISTER_SAMPLE(_random_generalized_negative_binomial, SampleGenNegBinomialParam)
 .add_alias("random_generalized_negative_binomial")
@@ -165,11 +175,98 @@ Example::
 
    generalized_negative_binomial(mu=2.0, alpha=0.3, shape=(2,2)) = [[ 2.,  1.],
                                                                     [ 6.,  4.]]
-)code" ADD_FILELINE)
-.set_attr<FInferStorageType>("FInferStorageType",
-                             InitStorageType<SampleGenNegBinomialParam, true, false>)
-.set_attr<FCompute>("FCompute<cpu>", Sample_<cpu, GeneralizedNegativeBinomialSampler<cpu>>)
-.set_attr<FComputeEx>("FComputeEx<cpu>", SampleEx_<cpu, GeneralizedNegativeBinomialSampler<cpu>>);
+)code" ADD_FILELINE);
+
+
+// *_like operators
+
+MXNET_OPERATOR_REGISTER_SAMPLE_LIKE(_random_uniform_like, SampleUniformLikeParam)
+.describe(R"code(Draw random samples from a uniform distribution according to the input array shape.
+
+Samples are uniformly distributed over the half-open interval *[low, high)*
+(includes *low*, but excludes *high*).
+
+Example::
+
+   uniform(low=0, high=1, data=ones(2,2)) = [[ 0.60276335,  0.85794562],
+                                             [ 0.54488319,  0.84725171]]
+
+)code" ADD_FILELINE);
+
+MXNET_OPERATOR_REGISTER_SAMPLE_LIKE(_random_normal_like, SampleNormalLikeParam)
+.describe(R"code(Draw random samples from a normal (Gaussian) distribution according to the input array shape.
+
+Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale*
+(standard deviation).
+
+Example::
+
+   normal(loc=0, scale=1, data=ones(2,2)) = [[ 1.89171135, -1.16881478],
+                                             [-1.23474145,  1.55807114]]
+)code" ADD_FILELINE);
+
+MXNET_OPERATOR_REGISTER_SAMPLE_LIKE(_random_gamma_like, SampleGammaLikeParam)
+.describe(R"code(Draw random samples from a gamma distribution according to the input array shape.
+
+Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale).
+
+Example::
+
+   gamma(alpha=9, beta=0.5, data=ones(2,2)) = [[ 7.10486984,  3.37695289],
+                                               [ 3.91697288,  3.65933681]]
+)code" ADD_FILELINE);
+
+MXNET_OPERATOR_REGISTER_SAMPLE_LIKE(_random_exponential_like, SampleExponentialLikeParam)
+.describe(R"code(Draw random samples from an exponential distribution according to the input array shape.
+
+Samples are distributed according to an exponential distribution parametrized by *lambda* (rate).
+
+Example::
+
+   exponential(lam=4, data=ones(2,2)) = [[ 0.0097189 ,  0.08999364],
+                                         [ 0.04146638,  0.31715935]]
+)code" ADD_FILELINE);
+
+MXNET_OPERATOR_REGISTER_SAMPLE_LIKE(_random_poisson_like, SamplePoissonLikeParam)
+.describe(R"code(Draw random samples from a Poisson distribution according to the input array shape.
+
+Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate).
+Samples will always be returned as a floating point data type.
+
+Example::
+
+   poisson(lam=4, data=ones(2,2)) = [[ 5.,  2.],
+                                     [ 4.,  6.]]
+)code" ADD_FILELINE);
+
+MXNET_OPERATOR_REGISTER_SAMPLE_LIKE(_random_negative_binomial_like, SampleNegBinomialLikeParam)
+.describe(R"code(Draw random samples from a negative binomial distribution according to the input array shape.
+
+Samples are distributed according to a negative binomial distribution parametrized by
+*k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment).
+Samples will always be returned as a floating point data type.
+
+Example::
+
+   negative_binomial(k=3, p=0.4, data=ones(2,2)) = [[ 4.,  7.],
+                                                    [ 2.,  5.]]
+)code" ADD_FILELINE);
+
+MXNET_OPERATOR_REGISTER_SAMPLE_LIKE(_random_generalized_negative_binomial_like,
+                                    SampleGenNegBinomialLikeParam)
+.describe(R"code(Draw random samples from a generalized negative binomial distribution according to the
+input array shape.
+
+Samples are distributed according to a generalized negative binomial distribution parametrized by
+*mu* (mean) and *alpha* (dispersion). *alpha* is defined as *1/k* where *k* is the failure limit of the
+number of unsuccessful experiments (generalized to real numbers).
+Samples will always be returned as a floating point data type.
+
+Example::
+
+   generalized_negative_binomial(mu=2.0, alpha=0.3, data=ones(2,2)) = [[ 2.,  1.],
+                                                                       [ 6.,  4.]]
+)code" ADD_FILELINE);
 
 }  // namespace op
 }  // namespace mxnet
diff --git a/src/operator/random/sample_op.cu b/src/operator/random/sample_op.cu
index 7a593d0d36b..55c04a989a0 100644
--- a/src/operator/random/sample_op.cu
+++ b/src/operator/random/sample_op.cu
@@ -27,33 +27,26 @@
 namespace mxnet {
 namespace op {
 
-NNVM_REGISTER_OP(_random_uniform)
-.set_attr<FCompute>("FCompute<gpu>", Sample_<gpu, UniformSampler<gpu>>)
-.set_attr<FComputeEx>("FComputeEx<gpu>", SampleEx_<gpu, UniformSampler<gpu>>);
-
-NNVM_REGISTER_OP(_random_normal)
-.set_attr<FCompute>("FCompute<gpu>", Sample_<gpu, NormalSampler<gpu>>)
-.set_attr<FComputeEx>("FComputeEx<gpu>", SampleEx_<gpu, NormalSampler<gpu>>);
-
-NNVM_REGISTER_OP(_random_gamma)
-.set_attr<FCompute>("FCompute<gpu>", Sample_<gpu, GammaSampler<gpu>>)
-.set_attr<FComputeEx>("FComputeEx<gpu>", SampleEx_<gpu, GammaSampler<gpu>>);
-
-NNVM_REGISTER_OP(_random_exponential)
-.set_attr<FCompute>("FCompute<gpu>", Sample_<gpu, ExponentialSampler<gpu>>)
-.set_attr<FComputeEx>("FComputeEx<gpu>", SampleEx_<gpu, ExponentialSampler<gpu>>);
-
-NNVM_REGISTER_OP(_random_poisson)
-.set_attr<FCompute>("FCompute<gpu>", Sample_<gpu, PoissonSampler<gpu>>)
-.set_attr<FComputeEx>("FComputeEx<gpu>", SampleEx_<gpu, PoissonSampler<gpu>>);
-
-NNVM_REGISTER_OP(_random_negative_binomial)
-.set_attr<FCompute>("FCompute<gpu>", Sample_<gpu, NegativeBinomialSampler<gpu>>)
-.set_attr<FComputeEx>("FComputeEx<gpu>", SampleEx_<gpu, NegativeBinomialSampler<gpu>>);
-
-NNVM_REGISTER_OP(_random_generalized_negative_binomial)
-.set_attr<FCompute>("FCompute<gpu>", Sample_<gpu, GeneralizedNegativeBinomialSampler<gpu>>)
-.set_attr<FComputeEx>("FComputeEx<gpu>", SampleEx_<gpu, GeneralizedNegativeBinomialSampler<gpu>>);
+#define MXNET_OPERATOR_REGISTER_SAMPLE_GPU(name, ParamType)            \
+  NNVM_REGISTER_OP(name)                                               \
+  .set_attr<FCompute>("FCompute<gpu>", Sample_<gpu, ParamType>)        \
+  .set_attr<FComputeEx>("FComputeEx<gpu>", SampleEx_<gpu, ParamType>); \
+
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_uniform, SampleUniformParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_normal, SampleNormalParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_gamma, SampleGammaParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_exponential, SampleExponentialParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_poisson, SamplePoissonParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_negative_binomial, SampleNegBinomialParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_generalized_negative_binomial, SampleGenNegBinomialParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_uniform_like, SampleUniformLikeParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_normal_like, SampleNormalLikeParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_gamma_like, SampleGammaLikeParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_exponential_like, SampleExponentialLikeParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_poisson_like, SamplePoissonLikeParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_negative_binomial_like, SampleNegBinomialLikeParam)
+MXNET_OPERATOR_REGISTER_SAMPLE_GPU(_random_generalized_negative_binomial_like,
+                                   SampleGenNegBinomialLikeParam)
 
 }  // namespace op
 }  // namespace mxnet
diff --git a/src/operator/random/sample_op.h b/src/operator/random/sample_op.h
index a81b41a09af..b4d00e70722 100644
--- a/src/operator/random/sample_op.h
+++ b/src/operator/random/sample_op.h
@@ -38,12 +38,48 @@
 namespace mxnet {
 namespace op {
 
-struct SampleUniformParam : public dmlc::Parameter<SampleUniformParam> {
-  float low;
-  float high;
+
+struct SampleOpParam {
   TShape shape;
   std::string ctx;
   int dtype;
+};
+
+struct UniformParam {
+  float low;
+  float high;
+};
+
+struct NormalParam {
+  float loc;
+  float scale;
+};
+
+struct GammaParam {
+  float alpha;
+  float beta;
+};
+
+struct ExponentialParam {
+  float lam;
+};
+
+struct PoissonParam {
+  float lam;
+};
+
+struct NegBinomialParam {
+  int k;
+  float p;
+};
+
+struct GenNegBinomialParam {
+  float mu;
+  float alpha;
+};
+
+struct SampleUniformParam : public dmlc::Parameter<SampleUniformParam>,
+  UniformParam, SampleOpParam {
   DMLC_DECLARE_PARAMETER(SampleUniformParam) {
     DMLC_DECLARE_FIELD(low).set_default(0.0f)
     .describe("Lower bound of the distribution.");
@@ -67,12 +103,8 @@ struct SampleUniformParam : public dmlc::Parameter<SampleUniformParam> {
   }
 };
 
-struct SampleNormalParam : public dmlc::Parameter<SampleNormalParam> {
-  float loc;
-  float scale;
-  TShape shape;
-  std::string ctx;
-  int dtype;
+struct SampleNormalParam : public dmlc::Parameter<SampleNormalParam>,
+  NormalParam, SampleOpParam {
   DMLC_DECLARE_PARAMETER(SampleNormalParam) {
     DMLC_DECLARE_FIELD(loc).set_default(0.0f)
     .describe("Mean of the distribution.");
@@ -96,12 +128,8 @@ struct SampleNormalParam : public dmlc::Parameter<SampleNormalParam> {
   }
 };
 
-struct SampleGammaParam : public dmlc::Parameter<SampleGammaParam> {
-  float alpha;
-  float beta;
-  TShape shape;
-  std::string ctx;
-  int dtype;
+struct SampleGammaParam : public dmlc::Parameter<SampleGammaParam>,
+  GammaParam, SampleOpParam {
   DMLC_DECLARE_PARAMETER(SampleGammaParam) {
     DMLC_DECLARE_FIELD(alpha).set_default(1.0f)
     .describe("Alpha parameter (shape) of the gamma distribution.");
@@ -125,11 +153,8 @@ struct SampleGammaParam : public dmlc::Parameter<SampleGammaParam> {
   }
 };
 
-struct SampleExponentialParam : public dmlc::Parameter<SampleExponentialParam> {
-  float lam;
-  TShape shape;
-  std::string ctx;
-  int dtype;
+struct SampleExponentialParam : public dmlc::Parameter<SampleExponentialParam>,
+  ExponentialParam, SampleOpParam {
   DMLC_DECLARE_PARAMETER(SampleExponentialParam) {
     DMLC_DECLARE_FIELD(lam).set_default(1.0f)
     .describe("Lambda parameter (rate) of the exponential distribution.");
@@ -151,11 +176,8 @@ struct SampleExponentialParam : public dmlc::Parameter<SampleExponentialParam> {
   }
 };
 
-struct SamplePoissonParam : public dmlc::Parameter<SamplePoissonParam> {
-  float lam;
-  TShape shape;
-  std::string ctx;
-  int dtype;
+struct SamplePoissonParam : public dmlc::Parameter<SamplePoissonParam>,
+  PoissonParam, SampleOpParam {
   DMLC_DECLARE_PARAMETER(SamplePoissonParam) {
     DMLC_DECLARE_FIELD(lam).set_default(1.0f)
     .describe("Lambda parameter (rate) of the Poisson distribution.");
@@ -177,12 +199,8 @@ struct SamplePoissonParam : public dmlc::Parameter<SamplePoissonParam> {
   }
 };
 
-struct SampleNegBinomialParam : public dmlc::Parameter<SampleNegBinomialParam> {
-  int k;
-  float p;
-  TShape shape;
-  std::string ctx;
-  int dtype;
+struct SampleNegBinomialParam : public dmlc::Parameter<SampleNegBinomialParam>,
+  NegBinomialParam, SampleOpParam {
   DMLC_DECLARE_PARAMETER(SampleNegBinomialParam) {
     DMLC_DECLARE_FIELD(k).set_default(1)
     .describe("Limit of unsuccessful experiments.");
@@ -206,12 +224,8 @@ struct SampleNegBinomialParam : public dmlc::Parameter<SampleNegBinomialParam> {
   }
 };
 
-struct SampleGenNegBinomialParam : public dmlc::Parameter<SampleGenNegBinomialParam> {
-  float mu;
-  float alpha;
-  TShape shape;
-  std::string ctx;
-  int dtype;
+struct SampleGenNegBinomialParam : public dmlc::Parameter<SampleGenNegBinomialParam>,
+  GenNegBinomialParam, SampleOpParam {
   DMLC_DECLARE_PARAMETER(SampleGenNegBinomialParam) {
     DMLC_DECLARE_FIELD(mu).set_default(1.0f)
     .describe("Mean of the negative binomial distribution.");
@@ -235,6 +249,72 @@ struct SampleGenNegBinomialParam : public dmlc::Parameter<SampleGenNegBinomialPa
   }
 };
 
+struct SampleUniformLikeParam : public dmlc::Parameter<SampleUniformLikeParam>,
+  UniformParam {
+  DMLC_DECLARE_PARAMETER(SampleUniformLikeParam) {
+    DMLC_DECLARE_FIELD(low).set_default(0.0f)
+    .describe("Lower bound of the distribution.");
+    DMLC_DECLARE_FIELD(high).set_default(1.0f)
+    .describe("Upper bound of the distribution.");
+  }
+};
+
+struct SampleNormalLikeParam : public dmlc::Parameter<SampleNormalLikeParam>,
+  NormalParam {
+  DMLC_DECLARE_PARAMETER(SampleNormalLikeParam) {
+    DMLC_DECLARE_FIELD(loc).set_default(0.0f)
+    .describe("Mean of the distribution.");
+    DMLC_DECLARE_FIELD(scale).set_default(1.0f)
+    .describe("Standard deviation of the distribution.");
+  }
+};
+
+struct SampleGammaLikeParam : public dmlc::Parameter<SampleGammaLikeParam>,
+  GammaParam {
+  DMLC_DECLARE_PARAMETER(SampleGammaLikeParam) {
+    DMLC_DECLARE_FIELD(alpha).set_default(1.0f)
+    .describe("Alpha parameter (shape) of the gamma distribution.");
+    DMLC_DECLARE_FIELD(beta).set_default(1.0f)
+    .describe("Beta parameter (scale) of the gamma distribution.");
+  }
+};
+
+struct SampleExponentialLikeParam : public dmlc::Parameter<SampleExponentialLikeParam>,
+  ExponentialParam {
+  DMLC_DECLARE_PARAMETER(SampleExponentialLikeParam) {
+    DMLC_DECLARE_FIELD(lam).set_default(1.0f)
+    .describe("Lambda parameter (rate) of the exponential distribution.");
+  }
+};
+
+struct SamplePoissonLikeParam : public dmlc::Parameter<SamplePoissonLikeParam>,
+  PoissonParam {
+  DMLC_DECLARE_PARAMETER(SamplePoissonLikeParam) {
+    DMLC_DECLARE_FIELD(lam).set_default(1.0f)
+    .describe("Lambda parameter (rate) of the Poisson distribution.");
+  }
+};
+
+struct SampleNegBinomialLikeParam : public dmlc::Parameter<SampleNegBinomialLikeParam>,
+  NegBinomialParam {
+  DMLC_DECLARE_PARAMETER(SampleNegBinomialLikeParam) {
+    DMLC_DECLARE_FIELD(k).set_default(1)
+    .describe("Limit of unsuccessful experiments.");
+    DMLC_DECLARE_FIELD(p).set_default(1.0f)
+    .describe("Failure probability in each experiment.");
+  }
+};
+
+struct SampleGenNegBinomialLikeParam : public dmlc::Parameter<SampleGenNegBinomialLikeParam>,
+  GenNegBinomialParam {
+  DMLC_DECLARE_PARAMETER(SampleGenNegBinomialLikeParam) {
+    DMLC_DECLARE_FIELD(mu).set_default(1.0f)
+    .describe("Mean of the negative binomial distribution.");
+    DMLC_DECLARE_FIELD(alpha).set_default(1.0f)
+    .describe("Alpha (dispersion) parameter of the negative binomial distribution.");
+  }
+};
+
 using FSampleCompute = std::function<void (const nnvm::NodeAttrs& attrs,
                                            const OpContext& ctx,
                                            const OpReqType& req,
@@ -262,162 +342,289 @@ MSHADOW_FORCE_INLINE void GetSamplingTempData(DType p1, DType p2, const OpContex
   Copy(*parm2, Tensor<cpu, 1, DType>(&p2, Shape1(1)), s);
 }
 
-template<typename xpu, typename Sampler>
+template<typename xpu, typename ParamType>
+static inline void uniform_op(const nnvm::NodeAttrs& attrs,
+                              const OpContext& ctx,
+                              const OpReqType& req,
+                              TBlob* outputs) {
+  Stream<xpu> *s = ctx.get_stream<xpu>();
+  const UniformParam& param = nnvm::get<ParamType>(attrs.parsed);
+  CHECK_GE(param.high, param.low) << "low must be less or equal to high in uniform distribution";
+  Tensor<xpu, 1, float> low, high;
+  GetSamplingTempData<xpu, float>(param.low, param.high, ctx,
+                                  &low, &high);
+  UniformSampler<xpu> sampler;
+  MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
+    RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
+    Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
+    sampler.Sample(low, high, out, pgen, s);
+  });
+}
+
+template<typename xpu, typename ParamType>
+static inline void normal_op(const nnvm::NodeAttrs& attrs,
+                             const OpContext& ctx,
+                             const OpReqType& req,
+                             TBlob* outputs) {
+  Stream<xpu> *s = ctx.get_stream<xpu>();
+  const NormalParam& param = nnvm::get<ParamType>(attrs.parsed);
+  CHECK_GT(param.scale, 0) << "scale parameter in gaussian has to be positive";
+  Tensor<xpu, 1, float> loc, scale;
+  GetSamplingTempData<xpu, float>(param.loc, param.scale, ctx, &loc, &scale);
+  NormalSampler<xpu> sampler;
+  MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
+    RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
+    Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
+    sampler.Sample(loc, scale, out, pgen, s);
+  });
+}
+
+template<typename xpu, typename ParamType>
+static inline void gamma_op(const nnvm::NodeAttrs& attrs,
+                            const OpContext& ctx,
+                            const OpReqType& req,
+                            TBlob* outputs) {
+  Stream<xpu> *s = ctx.get_stream<xpu>();
+  const GammaParam& param = nnvm::get<ParamType>(attrs.parsed);
+  CHECK_GT(param.alpha, 0) << "alpha parameter in gamma distribution has to be positive";
+  CHECK_GT(param.beta, 0) << "beta parameter in gamma distribution has to be positive";
+  Tensor<xpu, 1, float> alpha, beta;
+  GetSamplingTempData<xpu, float>(param.alpha, param.beta, ctx, &alpha, &beta);
+  GammaSampler<xpu> sampler;
+  MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
+    RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
+    Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
+    sampler.Sample(alpha, beta, out, pgen, s);
+  });
+}
+
+template<typename xpu, typename ParamType>
+static inline void exponential_op(const nnvm::NodeAttrs& attrs,
+                                  const OpContext& ctx,
+                                  const OpReqType& req,
+                                  TBlob* outputs) {
+  Stream<xpu> *s = ctx.get_stream<xpu>();
+  const ExponentialParam& param = nnvm::get<ParamType>(attrs.parsed);
+  CHECK_GT(param.lam, 0) << "lambda parameter in exponential distribution has to be positive";
+  Tensor<xpu, 1, float> lam, dummy;
+  GetSamplingTempData<xpu, float>(param.lam, 0, ctx, &lam, &dummy);
+  ExponentialSampler<xpu> sampler;
+  MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
+    RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
+    Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
+    sampler.Sample(lam, out, pgen, s);
+  });
+}
+
+template<typename xpu, typename ParamType>
+static inline void poisson_op(const nnvm::NodeAttrs& attrs,
+                              const OpContext& ctx,
+                              const OpReqType& req,
+                              TBlob* outputs) {
+  Stream<xpu> *s = ctx.get_stream<xpu>();
+  const PoissonParam& param = nnvm::get<ParamType>(attrs.parsed);
+  CHECK_GE(param.lam, 0) << "lambda parameter in poisson distribution has to be non-negative";
+  Tensor<xpu, 1, float> lam, dummy;
+  GetSamplingTempData<xpu, float>(param.lam, 0, ctx, &lam, &dummy);
+  PoissonSampler<xpu> sampler;
+  MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
+    RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
+    Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
+    sampler.Sample(lam, out, pgen, s);
+  });
+}
+
+template<typename xpu, typename ParamType>
+static inline void neg_binomial_op(const nnvm::NodeAttrs& attrs,
+                                   const OpContext& ctx,
+                                   const OpReqType& req,
+                                   TBlob* outputs) {
+  Stream<xpu> *s = ctx.get_stream<xpu>();
+  const NegBinomialParam& param = nnvm::get<ParamType>(attrs.parsed);
+  CHECK_GE(param.k, 0) << "k parameter in negative binomial distribution has to be non-negative";
+  CHECK_GE(param.p, 0) << "p parameter in negative binomial distribution has to be non-negative";
+  Tensor<xpu, 1, float> k, p;
+  GetSamplingTempData<xpu, float>(param.k, param.p, ctx, &k, &p);
+  NegativeBinomialSampler<xpu> sampler;
+  MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
+    RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
+    Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
+    sampler.Sample(k, p, out, pgen, s);
+  });
+}
+
+template<typename xpu, typename ParamType>
+static inline void gen_neg_binomial_op(const nnvm::NodeAttrs& attrs,
+                                       const OpContext& ctx,
+                                       const OpReqType& req,
+                                       TBlob* outputs) {
+  Stream<xpu> *s = ctx.get_stream<xpu>();
+  const GenNegBinomialParam& param = nnvm::get<ParamType>(attrs.parsed);
+  CHECK_GE(param.mu, 0)
+    << "mu parameter in generalized negative binomial distribution has to be non-negative";
+  CHECK_GE(param.alpha, 0)
+    << "alpha parameter in generalized negative binomial distribution has to be non-negative";
+  Tensor<xpu, 1, float> mu, alpha;
+  GetSamplingTempData<xpu, float>(param.mu, param.alpha, ctx, &mu, &alpha);
+  GeneralizedNegativeBinomialSampler<xpu> sampler;
+  MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
+    RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
+    Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
+    sampler.Sample(mu, alpha, out, pgen, s);
+  });
+}
+
+template<typename xpu, typename ParamType>
 struct SampleMaster;
 
 template<typename xpu>
-struct SampleMaster<xpu, UniformSampler<xpu>> {
-  static void op(const nnvm::NodeAttrs& attrs,
-                 const OpContext& ctx,
-                 const OpReqType& req,
-                 TBlob* outputs) {
-    Stream<xpu> *s = ctx.get_stream<xpu>();
-    const SampleUniformParam& param = nnvm::get<SampleUniformParam>(attrs.parsed);
-    CHECK_GE(param.high, param.low) << "low must be less or equal to high in uniform distribution";
-    Tensor<xpu, 1, float> low, high;
-    GetSamplingTempData<xpu, float>(param.low, param.high, ctx,
-                                    &low, &high);
-    UniformSampler<xpu> sampler;
-    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
-      RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
-      Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
-      sampler.Sample(low, high, out, pgen, s);
-    });
+struct SampleMaster<xpu, SampleUniformParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    uniform_op<xpu, SampleUniformParam>(attrs, ctx, req, outputs);
   }
 };
 
 template<typename xpu>
-struct SampleMaster<xpu, NormalSampler<xpu>> {
-  static void op(const nnvm::NodeAttrs& attrs,
-                 const OpContext& ctx,
-                 const OpReqType& req,
-                 TBlob* outputs) {
-    Stream<xpu> *s = ctx.get_stream<xpu>();
-    const SampleNormalParam& param = nnvm::get<SampleNormalParam>(attrs.parsed);
-    CHECK_GT(param.scale, 0) << "scale parameter in gaussian has to be positive";
-    Tensor<xpu, 1, float> loc, scale;
-    GetSamplingTempData<xpu, float>(param.loc, param.scale, ctx, &loc, &scale);
-    NormalSampler<xpu> sampler;
-    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
-      RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
-      Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
-      sampler.Sample(loc, scale, out, pgen, s);
-    });
+struct SampleMaster<xpu, SampleUniformLikeParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    uniform_op<xpu, SampleUniformLikeParam>(attrs, ctx, req, outputs);
   }
 };
 
 template<typename xpu>
-struct SampleMaster<xpu, GammaSampler<xpu>> {
-  static void op(const nnvm::NodeAttrs& attrs,
-                 const OpContext& ctx,
-                 const OpReqType& req,
-                 TBlob* outputs) {
-    Stream<xpu> *s = ctx.get_stream<xpu>();
-    const SampleGammaParam& param = nnvm::get<SampleGammaParam>(attrs.parsed);
-    CHECK_GT(param.alpha, 0) << "alpha parameter in gamma distribution has to be positive";
-    CHECK_GT(param.beta, 0) << "beta parameter in gamma distribution has to be positive";
-    Tensor<xpu, 1, float> alpha, beta;
-    GetSamplingTempData<xpu, float>(param.alpha, param.beta, ctx, &alpha, &beta);
-    GammaSampler<xpu> sampler;
-    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
-      RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
-      Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
-      sampler.Sample(alpha, beta, out, pgen, s);
-    });
+struct SampleMaster<xpu, SampleNormalParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    normal_op<xpu, SampleNormalParam>(attrs, ctx, req, outputs);
   }
 };
 
 template<typename xpu>
-struct SampleMaster<xpu, ExponentialSampler<xpu>> {
-  static void op(const nnvm::NodeAttrs& attrs,
-                 const OpContext& ctx,
-                 const OpReqType& req,
-                 TBlob* outputs) {
-    Stream<xpu> *s = ctx.get_stream<xpu>();
-    const SampleExponentialParam& param = nnvm::get<SampleExponentialParam>(attrs.parsed);
-    CHECK_GT(param.lam, 0) << "lambda parameter in exponential distribution has to be positive";
-    Tensor<xpu, 1, float> lam, dummy;
-    GetSamplingTempData<xpu, float>(param.lam, 0, ctx, &lam, &dummy);
-    ExponentialSampler<xpu> sampler;
-    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
-      RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
-      Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
-      sampler.Sample(lam, out, pgen, s);
-    });
+struct SampleMaster<xpu, SampleNormalLikeParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    normal_op<xpu, SampleNormalLikeParam>(attrs, ctx, req, outputs);
   }
 };
 
 template<typename xpu>
-struct SampleMaster<xpu, PoissonSampler<xpu>> {
-  static void op(const nnvm::NodeAttrs& attrs,
-                 const OpContext& ctx,
-                 const OpReqType& req,
-                 TBlob* outputs) {
-    Stream<xpu> *s = ctx.get_stream<xpu>();
-    const SamplePoissonParam& param = nnvm::get<SamplePoissonParam>(attrs.parsed);
-    CHECK_GE(param.lam, 0) << "lambda parameter in poisson distribution has to be non-negative";
-    Tensor<xpu, 1, float> lam, dummy;
-    GetSamplingTempData<xpu, float>(param.lam, 0, ctx, &lam, &dummy);
-    PoissonSampler<xpu> sampler;
-    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
-      RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
-      Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
-      sampler.Sample(lam, out, pgen, s);
-    });
+struct SampleMaster<xpu, SampleGammaParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    gamma_op<xpu, SampleGammaParam>(attrs, ctx, req, outputs);
   }
 };
 
 template<typename xpu>
-struct SampleMaster<xpu, NegativeBinomialSampler<xpu>> {
-  static void op(const nnvm::NodeAttrs& attrs,
-                 const OpContext& ctx,
-                 const OpReqType& req,
-                 TBlob* outputs) {
-    Stream<xpu> *s = ctx.get_stream<xpu>();
-    const SampleNegBinomialParam& param = nnvm::get<SampleNegBinomialParam>(attrs.parsed);
-    CHECK_GE(param.k, 0) << "k parameter in negative binomial distribution has to be non-negative";
-    CHECK_GE(param.p, 0) << "p parameter in negative binomial distribution has to be non-negative";
-    Tensor<xpu, 1, float> k, p;
-    GetSamplingTempData<xpu, float>(param.k, param.p, ctx, &k, &p);
-    NegativeBinomialSampler<xpu> sampler;
-    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
-      RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
-      Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
-      sampler.Sample(k, p, out, pgen, s);
-    });
+struct SampleMaster<xpu, SampleGammaLikeParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    gamma_op<xpu, SampleGammaLikeParam>(attrs, ctx, req, outputs);
   }
 };
 
 template<typename xpu>
-struct SampleMaster<xpu, GeneralizedNegativeBinomialSampler<xpu>> {
-  static void op(const nnvm::NodeAttrs& attrs,
-                 const OpContext& ctx,
-                 const OpReqType& req,
-                 TBlob* outputs) {
-    Stream<xpu> *s = ctx.get_stream<xpu>();
-    const SampleGenNegBinomialParam& param = nnvm::get<SampleGenNegBinomialParam>(attrs.parsed);
-    CHECK_GE(param.mu, 0)
-      << "mu parameter in generalized negative binomial distribution has to be non-negative";
-    CHECK_GE(param.alpha, 0)
-      << "alpha parameter in generalized negative binomial distribution has to be non-negative";
-    Tensor<xpu, 1, float> mu, alpha;
-    GetSamplingTempData<xpu, float>(param.mu, param.alpha, ctx, &mu, &alpha);
-    GeneralizedNegativeBinomialSampler<xpu> sampler;
-    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
-      RandGenerator<xpu, OType> *pgen = ctx.requested[0].get_parallel_random<xpu, OType>();
-      Tensor<xpu, 1, OType> out = outputs->FlatTo1D<xpu, OType>(s);
-      sampler.Sample(mu, alpha, out, pgen, s);
-    });
+struct SampleMaster<xpu, SampleExponentialParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    exponential_op<xpu, SampleExponentialParam>(attrs, ctx, req, outputs);
+  }
+};
+
+template<typename xpu>
+struct SampleMaster<xpu, SampleExponentialLikeParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    exponential_op<xpu, SampleExponentialLikeParam>(attrs, ctx, req, outputs);
+  }
+};
+
+template<typename xpu>
+struct SampleMaster<xpu, SamplePoissonParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    poisson_op<xpu, SamplePoissonParam>(attrs, ctx, req, outputs);
+  }
+};
+
+template<typename xpu>
+struct SampleMaster<xpu, SamplePoissonLikeParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    poisson_op<xpu, SamplePoissonLikeParam>(attrs, ctx, req, outputs);
+  }
+};
+
+template<typename xpu>
+struct SampleMaster<xpu, SampleNegBinomialParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    neg_binomial_op<xpu, SampleNegBinomialParam>(attrs, ctx, req, outputs);
+  }
+};
+
+template<typename xpu>
+struct SampleMaster<xpu, SampleNegBinomialLikeParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    neg_binomial_op<xpu, SampleNegBinomialLikeParam>(attrs, ctx, req, outputs);
+  }
+};
+
+template<typename xpu>
+struct SampleMaster<xpu, SampleGenNegBinomialParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    gen_neg_binomial_op<xpu, SampleGenNegBinomialParam>(attrs, ctx, req, outputs);
   }
 };
 
-template<typename xpu, typename Sampler>
+template<typename xpu>
+struct SampleMaster<xpu, SampleGenNegBinomialLikeParam> {
+  static inline void op(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const OpReqType& req,
+                        TBlob* outputs) {
+    gen_neg_binomial_op<xpu, SampleGenNegBinomialLikeParam>(attrs, ctx, req, outputs);
+  }
+};
+
+
+template<typename xpu, typename ParamType>
 void SampleComputeEx_(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<NDArray>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<NDArray>& outputs,
-                      SampleMaster<xpu, Sampler> sample_master) {
+                      SampleMaster<xpu, ParamType> sample_master) {
   using namespace mxnet::op;
   NDArray output = outputs[0];
   mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
@@ -438,24 +645,24 @@ void SampleComputeEx_(const nnvm::NodeAttrs& attrs,
   }
 }
 
-template<typename xpu, typename Sampler>
+template<typename xpu, typename ParamType>
 void Sample_(const nnvm::NodeAttrs& attrs,
              const OpContext& ctx,
              const std::vector<TBlob>& inputs,
              const std::vector<OpReqType>& req,
              const std::vector<TBlob>& outputs) {
   TBlob out = outputs[0];
-  SampleMaster<xpu, Sampler>::op(attrs, ctx, req[0], &out);
+  SampleMaster<xpu, ParamType>::op(attrs, ctx, req[0], &out);
 }
 
-template<typename xpu, typename Sampler>
+template<typename xpu, typename ParamType>
 void SampleEx_(const nnvm::NodeAttrs& attrs,
                const OpContext& ctx,
                const std::vector<NDArray>& inputs,
                const std::vector<OpReqType>& req,
                const std::vector<NDArray>& outputs) {
-  SampleMaster<xpu, Sampler> sample_master;
-  SampleComputeEx_<xpu, Sampler>(attrs, ctx, inputs, req, outputs, sample_master);
+  SampleMaster<xpu, ParamType> sample_master;
+  SampleComputeEx_<xpu, ParamType>(attrs, ctx, inputs, req, outputs, sample_master);
 }
 
 template<typename ParamType>
diff --git a/tests/python/unittest/test_random.py b/tests/python/unittest/test_random.py
index 4310658ae0b..6a59d8627ba 100644
--- a/tests/python/unittest/test_random.py
+++ b/tests/python/unittest/test_random.py
@@ -45,6 +45,16 @@ def check_with_device(device, dtype):
                 ('std',  lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)
             ]
         },
+        {
+            'name': 'normal_like',
+            'symbol': mx.sym.random.normal_like,
+            'ndop': mx.nd.random.normal_like,
+            'params': { 'loc': 10.0, 'scale': 0.5 },
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64) - params['loc']),  tol),
+                ('std',  lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)
+            ]
+        },
         {
             'name': 'randn',
             'ndop': mx.nd.random.randn,
@@ -66,62 +76,122 @@ def check_with_device(device, dtype):
             ]
         },
         {
-                'name': 'gamma',
-                'symbol': mx.sym.random.gamma,
-                'ndop': mx.nd.random.gamma,
-                'params': { 'alpha': 9.0, 'beta': 0.5 },
-                'inputs': [ ('alpha', [ [ 0.0, 2.5 ], [ 9.75, 11.0 ] ]) , ('beta', [ [ 1.0, 0.7 ], [ 0.5, 0.3 ] ]) ],
-                'checks': [
-                    ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['alpha'] * params['beta'], tol),
-                    ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['alpha'] * params['beta'] ** 2), tol)
-                ]
-            },
-            {
-                'name': 'exponential',
-                'symbol': mx.sym.random.exponential,
-                'ndop': mx.nd.random.exponential,
-                'params': { 'scale': 1.0/4.0 },
-                'inputs': [ ('scale', [ [ 1.0/1.0, 1.0/8.5 ], [ 1.0/2.7 , 1.0/0.5 ] ]) ],
-                'checks': [
-                    ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['scale'], tol),
-                    ('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)
-                ]
-            },
-            {
-                'name': 'poisson',
-                'symbol': mx.sym.random.poisson,
-                'ndop': mx.nd.random.poisson,
-                'params': { 'lam': 4.0 },
-                'inputs': [ ('lam', [ [ 25.0, 8.5 ], [ 2.7 , 0.5 ] ]) ],
-                'checks': [
-                    ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['lam'], tol),
-                    ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['lam']), tol)
-                ]
-            },
-            {
-                'name': 'neg-binomial',
-                'symbol': mx.sym.random.negative_binomial,
-                'ndop': mx.nd.random.negative_binomial,
-                'params': { 'k': 3, 'p': 0.4 },
-                'inputs': [ ('k', [ [ 3, 4 ], [ 5 , 6 ] ]) , ('p', [ [ 0.4 , 0.77 ], [ 0.5, 0.84 ] ]) ],
-                'checks': [
-                    ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['k'] * (1.0 - params['p']) /  params['p'], tol),
-                    ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['k'] * (1.0 - params['p']))/params['p'], tol)
-                ]
-            },
-            {
-                'name': 'gen-neg-binomial',
-                'symbol': mx.sym.random.generalized_negative_binomial,
-                'ndop': mx.nd.random.generalized_negative_binomial,
-                'params': { 'mu': 2.0, 'alpha': 0.3 },
-                'inputs': [ ('mu', [ [ 2.0, 2.5 ], [ 1.3, 1.9 ] ]) , ('alpha', [ [ 1.0, 0.1 ], [ 0.2, 0.5 ] ]) ],
-                'checks': [
-                    ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['mu'], tol),
-                    ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['mu'] + params['alpha'] * params['mu'] ** 2 ), tol)
-                ]
-            }
-
-        ]
+            'name': 'uniform_like',
+            'symbol': mx.sym.random.uniform_like,
+            'ndop': mx.nd.random.uniform_like,
+            'params': { 'low': -1.5, 'high': 3.0 },
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64)) - (params['low'] + params['high']) / 2.0, tol),
+                ('std', lambda x,  params: np.std(x.astype(np.float64)) - np.sqrt(1.0 / 12.0) * (params['high'] - params['low']), tol)
+            ]
+        },
+        {
+            'name': 'gamma',
+            'symbol': mx.sym.random.gamma,
+            'ndop': mx.nd.random.gamma,
+            'params': { 'alpha': 9.0, 'beta': 0.5 },
+            'inputs': [ ('alpha', [ [ 0.0, 2.5 ], [ 9.75, 11.0 ] ]) , ('beta', [ [ 1.0, 0.7 ], [ 0.5, 0.3 ] ]) ],
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['alpha'] * params['beta'], tol),
+                ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['alpha'] * params['beta'] ** 2), tol)
+            ]
+        },
+        {
+            'name': 'gamma_like',
+            'symbol': mx.sym.random.gamma_like,
+            'ndop': mx.nd.random.gamma_like,
+            'params': { 'alpha': 9.0, 'beta': 0.5 },
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['alpha'] * params['beta'], tol),
+                ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['alpha'] * params['beta'] ** 2), tol)
+            ]
+        },
+        {
+            'name': 'exponential',
+            'symbol': mx.sym.random.exponential,
+            'ndop': mx.nd.random.exponential,
+            'params': { 'scale': 1.0/4.0 },
+            'inputs': [ ('scale', [ [ 1.0/1.0, 1.0/8.5 ], [ 1.0/2.7 , 1.0/0.5 ] ]) ],
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['scale'], tol),
+                ('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)
+            ]
+        },
+        {
+            'name': 'exponential_like',
+            'symbol': mx.sym.random.exponential_like,
+            'ndop': mx.nd.random.exponential_like,
+            'params': { 'lam': 4.0 },
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64)) - 1.0/params['lam'], tol),
+                ('std', lambda x, params: np.std(x.astype(np.float64)) - 1.0/params['lam'], tol)
+            ]
+        },
+        {
+            'name': 'poisson',
+            'symbol': mx.sym.random.poisson,
+            'ndop': mx.nd.random.poisson,
+            'params': { 'lam': 4.0 },
+            'inputs': [ ('lam', [ [ 25.0, 8.5 ], [ 2.7 , 0.5 ] ]) ],
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['lam'], tol),
+                ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['lam']), tol)
+            ]
+        },
+        {
+            'name': 'poisson_like',
+            'symbol': mx.sym.random.poisson_like,
+            'ndop': mx.nd.random.poisson_like,
+            'params': { 'lam': 4.0 },
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['lam'], tol),
+                ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['lam']), tol)
+            ]
+        },
+        {
+            'name': 'neg_binomial',
+            'symbol': mx.sym.random.negative_binomial,
+            'ndop': mx.nd.random.negative_binomial,
+            'params': { 'k': 3, 'p': 0.4 },
+            'inputs': [ ('k', [ [ 3, 4 ], [ 5 , 6 ] ]) , ('p', [ [ 0.4 , 0.77 ], [ 0.5, 0.84 ] ]) ],
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['k'] * (1.0 - params['p']) /  params['p'], tol),
+                ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['k'] * (1.0 - params['p']))/params['p'], tol)
+            ]
+        },
+        {
+            'name': 'neg_binomial_like',
+            'symbol': mx.sym.random.negative_binomial_like,
+            'ndop': mx.nd.random.negative_binomial_like,
+            'params': { 'k': 3, 'p': 0.4 },
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['k'] * (1.0 - params['p']) /  params['p'], tol),
+                ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['k'] * (1.0 - params['p']))/params['p'], tol)
+            ]
+        },
+        {
+            'name': 'gen_neg_binomial',
+            'symbol': mx.sym.random.generalized_negative_binomial,
+            'ndop': mx.nd.random.generalized_negative_binomial,
+            'params': { 'mu': 2.0, 'alpha': 0.3 },
+            'inputs': [ ('mu', [ [ 2.0, 2.5 ], [ 1.3, 1.9 ] ]) , ('alpha', [ [ 1.0, 0.1 ], [ 0.2, 0.5 ] ]) ],
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['mu'], tol),
+                ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['mu'] + params['alpha'] * params['mu'] ** 2 ), tol)
+            ]
+        },
+        {
+            'name': 'gen_neg_binomial_like',
+            'symbol': mx.sym.random.generalized_negative_binomial_like,
+            'ndop': mx.nd.random.generalized_negative_binomial_like,
+            'params': { 'mu': 2.0, 'alpha': 0.3 },
+            'checks': [
+                ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['mu'], tol),
+                ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['mu'] + params['alpha'] * params['mu'] ** 2 ), tol)
+            ]
+        },
+
+    ]
 
     # Create enough samples such that we get a meaningful distribution.
     shape = (500, 500)
@@ -136,6 +206,10 @@ def check_with_device(device, dtype):
         if name == 'randn':
             params.pop('shape')  # randn does not accept shape param
             args = shape
+        if name.endswith('_like'):
+            params['data'] = mx.nd.ones(params.pop('shape'),
+                                        dtype=params.pop('dtype'),
+                                        ctx=params.pop('ctx'))
         mx.random.seed(128)
         ret1 = ndop(*args, **params).asnumpy()
         mx.random.seed(128)
@@ -171,6 +245,8 @@ def check_with_device(device, dtype):
         X = mx.sym.Variable("X")
         params = symbdic['params'].copy()
         params.update(shape=shape, dtype=dtype)
+        if name.endswith('_like'):
+            params['data'] = mx.sym.ones(params.pop('shape'))
         Y = symbol(**params) + X
         x = mx.nd.zeros(shape, dtype=dtype, ctx=device)
         xgrad = mx.nd.zeros(shape, dtype=dtype, ctx=device)
@@ -189,6 +265,7 @@ def check_with_device(device, dtype):
         ret1 = un1.asnumpy()
         for check_name, check_func, tol in symbdic['checks']:
             assert np.abs(check_func(ret1, params)) < tol, "symbolic test: %s check for `%s` did not pass" % (check_name, name)
+        if name.endswith('_like'): continue
 
         # check multi-distribution sampling
         symbol = symbdic['symbol']


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services