Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/10/03 18:41:47 UTC

[GitHub] eric-haibin-lin closed pull request #12694: [MXNET-860] Use emplace where helpful

URL: https://github.com/apache/incubator-mxnet/pull/12694
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:


diff --git a/.clang-tidy b/.clang-tidy
index af99026e5d7..0080662285b 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -54,7 +54,8 @@ Checks: >
 
 # In order to trigger an error, you must have a rule defined both in checks and in this section.
 WarningsAsErrors: >
-    cppcoreguidelines-no-malloc, modernize-use-nullptr, performance-unnecessary-copy-initialization
+    cppcoreguidelines-no-malloc, modernize-use-nullptr, performance-unnecessary-copy-initialization,
+    modernize-use-emplace
 
 # Todo: define a better regex match that includes most project headers, but excludes third party
 # code.
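
For reference, modernize-use-emplace flags push_back calls whose argument
is a temporary built only to be inserted, since emplace_back can forward
the constructor arguments and build the element in place. A minimal sketch
of the pattern the check rewrites (the Widget type is hypothetical, not
from the codebase):

    #include <string>
    #include <vector>

    struct Widget {
      Widget(int id, std::string name) : id(id), name(std::move(name)) {}
      int id;
      std::string name;
    };

    int main() {
      std::vector<Widget> v;
      v.push_back(Widget(1, "a"));  // flagged: constructs a temporary, then moves it
      v.emplace_back(2, "b");       // builds the element in place
    }
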
diff --git a/src/c_api/c_api_executor.cc b/src/c_api/c_api_executor.cc
index c3a64736c01..1f936b16432 100644
--- a/src/c_api/c_api_executor.cc
+++ b/src/c_api/c_api_executor.cc
@@ -168,7 +168,7 @@ int MXExecutorBindEX(SymbolHandle symbol_handle,
   for (mx_uint i = 0; i < len; ++i) {
     in_args_vec.push_back(*(in_args_ptr[i]));
     if (arg_grad_ptr[i] == nullptr) {
-      arg_grad_vec.push_back(NDArray());
+      arg_grad_vec.emplace_back();
       grad_req_vec.push_back(kNullOp);
     } else {
       arg_grad_vec.push_back(*(arg_grad_ptr[i]));
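
The change above works because emplace_back() with no arguments
default-constructs the element directly inside the vector, so the
explicit NDArray() temporary (and the move out of it) disappears.
The same idiom in a standalone sketch, using std::string in place of
NDArray:

    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> v;
      v.push_back(std::string());  // temporary, then a move into the vector
      v.emplace_back();            // default-constructed in place
    }
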
diff --git a/src/c_api/c_api_ndarray.cc b/src/c_api/c_api_ndarray.cc
index 34bd4b20aa5..18f6c411e03 100644
--- a/src/c_api/c_api_ndarray.cc
+++ b/src/c_api/c_api_ndarray.cc
@@ -176,7 +176,7 @@ int MXCreateCachedOpEx(SymbolHandle handle,
   API_BEGIN();
   std::vector<std::pair<std::string, std::string> > flags;
   for (int i = 0; i < num_flags; ++i) {
-    flags.push_back({keys[i], vals[i]});
+    flags.emplace_back(keys[i], vals[i]);
   }
   *out = new CachedOpPtr(new CachedOp(*sym, flags));
   API_END();
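
Here emplace_back(keys[i], vals[i]) forwards both arguments straight to
the std::pair constructor, so no intermediate pair is materialized. A
minimal sketch of the same rewrite:

    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      const char* key = "ctx";
      const char* val = "cpu";
      std::vector<std::pair<std::string, std::string>> flags;
      flags.push_back({key, val});   // temporary pair, then a move
      flags.emplace_back(key, val);  // pair constructed in place
    }
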
diff --git a/src/c_api/c_predict_api.cc b/src/c_api/c_predict_api.cc
index 24358e44947..cae5c68aa51 100644
--- a/src/c_api/c_predict_api.cc
+++ b/src/c_api/c_predict_api.cc
@@ -191,7 +191,7 @@ int _CreatePartialOut(const char* symbol_json_str,
       if (known_shape.count(key) != 0) {
         in_shapes.push_back(known_shape[key]);
       } else {
-        in_shapes.push_back(TShape());
+        in_shapes.emplace_back();
       }
     }
     nnvm::Graph g; g.outputs = sym.outputs;
@@ -369,7 +369,7 @@ int MXPredReshape(mx_uint num_input_nodes,
       if (new_shape.count(key) != 0) {
         in_shapes.push_back(new_shape[key]);
       } else {
-        in_shapes.push_back(TShape());
+        in_shapes.emplace_back();
       }
     }
     nnvm::Graph g; g.outputs = ret->sym.outputs;
diff --git a/src/io/iter_mnist.cc b/src/io/iter_mnist.cc
index 40223472c96..139cf47d63e 100644
--- a/src/io/iter_mnist.cc
+++ b/src/io/iter_mnist.cc
@@ -124,11 +124,11 @@ class MNISTIter: public IIterator<TBlobBatch> {
       batch_label_.dptr_ = &labels_[loc_];
       out_.data.clear();
       if (param_.flat) {
-          out_.data.push_back(TBlob(batch_data_.FlatTo2D()));
+          out_.data.emplace_back(batch_data_.FlatTo2D());
       } else {
-          out_.data.push_back(TBlob(batch_data_));
+          out_.data.emplace_back(batch_data_);
       }
-      out_.data.push_back(TBlob(batch_label_));
+      out_.data.emplace_back(batch_label_);
       loc_ += param_.batch_size;
       return true;
     } else {
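
In this hunk the explicit TBlob(...) wrapper goes away: emplace_back
hands the tensor to TBlob's converting constructor and builds the
element inside the vector. The same idiom with hypothetical stand-in
types (Tensor and Blob below are illustrations, not the MXNet classes):

    #include <vector>

    struct Tensor { float* data = nullptr; };

    struct Blob {
      Blob(const Tensor& t) : data(t.data) {}  // converting constructor
      float* data;
    };

    int main() {
      Tensor t;
      std::vector<Blob> out;
      out.push_back(Blob(t));  // temporary Blob, then a move
      out.emplace_back(t);     // Blob constructed in place from the Tensor
    }
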
diff --git a/src/kvstore/comm_tree.h b/src/kvstore/comm_tree.h
index 1ebfcdc8010..e857f33687e 100644
--- a/src/kvstore/comm_tree.h
+++ b/src/kvstore/comm_tree.h
@@ -395,7 +395,7 @@ class CommDeviceTree : public CommDevice {
     // 2) Force copy_buf to be of kRecvBufferSize
     // 3) Do not use greedy assignment; all keys are assigned to each GPU
     for (unsigned i = 0; i < devs_.size(); ++i)
-      tree_merge_buf_.push_back(std::unordered_map<int, TreeBufferEntry>());
+      tree_merge_buf_.emplace_back();
 
     bool delay_alloc = true;
     std::map<int, int> key_dist;
@@ -457,7 +457,7 @@ class CommDeviceTree : public CommDevice {
               if (row == devs_.size()-1)
                 shape_copy[0] = last_slice;
               buf.merged[row] = NDArray(shape_copy, ctx, delay_alloc, type);
-              buf.copy_buf.push_back(std::vector<NDArray>());
+              buf.copy_buf.emplace_back();
               if (buf.copy_buf[row].empty()) {
                 buf.copy_buf[row].resize(kBranch-1);
                 for (size_t col = 0; col < buf.copy_buf[0].size(); ++col) {
@@ -469,9 +469,9 @@ class CommDeviceTree : public CommDevice {
               }
             }
           } else {
-            buf.merged.push_back(NDArray(shape, ctx, false, type));
+            buf.merged.emplace_back(shape, ctx, false, type);
             if (buf.copy_buf.empty()) {
-              buf.copy_buf.push_back(std::vector<NDArray>());
+              buf.copy_buf.emplace_back();
               buf.copy_buf[0].resize(kBranch-1);
               for (size_t col = 0; col < buf.copy_buf[0].size(); ++col) {
                 buf.copy_buf[0][col] = NDArray(buf.merged[0].shape(),
diff --git a/src/nnvm/legacy_json_util.cc b/src/nnvm/legacy_json_util.cc
index 935a64c7c22..0697aebbd0d 100644
--- a/src/nnvm/legacy_json_util.cc
+++ b/src/nnvm/legacy_json_util.cc
@@ -60,7 +60,7 @@ Graph UpgradeJSON_FixParsing(Graph g) {
         for (const auto key : kHiddenKeys) {
           size_t pos = it->first.rfind(key);
           if (pos == 0 || (pos != std::string::npos && pos == it->first.length() - key.length())) {
-            hidden_keys.push_back(*it);
+            hidden_keys.emplace_back(*it);
             erase = true;
             break;
           }
diff --git a/src/nnvm/legacy_op_util.cc b/src/nnvm/legacy_op_util.cc
index 4260e685601..63be619beb1 100644
--- a/src/nnvm/legacy_op_util.cc
+++ b/src/nnvm/legacy_op_util.cc
@@ -288,8 +288,7 @@ std::vector<std::pair<int, int> > OpPropInplaceOption(const NodeAttrs& attrs) {
   }
   std::vector<std::pair<int, int> > forward_inplace;
   for (auto& kv : prop.ptr->ForwardInplaceOption(in_data, out_addr)) {
-    forward_inplace.push_back(
-        std::make_pair(kv.first, *static_cast<int*>(kv.second)));
+    forward_inplace.emplace_back(kv.first, *static_cast<int*>(kv.second));
   }
   return forward_inplace;
 }
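
The std::make_pair call was spelling out the temporary by hand; with
emplace_back the two values go directly to the pair's constructor. A
small sketch:

    #include <utility>
    #include <vector>

    int main() {
      std::vector<std::pair<int, int>> v;
      v.push_back(std::make_pair(1, 2));  // temporary pair, then a move
      v.emplace_back(1, 2);               // pair constructed in place
    }
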
diff --git a/src/operator/control_flow.cc b/src/operator/control_flow.cc
index ba7f5c0ad8b..25c8f704cbc 100644
--- a/src/operator/control_flow.cc
+++ b/src/operator/control_flow.cc
@@ -1268,7 +1268,7 @@ NNVM_REGISTER_OP(_foreach)
     [](const NodeAttrs& attrs) {
   const ForeachParam& params = nnvm::get<ForeachParam>(attrs.parsed);
   std::vector<std::string> names;
-  names.push_back("fn");
+  names.emplace_back("fn");
   for (int i = 0; i < params.num_args - 1; i++)
     names.push_back("data" + std::to_string(i));
   return names;
@@ -1330,8 +1330,8 @@ NNVM_REGISTER_OP(_while_loop)
   const WhileLoopParam& params = nnvm::get<WhileLoopParam>(attrs.parsed);
   std::vector<std::string> names;
   names.reserve(params.num_args);
-  names.push_back("cond");
-  names.push_back("func");
+  names.emplace_back("cond");
+  names.emplace_back("func");
   for (int i = 2; i < params.num_args; i++)
     names.push_back("data" + std::to_string(i - 2));
   return names;
@@ -1392,9 +1392,9 @@ NNVM_REGISTER_OP(_cond)
   const CondParam& params = nnvm::get<CondParam>(attrs.parsed);
   std::vector<std::string> names;
   names.reserve(params.num_args);
-  names.push_back("cond");
-  names.push_back("then_branch");
-  names.push_back("else_branch");
+  names.emplace_back("cond");
+  names.emplace_back("then_branch");
+  names.emplace_back("else_branch");
   for (int i = 3; i < params.num_args; ++i)
     names.push_back("data" + std::to_string(i - 3));
   return names;
diff --git a/src/operator/custom/custom.cc b/src/operator/custom/custom.cc
index 4cda1375fd2..2061324e9e2 100644
--- a/src/operator/custom/custom.cc
+++ b/src/operator/custom/custom.cc
@@ -81,7 +81,7 @@ std::vector<std::string> List(const NodeAttrs& attrs) {
       &args, params.info->contexts[Type]));
   std::vector<std::string> ret;
   for (int i = 0; args[i] != nullptr; ++i) {
-    ret.push_back(args[i]);
+    ret.emplace_back(args[i]);
   }
   return ret;
 }
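
ret is a std::vector<std::string>, so ret.emplace_back(args[i])
constructs the string in place from the const char*, where push_back
would first build a std::string temporary. Sketch:

    #include <string>
    #include <vector>

    int main() {
      const char* name = "data";
      std::vector<std::string> ret;
      ret.push_back(name);     // temporary std::string, then a move
      ret.emplace_back(name);  // std::string constructed in place
    }
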
diff --git a/src/operator/custom/native_op-inl.h b/src/operator/custom/native_op-inl.h
index d2fb1149f7b..3c222071a4d 100644
--- a/src/operator/custom/native_op-inl.h
+++ b/src/operator/custom/native_op-inl.h
@@ -185,7 +185,7 @@ class NativeOpProp : public OperatorProperty {
     param_.pinfo->list_arguments(&args, param_.pinfo->p_list_arguments);
     std::vector<std::string> ret;
     for (int i = 0; args[i] != NULL; ++i) {
-      ret.push_back(args[i]);
+      ret.emplace_back(args[i]);
     }
     return ret;
   }
@@ -195,7 +195,7 @@ class NativeOpProp : public OperatorProperty {
     param_.pinfo->list_outputs(&args, param_.pinfo->p_list_outputs);
     std::vector<std::string> ret;
     for (int i = 0; args[i] != NULL; ++i) {
-      ret.push_back(args[i]);
+      ret.emplace_back(args[i]);
     }
     return ret;
   }
diff --git a/src/operator/custom/ndarray_op-inl.h b/src/operator/custom/ndarray_op-inl.h
index 20624d2d467..02c5630b60f 100644
--- a/src/operator/custom/ndarray_op-inl.h
+++ b/src/operator/custom/ndarray_op-inl.h
@@ -87,7 +87,7 @@ class NDArrayOpProp : public OperatorProperty {
     CHECK(param_.pinfo->list_arguments(&args, param_.pinfo->p_list_arguments));
     std::vector<std::string> ret;
     for (int i = 0; args[i] != NULL; ++i) {
-      ret.push_back(args[i]);
+      ret.emplace_back(args[i]);
     }
     return ret;
   }
@@ -97,7 +97,7 @@ class NDArrayOpProp : public OperatorProperty {
     CHECK(param_.pinfo->list_outputs(&args, param_.pinfo->p_list_outputs));
     std::vector<std::string> ret;
     for (int i = 0; args[i] != NULL; ++i) {
-      ret.push_back(args[i]);
+      ret.emplace_back(args[i]);
     }
     return ret;
   }
diff --git a/src/operator/optimizer_op.cc b/src/operator/optimizer_op.cc
index cf126ed58ea..6c44f99c144 100644
--- a/src/operator/optimizer_op.cc
+++ b/src/operator/optimizer_op.cc
@@ -384,7 +384,7 @@ only the row slices whose indices appear in grad.indices are updated (for both w
   [](const NodeAttrs& attrs, const int dev_mask, const DispatchMode dispatch_mode) {
     std::vector<ResourceRequest> request;
     if (dispatch_mode == DispatchMode::kFComputeEx) {
-      request.push_back(ResourceRequest::kTempSpace);
+      request.emplace_back(ResourceRequest::kTempSpace);
     }
     return request;
   })
diff --git a/src/operator/rnn-inl.h b/src/operator/rnn-inl.h
index 9211f6a456f..3901a805b9d 100644
--- a/src/operator/rnn-inl.h
+++ b/src/operator/rnn-inl.h
@@ -608,9 +608,9 @@ class RNNProp : public OperatorProperty {
     if (!param_.state_outputs)
       return outputs;
     else
-      outputs.push_back("state");
+      outputs.emplace_back("state");
     if (param_.mode == rnn_enum::kLstm)
-      outputs.push_back("state_cell");
+      outputs.emplace_back("state_cell");
     return outputs;
   }
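
One caveat behind the "where helpful" in the title (a general C++ note,
not an issue this PR ran into): emplace_back will invoke explicit
constructors that push_back's implicit conversion would reject, and a
call like v.emplace_back(new T) into a vector of std::unique_ptr can
leak if the vector reallocation throws before the unique_ptr takes
ownership. A sketch of the unique_ptr case:

    #include <memory>
    #include <vector>

    int main() {
      std::vector<std::unique_ptr<int>> v;
      // Risky: if growing the vector throws inside emplace_back,
      // the pointer from `new` is never freed.
      // v.emplace_back(new int(42));
      // Safer: the unique_ptr owns the allocation before the call.
      v.push_back(std::make_unique<int>(42));
    }
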
 


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services