Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/11/19 14:03:20 UTC

[GitHub] gigasquid closed pull request #12356: [MXNET-860] Use modernized ranged loops where possible

URL: https://github.com/apache/incubator-mxnet/pull/12356
 
 
   

This is a PR merged from a forked repository. Because GitHub hides the original
diff once a forked pull request is merged, the full diff is reproduced below for
the sake of provenance.
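
The change applied throughout the diff is mechanical: index-based
for (size_t i = 0; ...) loops whose index is only used to reach the current
element are rewritten as C++11 range-based loops, with const auto& where the
element is only read. The following standalone sketch illustrates the
before/after pattern; it is modelled on the first hunk below but is a
self-contained example, not an excerpt from the MXNet sources:

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> attr_list = {"key1", "val1", "key2", "val2"};

      // Before: index-based loop; i is used only to access the current element.
      std::vector<const char*> charp_old;
      for (std::size_t i = 0; i < attr_list.size(); ++i) {
        charp_old.push_back(attr_list[i].c_str());
      }

      // After: range-based loop; const auto& avoids copying each std::string.
      std::vector<const char*> charp_new;
      for (const auto& attr : attr_list) {
        charp_new.push_back(attr.c_str());
      }

      // Both loops collect pointers into the same underlying strings,
      // so the two result vectors compare equal element by element.
      std::cout << std::boolalpha << (charp_old == charp_new) << std::endl;
      return 0;
    }

Loops that still need the element index, or that modify the container while
iterating, are left in their original index form, which is what "where
possible" in the title refers to. The merged diff follows.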

diff --git a/src/c_api/c_api_symbolic.cc b/src/c_api/c_api_symbolic.cc
index d4625de8011..73a8a7ca6f8 100644
--- a/src/c_api/c_api_symbolic.cc
+++ b/src/c_api/c_api_symbolic.cc
@@ -269,8 +269,8 @@ int MXSymbolListAttr(SymbolHandle symbol,
   }
   *out_size = attr_list.size()/2;
   ret->ret_vec_charp.clear();
-  for (size_t i = 0; i < attr_list.size(); ++i) {
-    ret->ret_vec_charp.push_back(attr_list[i].c_str());
+  for (const auto& attr : attr_list) {
+    ret->ret_vec_charp.push_back(attr.c_str());
   }
   *out = dmlc::BeginPtr(ret->ret_vec_charp);
   API_END();
@@ -298,8 +298,8 @@ int MXSymbolListAttrShallow(SymbolHandle symbol,
   }
   *out_size = attr_list.size()/2;
   ret->ret_vec_charp.clear();
-  for (size_t i = 0; i < attr_list.size(); ++i) {
-    ret->ret_vec_charp.push_back(attr_list[i].c_str());
+  for (auto &attr : attr_list) {
+    ret->ret_vec_charp.push_back(attr.c_str());
   }
   *out = dmlc::BeginPtr(ret->ret_vec_charp);
   API_END();
diff --git a/src/c_api/c_predict_api.cc b/src/c_api/c_predict_api.cc
index cae5c68aa51..bd599e0b642 100644
--- a/src/c_api/c_predict_api.cc
+++ b/src/c_api/c_predict_api.cc
@@ -139,11 +139,11 @@ int _CreatePartialOut(const char* symbol_json_str,
     std::unordered_set<std::string> arg_names, aux_names;
     std::vector<std::string> arg_names_vec = sym.ListInputNames(Symbol::kReadOnlyArgs);
     std::vector<std::string> aux_names_vec = sym.ListInputNames(Symbol::kAuxiliaryStates);
-    for (size_t i = 0; i < arg_names_vec.size(); ++i) {
-      arg_names.insert(arg_names_vec[i]);
+    for (const auto &arg_name : arg_names_vec) {
+      arg_names.insert(arg_name);
     }
-    for (size_t i = 0; i < aux_names_vec.size(); ++i) {
-      aux_names.insert(aux_names_vec[i]);
+    for (const auto &aux_name : aux_names_vec) {
+      aux_names.insert(aux_name);
     }
     std::vector<NDArray> data;
     std::vector<std::string> names;
@@ -508,13 +508,13 @@ int MXNDListCreate(const char* nd_file_bytes,
     ret->keys.resize(arrays.size());
   }
   ret->indptr.push_back(0);
-  for (size_t i = 0; i < arrays.size(); ++i) {
-    TShape shape = arrays[i].shape();
+  for (auto &array : arrays) {
+    TShape shape = array.shape();
     size_t begin = ret->data.size();
     size_t size = shape.Size();
     ret->shapes.push_back(shape);
     ret->data.resize(begin + size);
-    arrays[i].SyncCopyToCPU(dmlc::BeginPtr(ret->data) + begin, size);
+    array.SyncCopyToCPU(dmlc::BeginPtr(ret->data) + begin, size);
     ret->indptr.push_back(begin + size);
   }
   *out = ret;
diff --git a/src/common/exec_utils.h b/src/common/exec_utils.h
index fbe544221a3..8de6f65253a 100644
--- a/src/common/exec_utils.h
+++ b/src/common/exec_utils.h
@@ -591,14 +591,14 @@ inline nnvm::Graph AssignContext(nnvm::Graph g,
 
   g.attrs["device"] = std::make_shared<dmlc::any>(std::move(device));
   g = nnvm::pass::PlaceDevice(g, "__ctx_group__", device_map, "_CrossDeviceCopy");
-  const auto& assigned_device = g.GetAttr<nnvm::DeviceVector>("device");
+  const auto& assigned_devices = g.GetAttr<nnvm::DeviceVector>("device");
 
   exec::ContextVector vcontext;
-  for (size_t i = 0; i < assigned_device.size(); ++i) {
-    if (assigned_device[i] == -1) {
+  for (auto context : assigned_devices) {
+    if (context == -1) {
       vcontext.push_back(default_ctx);
     } else {
-      vcontext.push_back(ctx_list[assigned_device[i]]);
+      vcontext.push_back(ctx_list[context]);
     }
   }
 
diff --git a/src/executor/attach_op_execs_pass.cc b/src/executor/attach_op_execs_pass.cc
index a0176fab0a0..fe3a6bd3d09 100644
--- a/src/executor/attach_op_execs_pass.cc
+++ b/src/executor/attach_op_execs_pass.cc
@@ -58,12 +58,10 @@ class StorageFallbackOpExecutor : public OpExecutor {
     if (!init_) {
       pre_temp_buf_.clear();
       post_temp_buf_.clear();
-      for (size_t i = 0; i < in_array.size(); i++) {
-        auto &nd = in_array[i];
+      for (const auto& nd : in_array) {
         pre_temp_buf_.emplace_back(nd.shape(), nd.ctx(), true, nd.dtype());
       }
-      for (size_t i = 0; i < out_array.size(); i++) {
-        auto &nd = out_array[i];
+      for (const auto& nd : out_array) {
         post_temp_buf_.emplace_back(nd.shape(), nd.ctx(), true, nd.dtype());
       }
       init_ = true;
diff --git a/src/executor/graph_executor.cc b/src/executor/graph_executor.cc
index 136917a60d9..d866ad13557 100644
--- a/src/executor/graph_executor.cc
+++ b/src/executor/graph_executor.cc
@@ -1130,14 +1130,13 @@ void GraphExecutor::InitCachedOps() {
 
     // the variables
     std::vector<Engine::VarHandle> use_vars, mutate_vars;
-    for (size_t i = 0; i < exec->in_array.size(); ++i) {
-      auto& nd = exec->in_array[i];
+    for (const auto& nd : exec->in_array) {
       use_vars.push_back(nd.var());
     }
-    for (auto& r : exec->op_ctx.requested) {
+    for (const auto& r : exec->op_ctx.requested) {
       mutate_vars.push_back(r.var);
     }
-    for (auto& nd : exec->out_array) {
+    for (const auto& nd : exec->out_array) {
       mutate_vars.push_back(nd.var());
     }
     if (exec->var() != nullptr) {
@@ -1551,8 +1550,8 @@ static nnvm::Symbol PartitionGraph(const nnvm::Symbol& src,
   std::vector<Context> aux_state_ctxes(aux_states.size());
 
   size_t i1 = 0, i2 = 0;
-  for (size_t i = 0; i < input_names.size(); ++i) {
-    if (i2 < aux_names.size() && aux_names[i2] == input_names[i]) {
+  for (const auto& input_name : input_names) {
+    if (i2 < aux_names.size() && aux_names[i2] == input_name) {
       arg_shapes.push_back(aux_states[i2].shape());
       arg_dtypes.push_back(aux_states[i2].dtype());
       arg_stypes.push_back(aux_states[i2].storage_type());
@@ -1560,7 +1559,7 @@ static nnvm::Symbol PartitionGraph(const nnvm::Symbol& src,
       ++i2;
     } else {
       CHECK(i1 < arg_names.size());
-      CHECK_EQ(arg_names[i1], input_names[i]);
+      CHECK_EQ(arg_names[i1], input_name);
       arg_shapes.push_back(in_args->at(i1).shape());
       arg_dtypes.push_back(in_args->at(i1).dtype());
       arg_stypes.push_back(in_args->at(i1).storage_type());
diff --git a/src/imperative/cached_op.cc b/src/imperative/cached_op.cc
index 1f115cd64ad..0ebd41787ba 100644
--- a/src/imperative/cached_op.cc
+++ b/src/imperative/cached_op.cc
@@ -278,10 +278,10 @@ bool CachedOp::SetForwardGraph(
   shape_inputs.reserve(inputs.size());
   dtype_inputs.reserve(inputs.size());
   storage_type_inputs.reserve(inputs.size());
-  for (uint32_t i = 0; i < inputs.size(); ++i) {
-    shape_inputs.emplace_back(inputs[i]->shape());
-    dtype_inputs.emplace_back(inputs[i]->dtype());
-    storage_type_inputs.emplace_back(inputs[i]->storage_type());
+  for (auto input : inputs) {
+    shape_inputs.emplace_back(input->shape());
+    dtype_inputs.emplace_back(input->dtype());
+    storage_type_inputs.emplace_back(input->storage_type());
   }
 
   bool match = true;
@@ -658,9 +658,9 @@ void CachedOp::StaticRunOps(
         arg_dtypes.clear();
         arg_shapes.reserve(ndinputs.size());
         arg_dtypes.reserve(ndinputs.size());
-        for (size_t i = 0; i < ndinputs.size(); ++i) {
-          arg_shapes.emplace_back(ndinputs[i]->shape());
-          arg_dtypes.emplace_back(ndinputs[i]->dtype());
+        for (auto& ndinput : ndinputs) {
+          arg_shapes.emplace_back(ndinput->shape());
+          arg_dtypes.emplace_back(ndinput->dtype());
         }
         state.op_states[i] = createop[node.source->op()](
             node.source->attrs, default_ctx, arg_shapes, arg_dtypes);
@@ -784,7 +784,9 @@ OpStatePtr CachedOp::DynamicForward(
   states.reserve(idx.num_nodes());
   std::vector<NDArray*> arrays;
   arrays.reserve(buff.size());
-  for (size_t i = 0; i < buff.size(); ++i) arrays.push_back(&buff[i]);
+  for (auto& buffered_array : buff) {
+    arrays.push_back(&buffered_array);
+  }
   for (size_t i = 0; i < num_inputs; ++i) {
     arrays[idx.entry_id(idx.input_nodes()[i], 0)] = inputs[i];
   }
@@ -907,7 +909,9 @@ void CachedOp::DynamicBackward(
   buff.resize(idx.num_node_entries());
   std::vector<NDArray*> arrays;
   arrays.reserve(buff.size());
-  for (size_t i = 0; i < buff.size(); ++i) arrays.push_back(&buff[i]);
+  for (auto& buffered_array : buff) {
+    arrays.push_back(&buffered_array);
+  }
   for (size_t i = 0; i < inputs.size(); ++i) {
     if (runtime.info.bwd_input_eid[i] == kEidNotExist) {
       continue;
@@ -1177,8 +1181,9 @@ void CachedOpBackward(const OpStatePtr& state_ptr,
       in_ptrs.push_back(&(*it));
   }
   CHECK_EQ(in_ptrs.size(), s.op->num_backward_inputs());
-  for (size_t i = 0; i < out_bufs.size(); i++)
-    out_ptrs.push_back(&out_bufs[i]);
+  for (auto& out_buf : out_bufs) {
+    out_ptrs.push_back(&out_buf);
+  }
   CHECK_EQ(out_ptrs.size(), s.op->num_backward_outputs());
   // Set is_training correct for the imperative executor.
   bool orig_is_train;
@@ -1283,8 +1288,8 @@ void CachedOpParamParser(nnvm::NodeAttrs* attrs) {
     nnvm::Symbol sym;
     sym.outputs = g.outputs;
     std::vector<std::pair<std::string, std::string> > flags;
-    for (auto it = attrs->dict.begin(); it != attrs->dict.end(); it++)
-      flags.emplace_back(it->first, it->second);
+    for (const auto& attr : attrs->dict)
+      flags.emplace_back(attr.first, attr.second);
     attrs->parsed = CachedOpPtr(new CachedOp(sym, flags));
   }
 }
diff --git a/src/imperative/imperative.cc b/src/imperative/imperative.cc
index 0c5ff841775..e146573165b 100644
--- a/src/imperative/imperative.cc
+++ b/src/imperative/imperative.cc
@@ -189,8 +189,8 @@ void Imperative::RecordOp(
     std::vector<bool>* p_save_outputs) {
   MXAPIThreadLocalEntry *local_buff = MXAPIThreadLocalStore::Get();
 
-  for (uint32_t i = 0; i < outputs.size(); ++i) {
-    CHECK(AGInfo::IsNone(*(outputs[i])))
+  for (auto output : outputs) {
+    CHECK(AGInfo::IsNone(*output))
       << "Assigning to NDArrays that are already in a computational graph "
       << "will cause undefined behavior when evaluating gradients. "
       << "Please call backward first to clear the graph or do this out side of "
@@ -247,8 +247,8 @@ void Imperative::RecordOp(
     node->inputs[i] = inputs[i]->entry_;
   }
 
-  for (uint32_t i = 0; i < outputs.size(); ++i) {
-    CHECK(AGInfo::IsNone(*(outputs[i])))
+  for (auto output : outputs) {
+    CHECK(AGInfo::IsNone(*output))
       << "Inplace operations (+=, -=, x[:]=, etc) are not supported when "
       << "recording with autograd.";
   }
@@ -348,7 +348,7 @@ std::vector<NDArray*> Imperative::Backward(
       exec::AggregateGradient, nullptr, nullptr,
       zero_ops, "_copy");
   CHECK_EQ(g_graph.outputs.size(), xs.size());
-  for (const auto &e : g_graph.outputs) {
+  for (const auto& e : g_graph.outputs) {
     if (e.node->op() == nullptr) {
       auto node = Node::Create();
       node->attrs.op = copy_op;
@@ -375,7 +375,9 @@ std::vector<NDArray*> Imperative::Backward(
   std::vector<OpStatePtr> states;
   std::vector<NDArray*> arrays;
   arrays.reserve(buff.size());
-  for (size_t i = 0; i < buff.size(); ++i) arrays.push_back(&buff[i]);
+  for (auto& buffered_array : buff) {
+    arrays.push_back(&buffered_array);
+  }
   if (create_graph) {
     states.resize(num_forward_nodes);
     nnvm::DFSVisit(sym.outputs, [&](const nnvm::NodePtr& n) {
@@ -390,12 +392,12 @@ std::vector<NDArray*> Imperative::Backward(
         ref_count[eid] = 1;
       }
     });
-    for (size_t i = 0; i < ograd_entries.size(); ++i) {
-      AGInfo& info = AGInfo::Get(ograd_entries[i].node);
-      if (!idx.exist(ograd_entries[i].node.get())) continue;
-      size_t eid = idx.entry_id(ograd_entries[i]);
+    for (auto& ograd_entry : ograd_entries) {
+      AGInfo& info = AGInfo::Get(ograd_entry.node);
+      if (!idx.exist(ograd_entry.node.get())) continue;
+      size_t eid = idx.entry_id(ograd_entry);
       buff[eid] = info.outputs[0];
-      buff[eid].entry_ = ograd_entries[i];
+      buff[eid].entry_ = ograd_entry;
     }
   } else {
     states.reserve(num_forward_nodes);
@@ -409,10 +411,10 @@ std::vector<NDArray*> Imperative::Backward(
         if (retain_graph || info.grad_req != kNullOp) ref_count[eid] = 1;
       }
     }
-    for (size_t i = 0; i < ograd_entries.size(); ++i) {
-      if (!idx.exist(ograd_entries[i].node.get())) continue;
-      AGInfo& info = AGInfo::Get(ograd_entries[i].node);
-      arrays[idx.entry_id(ograd_entries[i])] = &info.outputs[0];
+    for (auto& ograd_entry : ograd_entries) {
+      if (!idx.exist(ograd_entry.node.get())) continue;
+      AGInfo& info = AGInfo::Get(ograd_entry.node);
+      arrays[idx.entry_id(ograd_entry)] = &info.outputs[0];
     }
   }
   for (size_t i = num_forward_outputs; i < graph.outputs.size(); ++i) {
diff --git a/src/imperative/imperative_utils.cc b/src/imperative/imperative_utils.cc
index c84a3b9be50..1a676e62000 100644
--- a/src/imperative/imperative_utils.cc
+++ b/src/imperative/imperative_utils.cc
@@ -79,9 +79,9 @@ void RunGraph(
       arg_dtypes.clear();
       arg_shapes.reserve(ndinputs.size());
       arg_dtypes.reserve(ndinputs.size());
-      for (size_t i = 0; i < ndinputs.size(); ++i) {
-        arg_shapes.emplace_back(ndinputs[i]->shape());
-        arg_dtypes.emplace_back(ndinputs[i]->dtype());
+      for (auto& ndinput : ndinputs) {
+        arg_shapes.emplace_back(ndinput->shape());
+        arg_dtypes.emplace_back(ndinput->dtype());
       }
       states[i] = createop[node.source->op()](
           node.source->attrs, ctx, arg_shapes, arg_dtypes);
diff --git a/src/io/image_aug_default.cc b/src/io/image_aug_default.cc
index bea2e2c0749..a70cafaa97e 100644
--- a/src/io/image_aug_default.cc
+++ b/src/io/image_aug_default.cc
@@ -215,9 +215,9 @@ class DefaultImageAugmenter : public ImageAugmenter {
   void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {
     std::vector<std::pair<std::string, std::string> > kwargs_left;
     kwargs_left = param_.InitAllowUnknown(kwargs);
-    for (size_t i = 0; i < kwargs_left.size(); i++) {
-        if (!strcmp(kwargs_left[i].first.c_str(), "rotate_list")) {
-          const char* val = kwargs_left[i].second.c_str();
+    for (auto& kwarg : kwargs_left) {
+        if (!strcmp(kwarg.first.c_str(), "rotate_list")) {
+          const char* val = kwarg.second.c_str();
           const char *end = val + strlen(val);
           char buf[128];
           while (val < end) {
@@ -473,18 +473,18 @@ class DefaultImageAugmenter : public ImageAugmenter {
                                                                   param_.saturation)(*prnd);
       int rand_order[3] = {0, 1, 2};
       std::random_shuffle(std::begin(rand_order), std::end(rand_order));
-      for (int i = 0; i < 3; ++i) {
-        if (rand_order[i] == 0) {
+      for (int i : rand_order) {
+        if (i == 0) {
           // brightness
           res.convertTo(res, -1, alpha_b, 0);
         }
-        if (rand_order[i] == 1) {
+        if (i == 1) {
           // contrast
           cvtColor(res, temp_, CV_RGB2GRAY);
           float gray_mean = cv::mean(temp_)[0];
           res.convertTo(res, -1, alpha_c, (1 - alpha_c) * gray_mean);
         }
-        if (rand_order[i] == 2) {
+        if (i == 2) {
           // saturation
           cvtColor(res, temp_, CV_RGB2GRAY);
           cvtColor(temp_, temp_, CV_GRAY2BGR);
diff --git a/src/io/image_det_aug_default.cc b/src/io/image_det_aug_default.cc
index 15169d8f760..afe5174b75d 100644
--- a/src/io/image_det_aug_default.cc
+++ b/src/io/image_det_aug_default.cc
@@ -349,19 +349,19 @@ class ImageDetLabel {
     if (!valid) return false;
     // transform ground-truth labels
     std::vector<ImageDetObject> new_objects;
-    for (auto iter = objects_.begin(); iter != objects_.end(); ++iter) {
+    for (auto& object : objects_) {
       if (image_det_aug_default_enum::kCenter == crop_emit_mode) {
-        float center_x = (iter->left + iter->right) * 0.5f;
-        float center_y = (iter->top + iter->bottom) * 0.5f;
+        float center_x = (object.left + object.right) * 0.5f;
+        float center_y = (object.top + object.bottom) * 0.5f;
         if (!crop_box.contains(cv::Point2f(center_x, center_y))) {
           continue;
         }
-        new_objects.push_back(iter->Project(crop_box));
+        new_objects.push_back(object.Project(crop_box));
       } else if (image_det_aug_default_enum::kOverlap == crop_emit_mode) {
-        Rect gt_box = iter->ToRect();
+        Rect gt_box = object.ToRect();
         float overlap = (crop_box & gt_box).area() / gt_box.area();
         if (overlap > emit_overlap_thresh) {
-          new_objects.push_back(iter->Project(crop_box));
+          new_objects.push_back(object.Project(crop_box));
         }
       }
     }
@@ -375,8 +375,8 @@ class ImageDetLabel {
    */
   bool TryPad(const Rect pad_box) {
     // update all objects inplace
-    for (auto it = objects_.begin(); it != objects_.end(); ++it) {
-      *it = it->Project(pad_box);
+    for (auto& object : objects_) {
+      object = object.Project(pad_box);
     }
     return true;
   }
@@ -384,8 +384,8 @@ class ImageDetLabel {
   /*! \brief flip image and object coordinates horizontally */
   bool TryMirror() {
     // flip all objects horizontally
-    for (auto it = objects_.begin(); it != objects_.end(); ++it) {
-      *it = it->HorizontalFlip();
+    for (auto& object : objects_) {
+      object = object.HorizontalFlip();
     }
     return true;
   }
diff --git a/src/kvstore/comm.h b/src/kvstore/comm.h
index 581ef81cc1b..7090aaf46d8 100644
--- a/src/kvstore/comm.h
+++ b/src/kvstore/comm.h
@@ -231,9 +231,9 @@ class CommCPU : public Comm {
       << "BroadcastRowSparse expects row-sparse src NDArray";
     CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
       << "BroadcastRowSparse with src on gpu context not supported";
-    for (size_t i = 0; i < dst.size(); ++i) {
-      NDArray* out = dst[i].first;
-      NDArray row_id = dst[i].second;
+    for (const auto& dst_kv : dst) {
+      NDArray* out = dst_kv.first;
+      NDArray row_id = dst_kv.second;
       CHECK_EQ(out->storage_type(), kRowSparseStorage)
                << "BroadcastRowSparse expects row_sparse dst NDArray";
       CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
@@ -621,9 +621,9 @@ class CommDevice : public Comm {
     CHECK_EQ(src.storage_type(), kRowSparseStorage)
       << "BroadcastRowSparse expects row-sparse src NDArray";
 
-    for (size_t i = 0; i < dst.size(); ++i) {
-      NDArray* out = dst[i].first;
-      NDArray row_id = dst[i].second;
+    for (const auto& dst_kv : dst) {
+      NDArray* out = dst_kv.first;
+      NDArray row_id = dst_kv.second;
       CHECK_EQ(out->storage_type(), kRowSparseStorage)
                << "BroadcastRowSparse expects row_sparse dst NDArray";
       CHECK_EQ(row_id.ctx(), src.ctx())
@@ -686,17 +686,17 @@ class CommDevice : public Comm {
       ctx_info[d.dev_id] = std::make_pair(d, 0);
     }
 
-    for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
-      const int key  = std::get<0>(sorted_key_attrs_[i]);
-      const TShape& shape = std::get<1>(sorted_key_attrs_[i]);
-      const int type = std::get<2>(sorted_key_attrs_[i]);
+    for (auto& sorted_key_attr : sorted_key_attrs_) {
+      const int key  = std::get<0>(sorted_key_attr);
+      const TShape& shape = std::get<1>(sorted_key_attr);
+      const int type = std::get<2>(sorted_key_attr);
       auto& buf = merge_buf_[key];
       Context ctx;
       size_t min_size = std::numeric_limits<size_t>::max();
-      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
-        size_t size = it->second.second;
+      for (auto& ctx_info_kv : ctx_info) {
+        size_t size = ctx_info_kv.second.second;
         if (size <= min_size) {
-          ctx = it->second.first;
+          ctx = ctx_info_kv.second.first;
           min_size = size;
         }
       }
diff --git a/src/kvstore/comm_tree.h b/src/kvstore/comm_tree.h
index 8d36803ecbe..e3b2ad7f57d 100644
--- a/src/kvstore/comm_tree.h
+++ b/src/kvstore/comm_tree.h
@@ -102,7 +102,7 @@ class CommDeviceTree : public CommDevice {
 
     if (stype == kDefaultStorage) {
       // Copy everything into buf.merged for each gpu
-      for (size_t i = 0; i < src.size(); ++i) {
+      for (const auto& src_gpu_value : src) {
         int start = scan_[root][depth_];
         int end = scan_[root][depth_+1];
 
@@ -110,8 +110,8 @@ class CommDeviceTree : public CommDevice {
           int topo_id = topology[j];
           TreeBufferEntry& buf = tree_merge_buf_[topo_id][key];
 
-          if (devs_[topo_id] == src[i].ctx()) {
-            CopyFromTo(src[i], &(buf.merged[merged_row]), priority);
+          if (devs_[topo_id] == src_gpu_value.ctx()) {
+            CopyFromTo(src_gpu_value, &(buf.merged[merged_row]), priority);
           }
         }
       }
@@ -401,10 +401,10 @@ class CommDeviceTree : public CommDevice {
     bool delay_alloc = true;
     std::map<int, int> key_dist;
 
-    for (size_t i = 0; i < tree_sorted_key_attrs_.size(); ++i) {
-      const int key  = std::get<0>(tree_sorted_key_attrs_[i]);
-      const TShape& shape = std::get<1>(tree_sorted_key_attrs_[i]);
-      const int type = std::get<2>(tree_sorted_key_attrs_[i]);
+    for (auto& tree_sorted_key_attr : tree_sorted_key_attrs_) {
+      const int key  = std::get<0>(tree_sorted_key_attr);
+      const TShape& shape = std::get<1>(tree_sorted_key_attr);
+      const int type = std::get<2>(tree_sorted_key_attr);
 
       if (key_dist.find(shape.Size()) == key_dist.end())
         key_dist[shape.Size()] = 1;
@@ -485,8 +485,8 @@ class CommDeviceTree : public CommDevice {
       }
     }
 
-    for (auto it = key_dist.begin(); it != key_dist.end(); ++it) {
-      LOG(INFO) << "Size " << it->first << " occurs " << it->second << " times";
+    for (auto& kv : key_dist) {
+      LOG(INFO) << "Size " << kv.first << " occurs " << kv.second << " times";
     }
     inited_ = true;
   }
diff --git a/src/kvstore/gpu_topology.h b/src/kvstore/gpu_topology.h
index a8801499c3b..2a21758006e 100644
--- a/src/kvstore/gpu_topology.h
+++ b/src/kvstore/gpu_topology.h
@@ -278,8 +278,8 @@ inline bool KernighanLin(const std::vector<T>& W, std::vector<int>* P,
 
   // 0) For every partition, determine if it can be partitioned further.
   //    To do this, we must do a histogram of each partition:
-  for (unsigned i=0; i < P->size(); ++i) {
-    histogram[(*P)[i]]++;
+  for (int partition : *P) {
+    histogram[partition]++;
   }
 
   bool stop = true;
@@ -315,13 +315,13 @@ inline bool KernighanLin(const std::vector<T>& W, std::vector<int>* P,
 
       // 1b) Shuffle using random generator
       std::shuffle(cluster_list.begin(), cluster_list.end(), *gen);
-      for (unsigned i = 0; i < cluster_list.size(); ++i) {
+      for (int cluster : cluster_list) {
         if (first_partition < target_partition) {
-          int dest = cluster_list[i];
+          int dest = cluster;
           P_temp[dest] = 1;
           first_partition++;
         } else {
-          int dest = cluster_list[i];
+          int dest = cluster;
           P_temp[dest] = -1;
         }
       }
@@ -758,8 +758,8 @@ inline bool FormTopology(const std::vector<int>& result,
                          std::vector<size_t>* topo_row,
                          std::vector<size_t>* scan_row,
                          int depth) {
-  for (unsigned i = 0; i < result.size(); ++i)
-    if (result[i] == -1)
+  for (int result_value : result)
+    if (result_value == -1)
       return false;
 
   scan_row->push_back(topo_row->size());
diff --git a/src/ndarray/ndarray.cc b/src/ndarray/ndarray.cc
index 5d8e39dea22..081d4e75932 100644
--- a/src/ndarray/ndarray.cc
+++ b/src/ndarray/ndarray.cc
@@ -122,8 +122,8 @@ NDArray::Chunk::~Chunk() {
       }
 #endif
       if (mem.h.size > 0) Storage::Get()->Free(mem.h);
-      for (size_t i = 0; i < mem.aux_h.size(); i++) {
-        if (mem.aux_h[i].size > 0) Storage::Get()->Free(mem.aux_h[i]);
+      for (const auto& aux : mem.aux_h) {
+        if (aux.size > 0) Storage::Get()->Free(aux);
       }
     }
   }, shandle.ctx, var);
@@ -1280,17 +1280,17 @@ void CopyFromTo(const NDArray& from, const NDArray *to, int priority) {
 void ElementwiseSum(const std::vector<NDArray> &source, NDArray *out, int priority) {
   std::vector<Engine::VarHandle> const_vars;
   const_vars.reserve(source.size());
-  for (size_t i = 0; i < source.size(); ++i) {
-    if (source[i].var() != out->var()) {
-      const_vars.push_back(source[i].var());
+  for (const auto& source_array : source) {
+    if (source_array.var() != out->var()) {
+      const_vars.push_back(source_array.var());
     }
-    CHECK_EQ(source[i].shape() , out->shape())
+    CHECK_EQ(source_array.shape() , out->shape())
         << "operands shape mismatch";
     if (out->ctx().dev_mask() == Context::kCPU) {
-      CHECK_EQ(source[i].ctx().dev_mask(), Context::kCPU)
+      CHECK_EQ(source_array.ctx().dev_mask(), Context::kCPU)
           << "operands context mismatch";
     } else {
-      CHECK_EQ(source[i].ctx(), out->ctx())
+      CHECK_EQ(source_array.ctx(), out->ctx())
           << "operands context mismatch";
     }
   }
diff --git a/src/nnvm/graph_editor.cc b/src/nnvm/graph_editor.cc
index 1dee3c14ee4..e3ff3f723be 100644
--- a/src/nnvm/graph_editor.cc
+++ b/src/nnvm/graph_editor.cc
@@ -76,22 +76,21 @@ bool CutGraphInputs(const std::vector<nnvm::NodeEntry *> &input_entries,
   std::vector<nnvm::NodePtr> var_nodes;
   orig_entries->clear();
   orig_entries->reserve(input_entries.size());
-  for (size_t i = 0; i < input_entries.size(); i++) {
-    nnvm::NodeEntry *e = input_entries[i];
+  for (auto input_entry : input_entries) {
     // If the node is a variable itself, we may want to skip the node.
-    if (e->node->is_variable() && skip_var)
+    if (input_entry->node->is_variable() && skip_var)
       continue;
 
     auto it = std::find_if(orig_entries->begin(), orig_entries->end(),
-                           pred_entry(*e));
+                           pred_entry(*input_entry));
     bool exist = (it != orig_entries->end());
-    orig_entries->push_back(*e);
+    orig_entries->push_back(*input_entry);
     nnvm::NodePtr n;
     // If we haven't seen the entry before, we need to create a new var node
     // for the node entry.
     if (!exist) {
       nnvm::Symbol sym;
-      sym.outputs.push_back(*e);
+      sym.outputs.push_back(*input_entry);
       n = nnvm::CreateVariableNode(sym.ListOutputNames()[0]);
     } else {
       // Otherwise, we use the var node created before.
@@ -100,7 +99,7 @@ bool CutGraphInputs(const std::vector<nnvm::NodeEntry *> &input_entries,
       n = var_nodes[idx];
     }
     var_nodes.push_back(n);
-    *e = nnvm::NodeEntry{n, 0, 0};
+    *input_entry = nnvm::NodeEntry{n, 0, 0};
   }
   return true;
 }
diff --git a/src/nnvm/legacy_json_util.cc b/src/nnvm/legacy_json_util.cc
index 0697aebbd0d..a2d14c2135d 100644
--- a/src/nnvm/legacy_json_util.cc
+++ b/src/nnvm/legacy_json_util.cc
@@ -76,9 +76,9 @@ Graph UpgradeJSON_FixParsing(Graph g) {
         n->op()->attr_parser(&(n->attrs));
 
       // add back removed hidden keys
-      for (const auto &kv : hidden_keys) {
+      for (const auto& kv : hidden_keys) {
         bool flag = false;
-        for (const auto &key : kHiddenKeys) {
+        for (const auto& key : kHiddenKeys) {
           size_t pos = kv.first.rfind(key);
           if (pos == 0 && key.length() == kv.first.length()) {
             n->attrs.dict["__"+key+"__"] = kv.second;
@@ -211,8 +211,8 @@ Graph LoadLegacyJSONPass(Graph g) {
               << ". Attempting to upgrade...";
     upgrading = true;
   }
-  for (auto it = upgrader_list.begin(); it != upgrader_list.end(); ++it) {
-    if (it->first > version) load = it->second(load);
+  for (auto& upgrader : upgrader_list) {
+    if (upgrader.first > version) load = upgrader.second(load);
   }
   if (upgrading) LOG(INFO) << "Symbol successfully upgraded!";
   return load;
diff --git a/src/nnvm/legacy_op_util.cc b/src/nnvm/legacy_op_util.cc
index 63be619beb1..4ab777b6adb 100644
--- a/src/nnvm/legacy_op_util.cc
+++ b/src/nnvm/legacy_op_util.cc
@@ -388,13 +388,13 @@ std::vector<std::pair<int, int> > OpBackInplaceOption(const NodeAttrs& attrs) {
   std::vector<int> out_data_index(prop.outputs.size());
 
   int counter = 0;
-  for (size_t i = 0; i < in_data_index.size(); ++i) {
+  for (const int& i : in_data_index) {
     in_data_index[i] = counter++;
   }
-  for (size_t i = 0; i < out_grad_index.size(); ++i) {
+  for (const int& i : out_grad_index) {
     out_grad_index[i] = counter++;
   }
-  for (size_t i = 0; i < out_data_index.size(); ++i) {
+  for (const int& i : out_data_index) {
     out_data_index[i] = counter++;
   }
 
diff --git a/src/operator/bilinear_sampler-inl.h b/src/operator/bilinear_sampler-inl.h
index 499d2339620..49a5b5e5d5d 100644
--- a/src/operator/bilinear_sampler-inl.h
+++ b/src/operator/bilinear_sampler-inl.h
@@ -176,12 +176,12 @@ class BilinearSamplerProp : public OperatorProperty {
                    std::vector<int> *out_type,
                    std::vector<int> *aux_type) const override {
       int dtype = -1;
-      for (size_t i = 0; i < in_type->size(); ++i) {
+      for (int type : *in_type) {
         if (dtype == -1) {
-          dtype = in_type->at(i);
+          dtype = type;
         } else {
-          CHECK(in_type->at(i) == dtype ||
-                in_type->at(i) == -1) <<
+          CHECK(type == dtype ||
+              type == -1) <<
                 "Non-uniform data type in BilinearSampler";
         }
       }
diff --git a/src/operator/custom/custom.cc b/src/operator/custom/custom.cc
index 1bcbbbcf3e8..2643abbe9e5 100644
--- a/src/operator/custom/custom.cc
+++ b/src/operator/custom/custom.cc
@@ -91,7 +91,7 @@ void AttrParser(NodeAttrs* attrs) {
   CustomParam& params = nnvm::get<CustomParam>(attrs->parsed);
 
   std::vector<const char*> keys, vals;
-  for (auto &p : attrs->dict) {
+  for (auto& p : attrs->dict) {
     if (p.first == "op_type") {
       params.op_type = p.second;
     } else {
@@ -185,7 +185,7 @@ bool InferType(const NodeAttrs& attrs,
   for (size_t i = 0; i < params.num_args; ++i) {
     types.push_back((*in_type)[i]);
   }
-  for (const auto &i : *out_type) {
+  for (const auto& i : *out_type) {
     types.push_back(i);
   }
   for (size_t i = 0; i < params.num_auxs; ++i) {
@@ -387,11 +387,11 @@ void BackwardEx(const OpStatePtr& state, const OpContext& ctx,
     cpys.push_back(*nd);
     ptrs[params.bwd_idx[i]] = reinterpret_cast<void*>(nd);
   }
-  for (size_t i = 0; i < ptrs.size(); ++i) {
+  for (auto& ptr : ptrs) {
     NDArray* nd;
-    if (ptrs[i] == nullptr) {
+    if (ptr == nullptr) {
         nd = new NDArray();
-        ptrs[i] = reinterpret_cast<void*>(nd);
+      ptr = reinterpret_cast<void*>(nd);
     }
   }
   for (size_t i = 0; i < outputs.size(); ++i) {
@@ -457,8 +457,8 @@ inline bool BackwardInferStorageType(const nnvm::NodeAttrs& attrs,
     stypes[i] = (*iattr)[i];
   }
 
-  for (size_t i = 0; i < oattr->size(); i++) {
-    stypes.push_back((*oattr)[i]);
+  for (int i : *oattr) {
+    stypes.push_back(i);
     tags.push_back(2);
   }
 
diff --git a/src/operator/custom/native_op-inl.h b/src/operator/custom/native_op-inl.h
index 05ad124ad34..8da04abc0a3 100644
--- a/src/operator/custom/native_op-inl.h
+++ b/src/operator/custom/native_op-inl.h
@@ -206,9 +206,9 @@ class NativeOpProp : public OperatorProperty {
 
   void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {
     param_.Init(kwargs);
-    for (auto iter = kwargs.begin(); iter != kwargs.end(); ++iter) {
-      if (iter->first == "info") {
-        sscanf(iter->second.c_str(), "%p", &param_.pinfo);
+    for (const auto& kwarg : kwargs) {
+      if (kwarg.first == "info") {
+        sscanf(kwarg.second.c_str(), "%p", &param_.pinfo);
       }
     }
     param_.num_inputs_ = ListArguments().size();
@@ -229,10 +229,10 @@ class NativeOpProp : public OperatorProperty {
     for (const auto& s : *in_shape) size += s.ndim();
     std::vector<uint32_t> shapes_buffer(size);
     uint32_t *ptr = shapes_buffer.data();
-    for (auto iter = in_shape->begin(); iter != in_shape->end(); ++iter) {
+    for (const auto& shape : *in_shape) {
       shapes.push_back(ptr);
-      ndims.push_back(iter->ndim());
-      ptr = nnvm::ShapeTypeCast(iter->begin(), iter->end(), ptr);
+      ndims.push_back(shape.ndim());
+      ptr = nnvm::ShapeTypeCast(shape.begin(), shape.end(), ptr);
     }
     shapes.resize(param_.num_inputs_+param_.num_outputs_);
     ndims.resize(param_.num_inputs_+param_.num_outputs_);
diff --git a/src/operator/custom/ndarray_op-inl.h b/src/operator/custom/ndarray_op-inl.h
index 02c5630b60f..5490747d7d4 100644
--- a/src/operator/custom/ndarray_op-inl.h
+++ b/src/operator/custom/ndarray_op-inl.h
@@ -131,10 +131,10 @@ class NDArrayOpProp : public OperatorProperty {
     for (const auto& s : *in_shape) size += s.ndim();
     std::vector<uint32_t> shapes_buffer(size);
     uint32_t *ptr = shapes_buffer.data();
-    for (auto iter = in_shape->begin(); iter != in_shape->end(); ++iter) {
+    for (const auto& shape : *in_shape) {
       shapes.push_back(ptr);
-      ndims.push_back(iter->ndim());
-      ptr = nnvm::ShapeTypeCast(iter->begin(), iter->end(), ptr);
+      ndims.push_back(shape.ndim());
+      ptr = nnvm::ShapeTypeCast(shape.begin(), shape.end(), ptr);
     }
     shapes.resize(param_.num_inputs_+param_.num_outputs_);
     ndims.resize(param_.num_inputs_+param_.num_outputs_);
diff --git a/src/operator/grid_generator-inl.h b/src/operator/grid_generator-inl.h
index 105630cfc26..258ec9ae957 100644
--- a/src/operator/grid_generator-inl.h
+++ b/src/operator/grid_generator-inl.h
@@ -248,12 +248,12 @@ class GridGeneratorProp : public OperatorProperty {
                    std::vector<int> *out_type,
                    std::vector<int> *aux_type) const override {
       int dtype = -1;
-      for (size_t i = 0; i < in_type->size(); ++i) {
+      for (int type : *in_type) {
         if (dtype == -1) {
-          dtype = in_type->at(i);
+          dtype = type;
         } else {
-          CHECK(in_type->at(i) == dtype ||
-                in_type->at(i) == -1) <<
+          CHECK(type == dtype ||
+              type == -1) <<
                 "Non-uniform data type in GridGenerator";
         }
       }
diff --git a/src/operator/nn/concat.cc b/src/operator/nn/concat.cc
index ac8a814ce70..38afa5f755e 100644
--- a/src/operator/nn/concat.cc
+++ b/src/operator/nn/concat.cc
@@ -139,12 +139,12 @@ static bool ConcatType(const nnvm::NodeAttrs& attrs,
   const ConcatParam& param_ = nnvm::get<ConcatParam>(attrs.parsed);
   int dtype = -1;
 
-  for (size_t i = 0; i < in_type->size(); ++i) {
+  for (int i : *in_type) {
     if (dtype == -1) {
-      dtype = in_type->at(i);
+      dtype = i;
     } else {
-      CHECK(in_type->at(i) == dtype ||
-            in_type->at(i) == -1) <<
+      CHECK(i == dtype ||
+          i == -1) <<
           "Non-uniform data type in Concat";
     }
   }
diff --git a/src/operator/operator_common.h b/src/operator/operator_common.h
index d7c14172477..b1822647cf0 100644
--- a/src/operator/operator_common.h
+++ b/src/operator/operator_common.h
@@ -337,8 +337,8 @@ inline bool storage_type_assign(StorageTypeVector* stypes,
                                 const DispatchMode target_dispatch) {
   CHECK_GT(stypes->size(), 0);
   bool success = true;
-  for (size_t i = 0; i < stypes->size(); i++) {
-    if (!type_assign(&(*stypes)[i], target_stype)) {
+  for (int& stype : *stypes) {
+    if (!type_assign(&stype, target_stype)) {
       success = false;
     }
   }
diff --git a/src/operator/operator_util.cc b/src/operator/operator_util.cc
index 326a1ca38ba..0c6f176a023 100644
--- a/src/operator/operator_util.cc
+++ b/src/operator/operator_util.cc
@@ -407,13 +407,13 @@ class SimpleOpPropBase : public OperatorProperty {
     CHECK_LE(in_type->size(), this->ListArguments().size());
     int dtype = -1;
     // reduce dtype to a common one.
-    for (unsigned i = 0; i < in_type->size(); ++i) {
+    for (int i : *in_type) {
       if (dtype == -1) {
-        dtype = in_type->at(i);
+        dtype = i;
       } else {
-        CHECK(in_type->at(i) == -1 ||
-              in_type->at(i) == dtype) <<
-          "Non-uniform input data type. Expected " << dtype << "got " << in_type->at(i);
+        CHECK(i == -1 ||
+            i == dtype) <<
+          "Non-uniform input data type. Expected " << dtype << "got " << i;
       }
     }
 
diff --git a/src/operator/quantization/quantized_pooling.cc b/src/operator/quantization/quantized_pooling.cc
index 779e244c862..477830a6353 100644
--- a/src/operator/quantization/quantized_pooling.cc
+++ b/src/operator/quantization/quantized_pooling.cc
@@ -114,8 +114,8 @@ inline static bool QuantizedPoolingStorageType(const nnvm::NodeAttrs &attrs,
 #else
   CHECK_EQ(out_attrs->size(), 3);
 #endif
-  for (size_t i = 0; i < out_attrs->size(); i++)
-    (*out_attrs)[i] = kDefaultStorage;
+  for (int& out_attr : *out_attrs)
+    out_attr = kDefaultStorage;
   return true;
 }
 
diff --git a/src/operator/random/multisample_op.h b/src/operator/random/multisample_op.h
index 360c100c6eb..abd4a2c6c6d 100644
--- a/src/operator/random/multisample_op.h
+++ b/src/operator/random/multisample_op.h
@@ -78,8 +78,8 @@ inline bool MultiSampleOpShape(const nnvm::NodeAttrs& attrs,
     tshape = TShape(tshape.begin(), tshape.begin()+(tshape.ndim()-sshape.ndim()));
   }
   // Shape assignemnt/checking for inputs.
-  for (size_t i = 0; i < in_attrs->size(); ++i) {
-    if ( !shape_assign(&tshape, (*in_attrs)[i])) return false;
+  for (const auto& in_attr : *in_attrs) {
+    if ( !shape_assign(&tshape, in_attr)) return false;
   }
   for (size_t i = 0; i < in_attrs->size(); ++i) {
     SHAPE_ASSIGN_CHECK(*in_attrs, i, tshape);
@@ -105,8 +105,8 @@ inline bool MultiSampleOpType(const nnvm::NodeAttrs& attrs,
 
   // All inputs must have same type.
   int dtype = -1;
-  for (size_t i = 0; i < in_attrs->size(); ++i) {
-    if (!type_assign(&dtype, (*in_attrs)[i])) return false;
+  for (int in_attr : *in_attrs) {
+    if (!type_assign(&dtype, in_attr)) return false;
   }
   for (size_t i = 0; i < in_attrs->size(); ++i) {
     TYPE_ASSIGN_CHECK(*in_attrs, i, dtype);
diff --git a/src/operator/spatial_transformer-inl.h b/src/operator/spatial_transformer-inl.h
index 3e863d877b0..a7ecdaecb10 100644
--- a/src/operator/spatial_transformer-inl.h
+++ b/src/operator/spatial_transformer-inl.h
@@ -216,12 +216,12 @@ class SpatialTransformerProp : public OperatorProperty {
                    std::vector<int> *out_type,
                    std::vector<int> *aux_type) const override {
       int dtype = -1;
-      for (size_t i = 0; i < in_type->size(); ++i) {
+      for (int i_type : *in_type) {
         if (dtype == -1) {
-          dtype = in_type->at(i);
+          dtype = i_type;
         } else {
-          CHECK(in_type->at(i) == dtype ||
-                in_type->at(i) == -1) <<
+          CHECK(i_type == dtype ||
+              i_type == -1) <<
                 "Non-uniform data type in SpatialTransformer";
         }
       }
diff --git a/src/operator/subgraph/partition_graph.cc b/src/operator/subgraph/partition_graph.cc
index da9a9f375fa..90a14caa510 100644
--- a/src/operator/subgraph/partition_graph.cc
+++ b/src/operator/subgraph/partition_graph.cc
@@ -495,15 +495,14 @@ void FindInputEntries(const Graph& g,
                       std::vector<nnvm::NodeEntry*>* input_entries) {
   const auto& indexed_graph = g.indexed_graph();
   int label = -1;
-  for (size_t i = 0; i < subgraph_nodes.size(); ++i) {
+  for (auto subgraph_node : subgraph_nodes) {
     if (label == -1) {
-      label = subgraph_nodes[i]->label;
+      label = subgraph_node->label;
     } else {
-      CHECK_EQ(subgraph_nodes[i]->label, label);
+      CHECK_EQ(subgraph_node->label, label);
     }
-    auto& inputs = subgraph_nodes[i]->node->inputs;
-    for (size_t j = 0; j < inputs.size(); ++j) {
-      auto& e = inputs[j];
+    auto& inputs = subgraph_node->node->inputs;
+    for (auto &e : inputs) {
       if (indexed_graph.exist(e.node.get())) {
         // e's source node is not a subgraph node
         const auto nid = indexed_graph.node_id(e.node.get());
@@ -538,20 +537,19 @@ void FindOutputEntries(Graph* g,
   if (subgraph_nodes.empty()) return;
   const auto& indexed_graph = g->indexed_graph();
   int label = -1;
-  for (size_t i = 0; i < subgraph_nodes.size(); ++i) {
+  for (auto subgraph_node : subgraph_nodes) {
     if (label == -1) {
-      label = subgraph_nodes[i]->label;
+      label = subgraph_node->label;
     } else {
-      CHECK_EQ(subgraph_nodes[i]->label, label);
+      CHECK_EQ(subgraph_node->label, label);
     }
-    for (auto it = subgraph_nodes[i]->outputs.begin();
-         it != subgraph_nodes[i]->outputs.end(); ++it) {
-      if (indexed_graph.exist(it->first)) {
+    for (auto &output_node : subgraph_node->outputs) {
+      if (indexed_graph.exist(output_node.first)) {
         // if the output node is a normal graph node (not a subgraph node)
-        const auto nid = indexed_graph.node_id(it->first);
+        const auto nid = indexed_graph.node_id(output_node.first);
         // this is a node not belonging to the current subgraph
         if (simple_nodes[nid]->label != label) {
-          for (auto idx : it->second) {
+          for (auto idx : output_node.second) {
             auto& e = simple_nodes[nid]->node->inputs[idx];
             output_entries->push_back(&e);
           }
@@ -559,16 +557,15 @@ void FindOutputEntries(Graph* g,
       } else {
         // if the output node is a subgraph node
         // two graphs are adjacent
-        for (auto idx : it->second) {
-          output_entries->push_back(&(it->first->inputs[idx]));
+        for (auto idx : output_node.second) {
+          output_entries->push_back(&(output_node.first->inputs[idx]));
         }
       }
     }
   }
   // Check if current subgraph contains a node which is the last node
   // of the whole graph. If so, save its corresponding entry as well.
-  for (size_t i = 0; i < g->outputs.size(); ++i) {
-    auto& entry = g->outputs[i];
+  for (auto &entry : g->outputs) {
     // The entry might has been updated as an output of
     // a subgraph node. In this case, no need
     // to check its source for the current subgraph. Otherwise,
diff --git a/src/operator/tensor/matrix_op-inl.h b/src/operator/tensor/matrix_op-inl.h
index bfee083ca1a..656748e9d21 100644
--- a/src/operator/tensor/matrix_op-inl.h
+++ b/src/operator/tensor/matrix_op-inl.h
@@ -1977,11 +1977,11 @@ void ReverseOpForward(const nnvm::NodeAttrs& attrs,
   std::vector<index_t> stride_(param.axis.ndim());
   std::vector<index_t>  trailing_(param.axis.ndim());
   index_t reverse_index = 0;
-  for (auto axis_iter = param.axis.begin() ; axis_iter!= param.axis.end(); ++axis_iter) {
-    CHECK_LT(*axis_iter, static_cast<int>(ishape.ndim()));
-    stride_[reverse_index] = ishape[*axis_iter];
+  for (int axis : param.axis) {
+    CHECK_LT(axis, static_cast<int>(ishape.ndim()));
+    stride_[reverse_index] = ishape[axis];
     trailing_[reverse_index] = 1;
-    for (index_t i2 = *axis_iter + 1; i2 < ishape.ndim(); ++i2) {
+    for (index_t i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
       trailing_[reverse_index] *= ishape[i2];
     }
     reverse_index++;
diff --git a/src/profiler/aggregate_stats.cc b/src/profiler/aggregate_stats.cc
index 66f833e699d..4c135beaf66 100644
--- a/src/profiler/aggregate_stats.cc
+++ b/src/profiler/aggregate_stats.cc
@@ -51,10 +51,9 @@ void AggregateStats::Dump(std::ostream& os, bool clear) {
      << "\tNote that counter items are counter values and not time units."
      << std::endl;
   std::unique_lock<std::mutex> lk(m_);
-  for (auto type_iter = stats_.begin(), type_e_iter = stats_.end();
-       type_iter != type_e_iter; ++type_iter) {
-    const std::string& type = type_iter->first;
-    const std::unordered_map<std::string, StatData>& mm = type_iter->second;
+  for (const auto& stat : stats_) {
+    const std::string& type = stat.first;
+    const std::unordered_map<std::string, StatData>& mm = stat.second;
     if (!mm.empty()) {
       os << type << std::endl << "=================" << std::endl;
       os << std::setw(25) << std::left  << "Name"
@@ -87,10 +86,10 @@ void AggregateStats::Dump(std::ostream& os, bool clear) {
          << std::setw(16) << std::right
          << "-------------"
          << std::endl;
-      for (auto iter = mm.begin(), e_iter = mm.end(); iter != e_iter; ++iter) {
-        const StatData &data = iter->second;
+      for (const auto& iter : mm) {
+        const StatData &data = iter.second;
         if (data.type_ == StatData::kDuration || data.type_ == StatData::kCounter) {
-          const std::string &name = iter->first;
+          const std::string &name = iter.first;
           os << std::setw(25) << std::left << name
              << std::setw(16) << std::right << data.total_count_;
           os << " "


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services