Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/11/03 05:18:43 UTC

[GitHub] lihaofd commented on a change in pull request #12922: Support Quantized Fully Connected by INT8 GEMM

URL: https://github.com/apache/incubator-mxnet/pull/12922#discussion_r230545552
 
 

 ##########
 File path: src/operator/quantization/quantized_fully_connected.cc
 ##########
 @@ -79,6 +85,145 @@ bool QuantizedFullyConnectedType(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
+bool QuantizedFullyConnectedStorageType(const nnvm::NodeAttrs& attrs,
+                                        const int dev_mask,
+                                        DispatchMode* dispatch_mode,
+                                        std::vector<int> *in_attrs,
+                                        std::vector<int> *out_attrs) {
+  // Dispatch to the FComputeEx path on CPU (where the MKL-DNN/INT8 GEMM
+  // implementation lives); fall back to the default FCompute path elsewhere.
+  *dispatch_mode = DispatchMode::kFCompute;
+  if (dev_mask == mshadow::cpu::kDevMask) {
+    *dispatch_mode = DispatchMode::kFComputeEx;
+  }
+  // All inputs and outputs use dense (default) storage. Fail inference if
+  // any storage type is still undefined after assignment.
+  for (size_t i = 0; i < out_attrs->size(); i++) {
+    STORAGE_TYPE_ASSIGN_CHECK(*out_attrs, i, kDefaultStorage);
+    if ((*out_attrs)[i] == kUndefinedStorage) return false;
+  }
+  for (size_t i = 0; i < in_attrs->size(); i++) {
+    STORAGE_TYPE_ASSIGN_CHECK(*in_attrs, i, kDefaultStorage);
+    if ((*in_attrs)[i] == kUndefinedStorage) return false;
+  }
+
+  return true;
+}
+
+struct QuantizedSumInitKernelWithBias {
+  // Initialize the int32 accumulator with the bias: rescale each int8 bias
+  // element from the bias quantization scale to the int32 output scale.
+  MSHADOW_XINLINE static void Map(int i, int32_t *out,
+                                  const int8_t *bias, const float *min_out,
+                                  const float *max_out, const float *min_bias,
+                                  const float *max_bias) {
+    typedef int32_t T1;
+    typedef int8_t  T2;
+    using mshadow::red::limits::MaxValue;
+    // Float value represented by one quantized unit of the int32 output
+    // and of the int8 bias, respectively.
+    float float_for_one_out_quant  =
+      MaxAbs(*min_out, *max_out) / static_cast<float>(MaxValue<T1>());
+    float float_for_one_bias_quant =
+      MaxAbs(*min_bias, *max_bias) / static_cast<float>(MaxValue<T2>());
+    if (float_for_one_out_quant != 0) {
+      // Truncating cast, consistent with the implicit conversion before.
+      out[i] = static_cast<int32_t>(bias[i] * float_for_one_bias_quant /
+                                    float_for_one_out_quant);
+    } else {
+      LOG(WARNING) << "QuantizedSumInitKernelWithBias: the output"
+                      " quantization scale is 0; writing 0 for the bias.";
+      out[i] = 0;
+    }
+  }
+};
+template<typename SrcType>
+void MKLDNNQuantizedFullyConnectedForward(const nnvm::NodeAttrs& attrs,
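
For context, a storage-type function like QuantizedFullyConnectedStorageType above is normally wired into the operator through NNVM attribute registration. A minimal sketch follows; the op name, the FComputeEx binding, and the int8_t template argument are assumptions based on MXNet's usual contrib quantization conventions, not taken from this diff:

    NNVM_REGISTER_OP(_contrib_quantized_fully_connected)
    // Bind the storage-type inference shown in the diff above.
    .set_attr<FInferStorageType>("FInferStorageType",
                                 QuantizedFullyConnectedStorageType)
    // Assumed: the MKL-DNN forward path registered for CPU, matching the
    // kFComputeEx dispatch mode that the function selects on cpu::kDevMask.
    .set_attr<FComputeEx>("FComputeEx<cpu>",
                          MKLDNNQuantizedFullyConnectedForward<int8_t>);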
 
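The rescaling in QuantizedSumInitKernelWithBias converts each int8 bias element into the int32 output's quantization scale: real_bias = bias * bias_step, then out = real_bias / out_step, where each step is MaxAbs(min, max) / MaxValue<T>. Below is a self-contained sketch of that arithmetic with made-up calibration ranges; it is an illustration, not part of the patch:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical calibration ranges for the int32 output and int8 bias.
      float min_out = -50.0f, max_out = 50.0f;
      float min_bias = -1.0f, max_bias = 1.0f;
      int8_t bias = 64;  // one quantized bias element

      // Float value represented by one quantized unit of each type,
      // mirroring MaxAbs(min, max) / MaxValue<T> in the kernel.
      float out_step =
          std::max(std::fabs(min_out), std::fabs(max_out)) / 2147483647.0f;
      float bias_step =
          std::max(std::fabs(min_bias), std::fabs(max_bias)) / 127.0f;

      // Rescale the bias into int32 output units (truncated, as in the kernel).
      int32_t out = static_cast<int32_t>(bias * bias_step / out_step);
      std::printf("int32 bias contribution: %d\n", out);  // ~2.16e7 here
      return 0;
    }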
 Review comment:
   fixed

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services