Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2021/12/23 18:45:41 UTC

[GitHub] [tvm] jwfromm commented on a change in pull request #9637: Add FP requantize flow for llvm target

jwfromm commented on a change in pull request #9637:
URL: https://github.com/apache/tvm/pull/9637#discussion_r774731091



##########
File path: python/tvm/relay/qnn/op/qnn.py
##########
@@ -92,6 +92,44 @@ def requantize(
     )
 
 
+def upward(data):

Review comment:
       Would it make sense to add these as an attribute to `relay.round`? For example, we could do `relay.round(x, mode=upward)`. I think that would be a cleaner interface, unless there's a reason for having them as separate operators.
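
As a side-by-side illustration of the two interfaces being discussed, here is a
minimal Python sketch. Only `relay.var` and `relay.round` are existing APIs; the
`mode` keyword and the `upward` helper are the hypothetical alternatives under
discussion and are therefore shown only in comments.

    from tvm import relay

    x = relay.var("x", shape=(4,), dtype="float64")

    # Existing operator: the rounding behaviour is fixed by the op itself.
    y = relay.round(x)

    # As the PR is written: a dedicated operator per rounding mode.
    # y = relay.qnn.op.upward(x)

    # As suggested in this review: a rounding-mode attribute on relay.round.
    # y = relay.round(x, mode="upward")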

##########
File path: src/relay/qnn/op/requantize.cc
##########
@@ -208,6 +310,127 @@ Expr RequantizeLower(const Expr& input_tensor, const Expr& input_scale,
   return Cast(clipped_t, out_dtype);
 }
 
+// Floating point lowering of qnn.requantize op
+
+/*
+ * \brief Lower requantize to a sequence of ops.
+ * \param input_tensor The input tensor to requantize op.
+ * \param param The requantize op attrs.
+ * \param input_shape The input tensor shape of the requantize op.
+ * \return The sequence of existing Relay ops.
+ * \note RequantizeLowerFP uses floating point computation. All multiplication/subtraction/
+ *       addition occurs in a floating point data type, and only at the end is the result
+ *       converted to int32 and clamped to the output data type range.
+ *
+ *       The whole computation can be broken down into the following steps:
+ *       1) Subtract the input zero point.
+ *       2) Perform multiplication.
+ *       3) Add the output zero point.
+ *       4) Cast to the out_dtype.
+ */
+Expr RequantizeLowerFP(const Expr& input_tensor, const Expr& input_scale,
+                       const Expr& input_zero_point, const Expr& output_scale,
+                       const Expr& output_zero_point, const RequantizeAttrs* param,
+                       const Array<IndexExpr>& input_shape, const DataType& out_dtype) {
+  auto tensor = Cast(input_tensor, DataType::Float(64));
+  auto zero_scalar = MakeConstantScalar(DataType::Int(32), 0);
+  if (!IsEqualScalar(input_zero_point, zero_scalar)) {
+    // 1) Subtract the input zero point. Broadcast the input zero point if needed.
+    int rank = static_cast<int>(input_shape.size());
+    int axis = (param->axis < 0) ? ((rank > 0) ? rank + param->axis : 0) : param->axis;
+    Expr input_zero_broadcast = ExpandBiasToMatchAxis(Reshape(input_zero_point,
+                                                              {
+                                                                  -1,
+                                                              }),
+                                                      rank, {axis});
+    tensor = Subtract(tensor, Cast(input_zero_broadcast, DataType::Float(64)));
+  }
+
+  // 2) Multiply by the scale ratio (input_scale / output_scale). If the input and output scales
+  // are the same, the multiplication can be skipped. Check whether the input scale is per-tensor
+  // or per-channel. For per-tensor quantization there is a single scale for the whole tensor; for
+  // per-channel (aka per-axis) quantization there is a vector of scales for the input tensor.
+  // Depending on the quantization type, the corresponding floating point multiply is emitted.
+  auto scaled_fp64_t = tensor;
+  double output_scale_float = GetScalarFromConstant<float>(output_scale);
+  if (IsConstScalar(input_scale)) {
+    // This is per-tensor quantization. Single scale.
+    double input_scale_float = GetScalarFromConstant<float>(input_scale);
+    double double_multiplier = input_scale_float / output_scale_float;
+    // Skip the multiply if the input and output scales are the same.
+    if (!IsEqualScalar(input_scale, output_scale)) {
+      double multiplier = double_multiplier;
+      auto m_scalar = MakeConstantScalar(DataType::Float(64), multiplier);
+      scaled_fp64_t = Multiply(m_scalar, scaled_fp64_t);
+    }
+
+  } else {
+    // This is per-channel (per-axis) quantization.
+    std::vector<double> double_multipliers;
+    auto input_axis_scales = GetFloatVectorFromConstant(input_scale);
+    double output_scale_float = GetScalarFromConstant<float>(output_scale);
+    for (auto input_axis_scale : input_axis_scales) {
+      double multiplier = static_cast<double>(input_axis_scale) / output_scale_float;
+      double_multipliers.push_back(multiplier);
+    }
+    int axis = param->axis;
+    axis = (axis == -1) ? input_shape.size() - 1 : axis;
+
+    auto multiplier_expr = MakeConstantTensor(
+        DataType::Float(64), {(int64_t)double_multipliers.size()}, double_multipliers);
+    size_t n_dim = input_shape.size();
+    auto exp_multiplier_expr = ExpandBiasToMatchAxis(multiplier_expr, n_dim, {axis});
+
+    scaled_fp64_t = Multiply(scaled_fp64_t, exp_multiplier_expr);
+  }
+
+  // 3) Add the output zero point.
+  auto shifted_fp64_t = scaled_fp64_t;
+  if (!IsEqualScalar(output_zero_point, zero_scalar)) {
+    shifted_fp64_t = Add(shifted_fp64_t, Cast(output_zero_point, DataType::Float(64)));
+  }
+
+  if (param->rounding == "UPWARD") {
+    shifted_fp64_t = Upward(shifted_fp64_t);
+  } else /*if (param->rounding == "TONEAREST")*/ {
+    shifted_fp64_t = Tonearest(shifted_fp64_t);
+  }
+
+  shifted_fp64_t = Cast(shifted_fp64_t, DataType::Int(32));
+  // 4) Clip to the out_dtype min/max. Skip clipping if out_dtype is Int32; the floating point
+  // requantize computation keeps the value within the int32 range.
+  if (out_dtype == DataType::Int(32)) {
+    return shifted_fp64_t;
+  }
+
+  auto q_min = GetQmin(out_dtype);
+  auto q_max = GetQmax(out_dtype);
+  auto clipped_t = Clip(shifted_fp64_t, q_min, q_max);
+  return Cast(clipped_t, out_dtype);
+}
+
+// Lowering of qnn.requantize op
+/*
+ * \brief Lower requantize to a sequence of ops, dispatching to the floating point or fixed
+ *        point flow depending on the current target.
+ * \param input_tensor The input tensor to requantize op.
+ * \param param The requantize op attrs.
+ * \param input_shape The input tensor shape of the requantize op.
+ * \return The sequence of existing Relay ops.
+ */
+Expr RequantizeLower(const Expr& input_tensor, const Expr& input_scale,
+                     const Expr& input_zero_point, const Expr& output_scale,
+                     const Expr& output_zero_point, const RequantizeAttrs* param,
+                     const Array<IndexExpr>& input_shape, const DataType& out_dtype) {
+  auto target = Target::Current(true);
+  if (target.defined() && target->kind->name == "llvm") {

Review comment:
       I think it'd also be nice to add a comment here explaining why we're only doing this for llvm. That will help contributors decide which flow to apply to new hardware.
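
For readers tracing the four steps listed in the RequantizeLowerFP doc comment
above, here is a small NumPy sketch of the same per-tensor floating point
requantize computation. It is illustrative only: the exact tie-breaking of the
UPWARD/TONEAREST rounding modes is simplified to np.round, and the helper name
is made up for this sketch.

    import numpy as np

    def requantize_fp_sketch(q_in, in_scale, in_zp, out_scale, out_zp,
                             out_dtype=np.int8):
        """Illustrative per-tensor requantize in floating point."""
        x = q_in.astype(np.float64)
        x = x - in_zp                     # 1) subtract the input zero point
        x = x * (in_scale / out_scale)    # 2) multiply by the scale ratio
        x = x + out_zp                    # 3) add the output zero point
        x = np.round(x).astype(np.int64)  # rounding simplified, then cast
        info = np.iinfo(out_dtype)        # 4) clamp to out_dtype and cast
        return np.clip(x, info.min, info.max).astype(out_dtype)

    # [0, 50, 100] with scale ratio 2.0 and output zero point 10 -> [10, 110, 127]
    print(requantize_fp_sketch(np.array([0, 50, 100], dtype=np.int32),
                               in_scale=0.5, in_zp=0, out_scale=0.25, out_zp=10))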

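Relatedly, a hedged sketch of how the llvm-only dispatch would be exercised from
Python: compiling a graph that contains qnn.requantize with target "llvm" is what
is intended to select the new floating point flow, while other targets keep the
existing lowering (per the Target check quoted above). The scale and zero-point
values below are arbitrary.

    import tvm
    from tvm import relay

    data = relay.var("data", shape=(1, 8), dtype="int32")
    out = relay.qnn.op.requantize(
        data,
        input_scale=relay.const(0.5, "float32"),
        input_zero_point=relay.const(0, "int32"),
        output_scale=relay.const(0.25, "float32"),
        output_zero_point=relay.const(10, "int32"),
        rounding="UPWARD",
        out_dtype="int8",
    )
    mod = tvm.IRModule.from_expr(relay.Function([data], out))

    # Target::Current(true) in RequantizeLower sees the target supplied here,
    # so building for "llvm" is what should pick the floating point path.
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target="llvm")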



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org