You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2020/03/05 17:14:58 UTC

[GitHub] [incubator-tvm] inadob commented on issue #4805: [Frontend][TFlite] Add parser support for relu6, leaky_relu, relu_n1_to_1, log_softmax

inadob commented on issue #4805: [Frontend][TFlite] Add parser support for relu6, leaky_relu, relu_n1_to_1, log_softmax
URL: https://github.com/apache/incubator-tvm/pull/4805#issuecomment-595344454
 
 
   > Can you show me code example here? I have typically used float numbers like 1.0 or 6.0 to work with `a_min` or `a_max`
   
   ```c++
   Expr ClipQnnCanonicalize(const Attrs& attrs, const Array<Expr>& new_args,
                                  const Array<tvm::relay::Type>& arg_types) {
     CHECK_EQ(new_args.size(), 7);
     auto& input_tensor = new_args[0];
     auto& input_scale = new_args[1]; // in fp
     auto& input_zero_point = new_args[2]; // in int32
     auto& clip_min = new_args[3];  // value is in fp
     auto& clip_max = new_args[4];  // value is in fp
     auto& output_scale = new_args[5];
     auto& output_zero_point = new_args[6];
   
     // Get the input dtype and shape.
     CHECK_EQ(arg_types.size(), 8);
     auto tensor_type = arg_types[0].as<TensorTypeNode>();
     CHECK(tensor_type != nullptr);
     auto input_dtype = tensor_type->dtype;
     auto input_shape = tensor_type->shape;
   
   
     // shift the input by subtracting the input zero_point
     auto shifted_input = Subtract(Cast(input_tensor, DataType::Int(32)), input_zero_point);
   
     // do the clipping in int32
     // auto clipped_tensor = Clip(shifted_input, clip_min, clip_max)
     auto clipped_tensor = Clip(shifted_input, Cast(clip_min, DataType::Float(64)), Cast(clip_max, DataType::Float(64)))
     // shift the input back by adding the zero_point
     clipped_tensor = Add(clipped_tensor, input_zero_point);
   
     // requantize the output if needed
     auto requantized_output = clipped_tensor;
     if (!IsEqualScalar(input_scale, output_scale) ||
         !IsEqualScalar(input_zero_point, output_zero_point)) {
        requantized_output = Requantize(clipped_tensor, input_shape, input_scale, input_zero_point, output_scale,
                                        output_zero_point, DataType::Int(32));
       }
   
     // Go back to lower precision.
     auto q_min = GetQmin(input_dtype);
     auto q_max = GetQmax(input_dtype);
     requantized_output = Clip(requantized_output, q_min, q_max); 
     return Cast(requantized_output, input_dtype);
   }
   ```
   And I am getting a complaint about **double dtype**...
   ```
   /workspace/src/relay/qnn/op/clip.cc:61:117: error: cannot convert 'tvm::relay::Expr {aka tvm::RelayExpr}' to 'double' for argument '2' to 'tvm::relay::Expr tvm::relay::Clip(tvm::relay::Expr, double, double)'
      auto clipped_tensor = Clip(shifted_input, Cast(clip_min, DataType::Float(64)), Cast(clip_max, DataType::Float(64)))
   
   ```
   
   The commented out line where I directly do clip() without casting to float64 didn't work too.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services