Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2021/09/02 13:53:43 UTC

[GitHub] [tvm] tqchen commented on a change in pull request #8833: [2/10] CMSIS-NN code generator for softmax

tqchen commented on a change in pull request #8833:
URL: https://github.com/apache/tvm/pull/8833#discussion_r701105838



##########
File path: src/relay/backend/contrib/cmsisnn/codegen_cmsisnn.cc
##########
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include <cmath>
+#include <fstream>
+#include <map>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "../../../../runtime/file_utils.h"
+#include "../../../../target/source/codegen_c.h"
+#include "../../../qnn/utils.h"
+
+namespace tvm {
+namespace runtime {
+
+using namespace tir;
+
+class CodeGenCMSISNN : public tvm::codegen::CodeGenC {
+ public:
+  void Init(bool output_ssa) {
+    decl_stream << "#include <stdio.h>\n";
+    decl_stream << "#include <stdlib.h>\n";
+    decl_stream << "#include <dlpack/dlpack.h>\n";
+    decl_stream << "#include <tvm/runtime/crt/module.h>\n";
+    decl_stream << "#include <arm_nnfunctions.h>\n";
+    CodeGenC::Init(output_ssa);
+  }
+
+  /*!
+   * \brief Emits code that offloads a subgraph to the Cortex-M.
+   *
+   * The generated code is written to the output stream rather than returned.
+   */
+  void AddFunction(const PrimFunc& prim_func) {
+    PrintExternCPrefix(stream);
+    CodeGenC::AddFunction(prim_func);
+    PrintExternCPostfix(stream);
+  }
+
+ private:
+  void VisitExpr_(const CallNode* op, std::ostream& os) {  // NOLINT(*)
+    if (!op->op.same_as(builtin::call_extern())) {
+      // Delegate non-extern calls to the base C codegen.
+      CodeGenC::VisitExpr_(op, os);
+      return;
+    }
+    std::string cmsis_func_name = op->args[0].as<StringImmNode>()->value;
+    if (cmsis_func_name == "arm_softmax_s8") {
+      EmitSoftmax(op);
+    }
+  }
+
+  /*! \brief Prints the opening __cplusplus guard for extern "C" declarations */
+  void PrintExternCPrefix(std::ostringstream& ss) {
+    PrintIndent();
+    ss << "#ifdef __cplusplus\n";
+    ss << "extern \"C\" {\n";
+    ss << "#endif\n";
+  }
+
+  /*! \brief Prints the closing __cplusplus guard for extern "C" declarations */
+  void PrintExternCPostfix(std::ostringstream& ss) {
+    PrintIndent();
+    ss << "#ifdef __cplusplus\n";
+    ss << "}\n";
+    ss << "#endif\n";
+  }
+
+  /*! \brief Emits the CMSIS-NN code block for softmax */
+  void EmitSoftmax(const CallNode* op) {
+    // @tir.call_extern("arm_softmax_s8", buffer_0, num_rows, row_size, scale, buffer_1, dtype=int8)
+    std::string cmsis_func_name = op->args[0].as<StringImmNode>()->value;
+    int32_t num_rows = op->args[2].as<IntImmNode>()->value;
+    int32_t row_size = op->args[3].as<IntImmNode>()->value;
+    float quant_scale = op->args[4].as<FloatImmNode>()->value;
+
+    // calculate multiplier and shift for CMSIS-NN softmax API
+    // Note: tfl micro assumptions
+    // TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8);
+    // TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128);
+    // TF_LITE_ENSURE(context, output->params.scale == 1.f / 256);
+    double beta = 1.0;
+    int32_t input_bits = 5;
+    double beta_multiplier = (beta * quant_scale * (1 << (31 - input_bits)));
+    beta_multiplier = std::min<double>(beta_multiplier, (1ll << 31) - 1.0);
+    auto mult_shift_pair = tvm::relay::qnn::GetFixedPointMultiplierShift(beta_multiplier);

Review comment:
    On the data structure bits: note that most of the data structures are passed as pointers (handles), which means we likely only need intrinsics that construct those data structures (returning handles) and then pass them as arguments to the call.
   
    We would prefer this way of reusing existing constructs (via composing intrinsics) over adding semantics to the TIR when possible.
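
As a rough sketch of that composition (the parameter-packing helper name below is hypothetical, and a plain call_extern stands in for the handle-returning intrinsic the comment describes), the struct can be produced by a handle-returning call and the handle passed straight into the kernel's call_extern, with no new TIR node types:

    #include <tvm/tir/builtin.h>
    #include <tvm/tir/expr.h>
    #include <tvm/tir/stmt.h>

    namespace tir = tvm::tir;

    // Build the parameter struct with an inner extern call that returns a
    // handle, then pass that handle as an ordinary argument to the kernel.
    tir::Stmt MakeSoftmaxCall(tvm::PrimExpr input, tvm::PrimExpr output,
                              tvm::PrimExpr mult, tvm::PrimExpr shift) {
      tir::Call params(tvm::DataType::Handle(), tir::builtin::call_extern(),
                       {tir::StringImm("cmsisnn_make_softmax_params"), mult, shift});
      tir::Call softmax(tvm::DataType::Int(8), tir::builtin::call_extern(),
                        {tir::StringImm("arm_softmax_s8"), input, output, params});
      return tir::Evaluate(softmax);
    }

With this shape, the codegen only has to recognize the nested call_extern names when lowering, and TIR itself stays unchanged.
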




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org