Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2021/12/16 22:30:49 UTC

[GitHub] [tvm] electriclilies commented on a change in pull request #9749: [Relay] Fix invalid shape function for "copy" operator

electriclilies commented on a change in pull request #9749:
URL: https://github.com/apache/tvm/pull/9749#discussion_r770956692



##########
File path: include/tvm/runtime/debug.h
##########
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file tvm/runtime/debug.h
+ * \brief Helpers for debugging at runtime.
+ */
+#ifndef TVM_RUNTIME_DEBUG_H_
+#define TVM_RUNTIME_DEBUG_H_
+
+#include <tvm/runtime/container/adt.h>
+#include <tvm/runtime/ndarray.h>
+
+#include <ostream>
+#include <string>
+
+namespace tvm {
+namespace runtime {
+
+/*!
+ * \brief Helpers to describe runtime objects in human-friendly form. For \p nd_arrays we show their
+ * shapes and dtypes, but also their contents if 'small' and on the \p host_device (mostly so that
+ * we can see dynamic shapes as they are computed). For \p adts we show the ADT fields. For
+ * \p objects we dispatch to one of the above as appropriate.
+ */
+void AppendNDArray(std::ostream& os, const NDArray& nd_array, const DLDevice& host_device,
+                   bool show_content = true);
+void AppendADT(std::ostream& os, const ADT& adt, const DLDevice& host_device,
+               bool show_content = true);
+void AppendRuntimeObject(std::ostream& os, const ObjectRef& object, const DLDevice& host_device,
+                         bool show_content = true);

Review comment:
       Nice, this will make debugging shape functions easier!
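       As a rough usage sketch (hand-written for illustration, not code from this PR;
       the wrapper function, device choice, and output stream are all made up):

           #include <iostream>
           #include <sstream>
           #include <tvm/runtime/debug.h>

           // Hypothetical helper: dump a shape-function output so it can be eyeballed.
           void DumpShapeFuncOutput(const tvm::runtime::NDArray& out) {
             std::ostringstream os;
             DLDevice host{kDLCPU, 0};
             // Shows shape and dtype; also the contents, since the array is small
             // and sits on the host device.
             tvm::runtime::AppendNDArray(os, out, /*host_device=*/host, /*show_content=*/true);
             std::cerr << "shape func output: " << os.str() << std::endl;
           }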

##########
File path: src/target/compilation_config.cc
##########
@@ -61,47 +61,47 @@ void CompilationConfigNode::EstablishDefaultSEScopes(const transform::PassContex
   if (host_target.defined()) {
     CHECK(!host_target->host.defined()) << "Host targets are not expected to have hosts";
     host_device_type = static_cast<DLDeviceType>(host_target->kind->device_type);
-    DLOG(INFO) << "Using the given host target " << host_target->ToDebugString()
-               << " of device type " << host_device_type << " for the host target";
+    VLOG(1) << "Using the given host target " << host_target->ToDebugString() << " of device type "
+            << host_device_type << " for the host target";

Review comment:
       Have we discussed VLOG vs. DLOG? It seems like people are still adding DLOGs, so it might be worth unifying on a single logging approach at some point.
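       For reference, my rough understanding of the difference (hand-written sketch;
       the exact gating in TVM's logging.h may differ):

           // DLOG(...) is typically compiled out of release builds entirely, so the
           // message can never be enabled in a normal deployment binary.
           DLOG(INFO) << "host target " << host_target->ToDebugString();

           // VLOG(level) stays in the binary and is filtered at runtime by per-file
           // verbosity (e.g. via the TVM_LOG_DEBUG environment variable), so it can
           // be switched on without rebuilding.
           VLOG(1) << "host target " << host_target->ToDebugString();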

##########
File path: tests/python/relay/dyn/test_dynamic_op_level3.py
##########
@@ -410,5 +418,75 @@ def verify_sparse_fill_empty_rows(
     )
 
 
+def test_dyn_copy():
+    target = tvm.target.Target("llvm")
+    dev = tvm.cpu()
+    mod = tvm.parser.fromtext(
+        """
+        #[version = "0.0.5"]
+        def @main(%x: Tensor[(?, 3), int64]) -> Tensor[(?, 3), int64] {
+          copy(%x)
+        }
+        """
+    )
+    x_data = np.random.rand(15, 3).astype("int64")
+    expected = x_data
+    check_on_vm(target, dev, [x_data], expected, mod)
+
+
+def test_dyn_copy_scalar():
+    target = tvm.target.Target("llvm")
+    dev = tvm.cpu()
+    mod = tvm.parser.fromtext(
+        """
+        #[version = "0.0.5"]
+        def @main(%x: int32, %y: Tensor[(?), int32]) -> Tensor[(?), int32] {
+          %0 = copy(%x);
+          %1 = expand_dims(%0, axis=0);
+          %2 = (%y, %1);
+          concatenate(%2)
+        }
+        """
+    )
+    x_data = 3
+    y_data = np.random.rand(7).astype("int32")
+    expected = np.concatenate((y_data, np.expand_dims(x_data, axis=0)))
+    check_on_vm(target, dev, [x_data, y_data], expected, mod)
+
+
+def test_dyn_expand_dims():

Review comment:
       There's already another test_dyn_expand_dims in this file; possibly a bad rebase?

##########
File path: src/relay/backend/te_compiler_cache.cc
##########
@@ -635,20 +640,16 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator<Array<te::Tensor>>
     // Get output ndims
     auto ret_type = call_node->checked_type();
     Array<IndexExpr> out_ndims;
-    if (const auto* ttype = ret_type.as<TensorTypeNode>()) {
+    for (const auto& ttype : FlattenTupleType(ret_type)) {
       out_ndims.push_back(IntImm(DataType::Int(32), ttype->shape.size()));

Review comment:
       Is this change related to fixing the copy operator or just a general improvement?
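       To check my reading of the new loop, here's a standalone toy sketch of the
       flattening idea (the Ty struct and Flatten function are made up and are not
       TVM's FlattenTupleType; they only mirror the recursion the loop relies on):

           #include <iostream>
           #include <vector>

           // Toy stand-in for a Relay type: a leaf ("tensor") carries an ndim,
           // a node ("tuple") carries sub-types.
           struct Ty {
             int ndim;
             std::vector<Ty> fields;
           };

           // Collect the ndim of every tensor leaf, left to right, through nesting.
           void Flatten(const Ty& t, std::vector<int>* out_ndims) {
             if (t.fields.empty()) {
               out_ndims->push_back(t.ndim);
               return;
             }
             for (const Ty& f : t.fields) Flatten(f, out_ndims);
           }

           int main() {
             // Return type (Tensor[(?, 3)], (Tensor[(4,)], Tensor[()])) should give
             // out_ndims == {2, 1, 0}, one entry per output tensor.
             Ty mat{2, {}};
             Ty vec{1, {}};
             Ty scalar{0, {}};
             Ty inner{0, {vec, scalar}};  // nested tuple
             Ty ret{0, {mat, inner}};     // full return type

             std::vector<int> out_ndims;
             Flatten(ret, &out_ndims);
             for (int n : out_ndims) std::cout << n << " ";
             std::cout << std::endl;
             return 0;
           }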




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org