You are viewing a plain text version of this content; the canonical link to the original message is available in the mailing-list archive.
Posted to commits@tvm.apache.org by ma...@apache.org on 2021/10/21 04:42:42 UTC
[tvm] branch main updated: [Relay] Remove FTVMCompute from
TNonComputational ops (#9334)
This is an automated email from the ASF dual-hosted git repository.
masahi pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new f4c146c [Relay] Remove FTVMCompute from TNonComputational ops (#9334)
f4c146c is described below
commit f4c146ca37c061a1192fd3aaa988ae23ed1bed67
Author: Lily Orth-Smith <li...@gmail.com>
AuthorDate: Wed Oct 20 21:42:15 2021 -0700
[Relay] Remove FTVMCompute from TNonComputational ops (#9334)
* remove FTVMCompute from noncomputational ops
* Remove injective schedule registration for on_device since it is non-computational
* lint
---
python/tvm/relay/op/_tensor.py | 3 ---
src/relay/op/annotation/annotation.cc | 7 +------
src/relay/op/memory/memory.cc | 21 +++------------------
src/relay/op/vm/vm.cc | 14 ++------------
4 files changed, 6 insertions(+), 39 deletions(-)
diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py
index 18ce933..daec488 100644
--- a/python/tvm/relay/op/_tensor.py
+++ b/python/tvm/relay/op/_tensor.py
@@ -89,9 +89,6 @@ register_injective_schedule("device_copy")
register_broadcast_schedule("fast_exp")
register_broadcast_schedule("fast_tanh")
register_broadcast_schedule("fast_erf")
-# a fake on_device schedule.
-# this will not be used in actual computation
-register_injective_schedule("on_device")
# zeros
diff --git a/src/relay/op/annotation/annotation.cc b/src/relay/op/annotation/annotation.cc
index beadf4a..8b00839 100644
--- a/src/relay/op/annotation/annotation.cc
+++ b/src/relay/op/annotation/annotation.cc
@@ -94,12 +94,7 @@ RELAY_REGISTER_OP("on_device")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
- .set_attr<TNonComputational>("TNonComputational", true)
- .set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<te::Tensor>& inputs,
- const Type& out_type) -> Array<te::Tensor> {
- return {topi::identity(inputs[0])};
- });
+ .set_attr<TNonComputational>("TNonComputational", true);
OnDeviceProps GetOnDeviceProps(const CallNode* call_node) {
if (call_node->op == OnDeviceOp()) {
diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc
index 5339d48..6b22cfd 100644
--- a/src/relay/op/memory/memory.cc
+++ b/src/relay/op/memory/memory.cc
@@ -91,12 +91,7 @@ RELAY_REGISTER_OP("memory.alloc_storage")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
- .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
- .set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<te::Tensor>& inputs,
- const Type& out_dtype) -> Array<te::Tensor> {
- return {topi::identity(inputs[0])};
- });
+ .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);
Expr AllocTensor(Expr storage, Expr offset, Expr shape, DataType dtype,
Array<IndexExpr> assert_shape) {
@@ -206,12 +201,7 @@ RELAY_REGISTER_OP("memory.alloc_tensor")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
- .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
- .set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<te::Tensor>& inputs,
- const Type& out_dtype) -> Array<te::Tensor> {
- return {topi::identity(inputs[0])};
- });
+ .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);
bool KillRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
@@ -230,12 +220,7 @@ RELAY_REGISTER_OP("memory.kill")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
- .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
- .set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<te::Tensor>& inputs,
- const Type& out_dtype) -> Array<te::Tensor> {
- return {topi::identity(inputs[0])};
- });
+ .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);
static void FlattenTupleTypeAux(const Type& type, std::vector<TensorType>* out) {
if (auto tt = type.as<TensorTypeNode>()) {
diff --git a/src/relay/op/vm/vm.cc b/src/relay/op/vm/vm.cc
index be31b54..65a4ec0 100644
--- a/src/relay/op/vm/vm.cc
+++ b/src/relay/op/vm/vm.cc
@@ -138,12 +138,7 @@ RELAY_REGISTER_OP("vm.shape_func")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
- .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
- .set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<te::Tensor>& inputs,
- const Type& out_dtype) -> Array<te::Tensor> {
- return {topi::identity(inputs[0])};
- });
+ .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);
// vm.invoke_tvm_op
bool InvokeTVMOpRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
@@ -188,12 +183,7 @@ RELAY_REGISTER_OP("vm.invoke_tvm_op")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
- .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
- .set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<te::Tensor>& inputs,
- const Type& out_dtype) -> Array<te::Tensor> {
- return {topi::identity(inputs[0])};
- });
+ .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);
// vm.reshape
TVM_REGISTER_NODE_TYPE(ReshapeTensorAttrs);