You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2022/04/05 00:36:56 UTC
[GitHub] [tvm] zxybazh opened a new issue, #10900: [Bug] `Compute_inline` Accepts Invalid Block Causing Error
zxybazh opened a new issue, #10900:
URL: https://github.com/apache/tvm/issues/10900
In the following script, `compute_inline` accidentally accepts an invalid block — a supposedly pure spatial block that contains an init block — causing an undefined reference to a tensor created by `buffer_decl`.
```
from tvm.tir import Schedule
import tvm
from tvm.script import tir as T
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
# fmt: off
# from tvm.script import tir as T
@tvm.script.ir_module
class Module:
@T.prim_func
def main(placeholder: T.Buffer[(1, 512, 7, 7), "float32"], tensor: T.Buffer[(1, 512, 1, 1), "float32"]) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# body
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":64, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
tensor_1 = T.alloc_buffer([1, 512, 1, 1], dtype="float32")
tensor_1_rf = T.alloc_buffer([1, 512, 1, 1, 49], dtype="float32")
for i0, i1, i2, i3, i4_i5_fused_0_i4_i5_fused_1_fused_0, i4_i5_fused_0_i4_i5_fused_1_fused_1 in T.grid(1, 512, 1, 1, 49, 1):
with T.block("tensor_rf"):
vi4_i5_fused_0_i4_i5_fused_1_fused_0 = T.axis.spatial(49, i4_i5_fused_0_i4_i5_fused_1_fused_0)
ax0 = T.axis.spatial(1, 0)
ax1 = T.axis.spatial(512, i1)
ax2 = T.axis.spatial(1, 0)
ax3 = T.axis.spatial(1, 0)
T.reads(placeholder[ax0, ax1, ax2 * 7 + vi4_i5_fused_0_i4_i5_fused_1_fused_0 // 7, ax3 * 7 + vi4_i5_fused_0_i4_i5_fused_1_fused_0 % 7])
T.writes(tensor_1_rf[ax0, ax1, ax2, ax3, vi4_i5_fused_0_i4_i5_fused_1_fused_0])
with T.init():
tensor_1_rf[ax0, ax1, ax2, ax3, vi4_i5_fused_0_i4_i5_fused_1_fused_0] = T.float32(0)
tensor_1_rf[ax0, ax1, ax2, ax3, vi4_i5_fused_0_i4_i5_fused_1_fused_0] = tensor_1_rf[ax0, ax1, ax2, ax3, vi4_i5_fused_0_i4_i5_fused_1_fused_0] + placeholder[ax0, ax1, ax2 * 7 + vi4_i5_fused_0_i4_i5_fused_1_fused_0 // 7, ax3 * 7 + vi4_i5_fused_0_i4_i5_fused_1_fused_0 % 7]
for i0, i1 in T.grid(1, 512):
for ax0, ax1, ax2, ax3, ax4 in T.grid(49, 1, 1, 1, 1):
with T.block("tensor"):
vi4_i5_fused_0_i4_i5_fused_1_fused_0, ax0_1 = T.axis.remap("RS", [ax0, ax1])
ax1_1 = T.axis.spatial(512, i1 + ax2)
ax2_1, ax3_1 = T.axis.remap("SS", [ax3, ax4])
T.reads(tensor_1_rf[ax0_1, ax1_1, ax2_1, ax3_1, vi4_i5_fused_0_i4_i5_fused_1_fused_0])
T.writes(tensor_1[ax0_1, ax1_1, ax2_1, ax3_1])
with T.init():
tensor_1[ax0_1, ax1_1, ax2_1, ax3_1] = T.float32(0)
tensor_1[ax0_1, ax1_1, ax2_1, ax3_1] = tensor_1[ax0_1, ax1_1, ax2_1, ax3_1] + tensor_1_rf[ax0_1, ax1_1, ax2_1, ax3_1, vi4_i5_fused_0_i4_i5_fused_1_fused_0]
for i2, i3 in T.grid(1, 1):
with T.block("tensor_1"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(tensor_1[ax0, ax1, ax2, ax3])
T.writes(tensor[ax0, ax1, ax2, ax3])
tensor[ax0, ax1, ax2, ax3] = tensor_1[ax0, ax1, ax2, ax3] * T.float32(0.020408163265306121)
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
sch = Schedule(Module)
print(sch.mod.script())
b15 = sch.get_block(name="tensor_rf", func_name="main")
sch.compute_inline(block=b15)
print(sch.mod.script())
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org
For queries about this service, please contact Infrastructure at:
users@infra.apache.org
[GitHub] [tvm] junrushao1994 closed issue #10900: [Bug] `Compute_inline` Accepts Invalid Block Causing Error
Posted by GitBox <gi...@apache.org>.
junrushao1994 closed issue #10900: [Bug] `Compute_inline` Accepts Invalid Block Causing Error
URL: https://github.com/apache/tvm/issues/10900
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org
For queries about this service, please contact Infrastructure at:
users@infra.apache.org
[GitHub] [tvm] zxybazh commented on issue #10900: [Bug] `Compute_inline` Accepts Invalid Block Causing Error
Posted by GitBox <gi...@apache.org>.
zxybazh commented on issue #10900:
URL: https://github.com/apache/tvm/issues/10900#issuecomment-1088153208
Here is the generated TIR:
```
# from tvm.script import tir as T
@tvm.script.ir_module
class Module:
@T.prim_func
def main(placeholder: T.Buffer[(1, 512, 7, 7), "float32"], tensor: T.Buffer[(1, 512, 1, 1), "float32"]) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# buffer definition
tensor_1_rf = T.buffer_decl([1, 512, 1, 1, 49], dtype="float32")
# body
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.parallel":64, "meta_schedule.unroll_explicit":0, "meta_schedule.vectorize":64})
tensor_1 = T.alloc_buffer([1, 512, 1, 1], dtype="float32")
for i0, i1 in T.grid(1, 512):
for ax0, ax1, ax2, ax3, ax4 in T.grid(49, 1, 1, 1, 1):
with T.block("tensor"):
vi4_i5_fused_0_i4_i5_fused_1_fused_0, ax0_1 = T.axis.remap("RS", [ax0, ax1])
ax1_1 = T.axis.spatial(512, i1 + ax2)
ax2_1, ax3_1 = T.axis.remap("SS", [ax3, ax4])
T.reads(tensor_1_rf[ax0_1, ax1_1, ax2_1, ax3_1, vi4_i5_fused_0_i4_i5_fused_1_fused_0], placeholder[ax0_1, ax1_1, ax2_1 * 7 + vi4_i5_fused_0_i4_i5_fused_1_fused_0 // 7, ax3_1 * 7 + vi4_i5_fused_0_i4_i5_fused_1_fused_0 % 7])
T.writes(tensor_1[ax0_1, ax1_1, ax2_1, ax3_1])
with T.init():
tensor_1[ax0_1, ax1_1, ax2_1, ax3_1] = T.float32(0)
tensor_1[ax0_1, ax1_1, ax2_1, ax3_1] = tensor_1[ax0_1, ax1_1, ax2_1, ax3_1] + (tensor_1_rf[ax0_1, ax1_1, ax2_1, ax3_1, vi4_i5_fused_0_i4_i5_fused_1_fused_0] + placeholder[ax0_1, ax1_1, ax2_1 * 7 + vi4_i5_fused_0_i4_i5_fused_1_fused_0 // 7, ax3_1 * 7 + vi4_i5_fused_0_i4_i5_fused_1_fused_0 % 7])
for i2, i3 in T.grid(1, 1):
with T.block("tensor_1"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(tensor_1[ax0, ax1, ax2, ax3])
T.writes(tensor[ax0, ax1, ax2, ax3])
tensor[ax0, ax1, ax2, ax3] = tensor_1[ax0, ax1, ax2, ax3] * T.float32(0.020408163265306121)
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org
For queries about this service, please contact Infrastructure at:
users@infra.apache.org