Posted to commits@tvm.apache.org by lm...@apache.org on 2020/11/08 03:53:31 UTC
[incubator-tvm] branch main updated: register auto-scheduler to more ops (#6879)
This is an automated email from the ASF dual-hosted git repository.
lmzheng pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 5bce73c register auto-scheduler to more ops (#6879)
5bce73c is described below
commit 5bce73c2a8b418d5b97e445ba73418adc22802f2
Author: Lianmin Zheng <li...@gmail.com>
AuthorDate: Sat Nov 7 19:53:15 2020 -0800
register auto-scheduler to more ops (#6879)
---
python/tvm/relay/op/strategy/cuda.py | 24 +++++++++++++
.../relay/test_auto_scheduler_task_extraction.py | 40 ++++++++++++++++------
2 files changed, 53 insertions(+), 11 deletions(-)
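For context: strategy.add_implementation registers a TOPI compute paired with a
hand-written schedule, while strategy.add_auto_scheduler (the call added
throughout this commit) registers only the generic compute definition and leaves
scheduling to the auto-scheduler's search. A minimal sketch of the pattern,
assuming a TVM build containing this commit; all identifiers are taken from the
diff or from tvm.relay.op.strategy of that era, and the real calls live inside
conv2d_strategy_cuda in python/tvm/relay/op/strategy/cuda.py:

    # Sketch only: the registration pattern this commit repeats per layout.
    from tvm import topi
    from tvm.relay.op import op as _op
    from tvm.relay.op.strategy.generic import (
        wrap_compute_conv2d,
        wrap_topi_schedule,
    )

    strategy = _op.OpStrategy()

    # Existing path: a TOPI compute paired with a hand-written CUDA schedule.
    strategy.add_implementation(
        wrap_compute_conv2d(topi.cuda.conv2d_nchw),
        wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw),
        name="conv2d_nchw.cuda",
    )

    # Path added by this commit: only the generic compute is registered;
    # the auto-scheduler searches for a schedule instead of using a
    # hand-written one.
    strategy.add_auto_scheduler(
        wrap_compute_conv2d(topi.nn.conv2d_nchw), name="conv2d_nchw"
    )
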
diff --git a/python/tvm/relay/op/strategy/cuda.py b/python/tvm/relay/op/strategy/cuda.py
index 26e9a00..1229a71 100644
--- a/python/tvm/relay/op/strategy/cuda.py
+++ b/python/tvm/relay/op/strategy/cuda.py
@@ -146,6 +146,10 @@ def conv2d_strategy_cuda(attrs, inputs, out_type, target):
name="conv2d_nchw_winograd.cuda",
plevel=5,
)
+
+ strategy.add_auto_scheduler(
+ wrap_compute_conv2d(topi.nn.conv2d_nchw), name="conv2d_nchw"
+ )
elif layout == "HWCN":
assert kernel_layout == "HWIO"
strategy.add_implementation(
@@ -271,6 +275,11 @@ def conv2d_strategy_cuda(attrs, inputs, out_type, target):
wrap_topi_schedule(topi.cuda.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.cuda",
)
+
+ strategy.add_auto_scheduler(
+ wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
+ name="depthwise_conv2d_nchw.cuda",
+ )
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
@@ -278,6 +287,11 @@ def conv2d_strategy_cuda(attrs, inputs, out_type, target):
wrap_topi_schedule(topi.cuda.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.cuda",
)
+
+ strategy.add_auto_scheduler(
+ wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
+ name="depthwise_conv2d_nhwc.cuda",
+ )
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
@@ -463,6 +477,11 @@ def conv3d_strategy_cuda(attrs, inputs, out_type, target):
name="conv3d_ncdhw_winograd.cuda",
plevel=5,
)
+
+ strategy.add_auto_scheduler(
+ wrap_compute_conv3d(topi.nn.conv3d_ncdhw),
+ name="conv3d_ncdhw.cuda",
+ )
else: # layout == "NDHWC":
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_ndhwc),
@@ -486,6 +505,11 @@ def conv3d_strategy_cuda(attrs, inputs, out_type, target):
plevel=20,
)
+ strategy.add_auto_scheduler(
+ wrap_compute_conv3d(topi.nn.conv3d_ndhwc),
+ name="conv3d_ndhwc.cuda",
+ )
+
if target.kind.name == "cuda" and "cudnn" in target.libs:
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_cudnn, True),
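The test changes below exercise these registrations through task extraction:
with Relay integration enabled, auto_scheduler.extract_tasks traverses the
module and emits one search task per distinct registered compute, with a weight
counting how often it appears. A rough usage sketch mirroring the updated test
(the network choice and layout are illustrative, and a CUDA-enabled TVM build
is assumed):

    # Sketch: extract auto-scheduler tasks from a Relay workload on CUDA.
    import tvm
    import tvm.relay.testing  # makes relay.testing.* workloads available
    from tvm import relay, auto_scheduler

    auto_scheduler.enable_relay_integration()

    # With the registrations above, mobilenet in NCHW layout now yields
    # depthwise_conv2d_nchw tasks in addition to the dense/conv2d ones.
    mod, params = relay.testing.mobilenet.get_workload(
        batch_size=1, layout="NCHW", image_shape=(3, 224, 224)
    )
    target = tvm.target.Target("cuda")
    tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
    for task, weight in zip(tasks, task_weights):
        print(task.workload_key, weight)

    auto_scheduler.enable_relay_integration(False)
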
diff --git a/tests/python/relay/test_auto_scheduler_task_extraction.py b/tests/python/relay/test_auto_scheduler_task_extraction.py
index 63d4a6f..9f6ddb6 100644
--- a/tests/python/relay/test_auto_scheduler_task_extraction.py
+++ b/tests/python/relay/test_auto_scheduler_task_extraction.py
@@ -28,6 +28,10 @@ def get_network(name, batch_size=1, layout="NHWC"):
image_shape = (224, 224, 3)
elif layout == "NCHW":
image_shape = (3, 224, 224)
+ elif layout == "NCDHW":
+ image_shape = (3, 16, 224, 224)
+ elif layout == "NDHWC":
+ image_shape = (3, 224, 224, 16)
else:
raise ValueError("Invalid layout: " + layout)
@@ -39,14 +43,14 @@ def get_network(name, batch_size=1, layout="NHWC"):
mod, params = relay.testing.resnet.get_workload(
num_layers=50, batch_size=batch_size, layout=layout, image_shape=image_shape
)
- elif name == "resnet3d-18":
- mod, params = relay.testing.resnet_3d.get_workload(
- num_layers=18, batch_size=batch_size, layout=layout, image_shape=image_shape
- )
elif name == "mobilenet":
mod, params = relay.testing.mobilenet.get_workload(
batch_size=batch_size, layout=layout, image_shape=image_shape
)
+ elif name == "resnet3d-18":
+ mod, params = relay.testing.resnet_3d.get_workload(
+ num_layers=18, batch_size=batch_size, layout=layout, image_shape=image_shape
+ )
elif name == "dcgan":
mod, params = relay.testing.dcgan.get_workload(batch_size=batch_size, layout=layout)
elif name == "mlp":
@@ -70,20 +74,34 @@ def get_network(name, batch_size=1, layout="NHWC"):
@tvm.testing.requires_cuda
def test_task_extraction_cuda():
auto_scheduler.enable_relay_integration()
+ target = tvm.target.Target("cuda")
mod, params = get_network("mlp")
- target = tvm.target.Target("cuda")
tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
-
assert len(tasks) == 1
assert sum(task_weights) == 2
- mod, params = get_network("resnet-18")
- target = tvm.target.Target("cuda")
- tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
+ for layout in ["NHWC", "NCHW"]:
+ mod, params = get_network("resnet-18", layout=layout)
+ tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
+
+ assert len(tasks) == 21
+ assert sum(task_weights) == 22
+
+ mod, params = get_network("mobilenet", layout=layout)
+ tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
+
+ assert len(tasks) == 20
+ assert sum(task_weights) == 28
+
+ for layout in ["NCDHW", "NDHWC"]:
+ mod, params = get_network("resnet3d-18", layout=layout)
+ tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
+
+ assert len(tasks) == 21
+ assert sum(task_weights) == 22
- assert len(tasks) == 21
- assert sum(task_weights) == 22
+ auto_scheduler.enable_relay_integration(False)
if __name__ == "__main__":