Posted to commits@tvm.apache.org by ma...@apache.org on 2020/09/18 10:16:46 UTC

[incubator-tvm] branch master updated: Add several op mapping in PyTorch frontend (#6472)

This is an automated email from the ASF dual-hosted git repository.

masahi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
     new 28ea54a  Add several op mapping in PyTorch frontend (#6472)
28ea54a is described below

commit 28ea54aa9d09f5087ef9ee467168a3c6c596a336
Author: Yong Wu <yw...@alumni.jh.edu>
AuthorDate: Fri Sep 18 18:16:27 2020 +0800

    Add several op mapping in PyTorch frontend (#6472)
    
    * Add copy_ and clamp_ in PyTorch frontend
    
    * add true_divide in PyTorch frontend
    
    * more test cases for copy_
    
    * fix format
    
    * remove copy_
    
    * fix format
    
    * skip true_divide for torch < 1.5
---
 python/tvm/relay/frontend/pytorch.py          |  2 ++
 tests/python/frontend/pytorch/test_forward.py | 47 +++++++++++++++++++++++++--
 2 files changed, 46 insertions(+), 3 deletions(-)
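
For context on the semantics the new mappings must preserve: torch.true_divide (added in PyTorch 1.5, hence the version guard in the test below) always performs floating-point division, even for integer inputs, and torch.clamp_ is the in-place variant of torch.clamp. A minimal sketch of both ops in plain PyTorch:

    import torch

    a = torch.tensor([1, 2, 3])     # int64 tensor
    b = torch.tensor([2, 2, 2])     # int64 tensor

    # true_divide promotes to floating point regardless of input dtype;
    # this commit routes it to the same _elemwise("divide") converter
    # already used for aten::div
    print(torch.true_divide(a, b))  # tensor([0.5000, 1.0000, 1.5000])

    # clamp_ mutates its input tensor in place; it computes the same
    # saturation as the out-of-place clamp
    x = torch.rand(4, 8)
    x.clamp_(0.1, 0.9)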

diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index c9320a9..9ceb9fc 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -2509,6 +2509,7 @@ def _get_convert_map(prelude, default_dtype):
         "aten::div": _elemwise("divide"),
         "aten::div_": _elemwise("divide"),
         "aten::floor_divide": _elemwise("floor_divide"),
+        "aten::true_divide": _elemwise("divide"),
         "aten::addcdiv": _addcdiv(),
         "aten::addcmul": _addcmul(),
         "aten::ones": _ones(default_dtype),
@@ -2630,6 +2631,7 @@ def _get_convert_map(prelude, default_dtype):
         "aten::isinf": _unary("isinf"),
         "aten::isnan": _unary("isnan"),
         "aten::clamp": _clamp(),
+        "aten::clamp_": _clamp(),
         "aten::detach": _identity(),
         "aten::upsample_bilinear2d": _upsample("bilinear", prelude),
         "aten::upsample_nearest2d": _upsample("nearest_neighbor", prelude),
diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index e8a8507..83ba22b 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -21,15 +21,14 @@ import sys
 from scipy.stats import t as tdistr
 import numpy as np
 import torch
+import torchvision
 from torch.nn import Module
 import tvm
-import torchvision
-
 from tvm import relay
 from tvm.contrib import graph_runtime
 from tvm.contrib.nvcc import have_fp16
 import tvm.testing
-
+from packaging import version as package_version
 
 sys.setrecursionlimit(10000)
 
@@ -2399,6 +2398,24 @@ def test_forward_clamp():
 
 
 @tvm.testing.uses_gpu
+def test_forward_clamp_():
+    torch.set_grad_enabled(False)
+
+    class ClampInPlace(Module):
+        def __init__(self, min, max):
+            super(ClampInPlace, self).__init__()
+            self.min = min
+            self.max = max
+
+        def forward(self, *args):
+            return torch.clamp_(args[0], self.min, self.max)
+
+    for ishape, min, max in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 0.5)):
+        input_data = torch.rand(ishape).float()
+        verify_model(ClampInPlace(min, max).float().eval(), input_data=input_data)
+
+
+@tvm.testing.uses_gpu
 def test_forward_ones():
     torch.set_grad_enabled(False)
 
@@ -2896,6 +2913,28 @@ def test_forward_addcmul():
 
 
 @tvm.testing.uses_gpu
+def test_forward_true_divide():
+    if package_version.parse(torch.__version__) < package_version.parse("1.5.0"):
+        return
+    torch.set_grad_enabled(False)
+
+    class TrueDivide(Module):
+        def forward(self, *args):
+            return torch.true_divide(args[0], args[1])
+
+    dividend = torch.rand([5, 3]).float()
+    # divisor could be either tensor or scalar
+    divisor_tensor = torch.rand([5, 3]).float() + 0.5
+    divisor_scalar = torch.tensor(1.0, dtype=torch.float32)
+    verify_model(
+        TrueDivide().float().eval(), input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4
+    )
+    verify_model(
+        TrueDivide().float().eval(), input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4
+    )
+
+
+@tvm.testing.uses_gpu
 def test_forward_traced_function():
     def fn(t1, t2):
         return t1 + t2
@@ -3308,6 +3347,7 @@ if __name__ == "__main__":
     test_forward_where()
     test_forward_addcdiv()
     test_forward_addcmul()
+    test_forward_true_divide()
     test_forward_clone()
     test_forward_softplus()
     test_forward_softsign()
@@ -3323,6 +3363,7 @@ if __name__ == "__main__":
     test_forward_pow()
     test_forward_unary()
     test_forward_clamp()
+    test_forward_clamp_()
     test_forward_logical_not()
     test_forward_bitwise_not()
     test_forward_bitwise_xor()
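
The tests above lean on verify_model, a helper defined earlier in test_forward.py that traces the module, imports it through the Relay frontend, runs both versions, and compares outputs within the given tolerances. A simplified sketch of that pattern (names and details here are illustrative; the real helper covers many more cases):

    import torch
    import tvm
    import tvm.testing
    from tvm import relay
    from tvm.contrib import graph_runtime

    def verify_model_sketch(model, input_data, rtol=1e-5, atol=1e-5):
        inputs = input_data if isinstance(input_data, list) else [input_data]
        with torch.no_grad():
            baseline = model(*inputs)  # reference output from PyTorch

        traced = torch.jit.trace(model, tuple(inputs))
        shape_list = [("input{}".format(i), tuple(t.shape))
                      for i, t in enumerate(inputs)]
        mod, params = relay.frontend.from_pytorch(traced, shape_list)

        # Compile for CPU and execute through the graph runtime
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target="llvm", params=params)
        runtime = graph_runtime.GraphModule(lib["default"](tvm.cpu()))
        for i, t in enumerate(inputs):
            runtime.set_input("input{}".format(i), tvm.nd.array(t.numpy()))
        runtime.run()
        tvm.testing.assert_allclose(
            runtime.get_output(0).asnumpy(), baseline.numpy(),
            rtol=rtol, atol=atol,
        )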