Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2020/11/04 11:22:21 UTC

[GitHub] [incubator-tvm] giuseros commented on a change in pull request #6840: conv1d_transpose speedup

giuseros commented on a change in pull request #6840:
URL: https://github.com/apache/incubator-tvm/pull/6840#discussion_r517271380



##########
File path: tests/python/topi/python/test_topi_conv1d_transpose_ncw.py
##########
@@ -94,6 +94,9 @@ def test_conv1d_transpose_ncw():
     verify_conv1d_transpose_ncw(1, 1, 10, 1, 5, 1, (0, 3), (0,))
     verify_conv1d_transpose_ncw(1, 1, 10, 1, 5, 1, (1, 3), (0,))
     verify_conv1d_transpose_ncw(1, 1, 10, 1, 5, 1, (2, 3), (0,))
+    verify_conv1d_transpose_ncw(1, 257, 128, 1, 512, 128, 256, (0,))
+    verify_conv1d_transpose_ncw(1, 257, 128, 2, 512, 128, 256, (0,))
+    verify_conv1d_transpose_ncw(1, 257, 128, 257, 512, 128, 256, (0,))

Review comment:
       Could you add a test for the case kernel_size == stride? I think this case is common in encoder/decoder networks.
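   For example, something along these lines (a hypothetical case, assuming the argument order is batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding, as in the existing calls):

```python
# Hypothetical kernel_size == stride case with zero padding; sizes chosen only for illustration.
verify_conv1d_transpose_ncw(1, 257, 128, 1, 512, 512, 0, (0,))
```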

##########
File path: python/tvm/topi/cuda/conv1d_transpose_ncw.py
##########
@@ -65,36 +66,43 @@ def conv1d_transpose_ncw(cfg, data, kernel, stride, padding, out_dtype, output_p
     out_width = (inp_width - 1) * stride + kernel_size - pad_left - pad_right + output_padding
     pad_left = kernel_size - 1 - pad_left
     pad_right = kernel_size - 1 - pad_right + output_padding
+    padded_width = pad_left + inp_width + pad_right
     dilated_width = stride * (inp_width - 1) + 1
-    data = te.compute(
-        (batch, inp_channels, pad_left + dilated_width + pad_right),
+    padded_dilated_width = pad_left + dilated_width + pad_right
+
+    padded_data = te.compute(
+        (batch, inp_channels, padded_width),
         lambda n, c, x: tvm.tir.if_then_else(
-            tvm.tir.all(
-                x >= pad_left,
-                x < pad_left + dilated_width,
-                tvm.tir.indexmod(x - pad_left, stride).equal(0),
-            ),
-            data[n, c, tvm.tir.indexdiv(x - pad_left, stride)],
-            tvm.tir.const(0.0, "float32"),
-        ),
-        name="data_pad",
-    )
-
-    dc = te.reduce_axis((0, inp_channels), name="dc")
-    dw = te.reduce_axis((0, kernel_size), name="dw")
+            tvm.tir.all(x >= pad_left,
+                        x < pad_left + inp_width),
+            data[n, c, x - pad_left],
+            tvm.tir.const(0., "float32")),
+        name='data_pad')
+
+    padded_kernel = te.compute(
+        (inp_channels, out_channels, kernel_size + stride - 1),
+        lambda ci, co, k: tvm.tir.if_then_else(
+            tvm.tir.all(k < kernel_size),
+            kernel[ci, co, kernel_size-k-1],
+            tvm.tir.const(0., "float32")),
+        name='kernel_pad')
+
+    ci = te.reduce_axis((0, inp_channels), name='ci')
+    k = te.reduce_axis((0, (kernel_size + stride - 1)//stride), name='k')
+    border = pad_left * (stride - 1)
+
     data_out = te.compute(
         (batch, out_channels, out_width),
-        lambda b, c, w: te.sum(
-            data[b, dc, w + dw].astype(out_dtype)
-            * kernel[dc, c, kernel_size - 1 - dw].astype(out_dtype),
-            axis=[dc, dw],
-        ),
-        tag="conv1d_transpose_ncw",
-    )
+        lambda b, co, w: te.sum(
+            padded_data[b, ci, (border+w + stride - 1) // stride + k].astype(out_dtype) *
+            padded_kernel[ci, co, k*stride + tvm.tir.indexmod(stride-w-border, stride)].astype(out_dtype),
+            axis=[ci, k]), tag="conv1d_transpose_ncw")

Review comment:
       Could you add a few more comments explaining the algorithm you are using?
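   For instance, it might help to relate the new padded-kernel formulation to the naive definition that the removed code implemented: dilate the input with stride-1 zeros, pad it, then convolve with the flipped kernel. A minimal NumPy sketch of that reference semantics, purely for illustration (not the code in this PR):

```python
import numpy as np

def conv1d_transpose_ref(data, kernel, stride, pad_left, pad_right, output_padding=0):
    """Naive reference: dilate the input, pad it, then slide the flipped kernel (stride 1)."""
    batch, in_c, in_w = data.shape
    _, out_c, k_w = kernel.shape
    # Insert (stride - 1) zeros between input elements.
    dilated_w = stride * (in_w - 1) + 1
    dilated = np.zeros((batch, in_c, dilated_w), dtype=data.dtype)
    dilated[:, :, ::stride] = data
    # Pad so that a stride-1 correlation with the flipped kernel yields the output.
    pl = k_w - 1 - pad_left
    pr = k_w - 1 - pad_right + output_padding
    padded = np.pad(dilated, ((0, 0), (0, 0), (pl, pr)), mode="constant")
    out_w = dilated_w + pl + pr - (k_w - 1)
    out = np.zeros((batch, out_c, out_w), dtype=data.dtype)
    flipped = kernel[:, :, ::-1]
    for w in range(out_w):
        # Sum over input channels and kernel taps for each output position.
        out[:, :, w] = np.einsum("bci,coi->bo", padded[:, :, w : w + k_w], flipped)
    return out
```

   A comment relating the schedule to that definition, plus a line on how (if I read it correctly) `border` and the `indexmod(stride - w - border, stride)` phase pick out the kernel taps that land on real (non-inserted-zero) input elements, would make this much easier to review.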

##########
File path: python/tvm/topi/cuda/conv1d_transpose_ncw.py
##########
@@ -65,36 +66,43 @@ def conv1d_transpose_ncw(cfg, data, kernel, stride, padding, out_dtype, output_p
     out_width = (inp_width - 1) * stride + kernel_size - pad_left - pad_right + output_padding
     pad_left = kernel_size - 1 - pad_left
     pad_right = kernel_size - 1 - pad_right + output_padding
+    padded_width = pad_left + inp_width + pad_right
     dilated_width = stride * (inp_width - 1) + 1
-    data = te.compute(
-        (batch, inp_channels, pad_left + dilated_width + pad_right),
+    padded_dilated_width = pad_left + dilated_width + pad_right
+
+    padded_data = te.compute(
+        (batch, inp_channels, padded_width),
         lambda n, c, x: tvm.tir.if_then_else(
-            tvm.tir.all(
-                x >= pad_left,
-                x < pad_left + dilated_width,
-                tvm.tir.indexmod(x - pad_left, stride).equal(0),
-            ),
-            data[n, c, tvm.tir.indexdiv(x - pad_left, stride)],
-            tvm.tir.const(0.0, "float32"),
-        ),
-        name="data_pad",
-    )
-
-    dc = te.reduce_axis((0, inp_channels), name="dc")
-    dw = te.reduce_axis((0, kernel_size), name="dw")
+            tvm.tir.all(x >= pad_left,
+                        x < pad_left + inp_width),
+            data[n, c, x - pad_left],
+            tvm.tir.const(0., "float32")),
+        name='data_pad')
+
+    padded_kernel = te.compute(
+        (inp_channels, out_channels, kernel_size + stride - 1),
+        lambda ci, co, k: tvm.tir.if_then_else(
+            tvm.tir.all(k < kernel_size),
+            kernel[ci, co, kernel_size-k-1],
+            tvm.tir.const(0., "float32")),
+        name='kernel_pad')
+
+    ci = te.reduce_axis((0, inp_channels), name='ci')
+    k = te.reduce_axis((0, (kernel_size + stride - 1)//stride), name='k')
+    border = pad_left * (stride - 1)
+
     data_out = te.compute(
         (batch, out_channels, out_width),
-        lambda b, c, w: te.sum(
-            data[b, dc, w + dw].astype(out_dtype)
-            * kernel[dc, c, kernel_size - 1 - dw].astype(out_dtype),
-            axis=[dc, dw],
-        ),
-        tag="conv1d_transpose_ncw",
-    )
+        lambda b, co, w: te.sum(
+            padded_data[b, ci, (border+w + stride - 1) // stride + k].astype(out_dtype) *

Review comment:
       Are you sure this works for padding=0, kernel_size=2, stride=2?
   In this case `ceil(w / stride)` gives 0, 1, 1, 2, 2, ..., while *I think* (but I might be wrong) that you want 0, 0, 1, 1, 2, 2, ... when you index the `padded_data` tensor.
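   Just to illustrate the two sequences I mean (plain Python, stride=2, ignoring the `border` offset):

```python
stride = 2
print([(w + stride - 1) // stride for w in range(6)])  # ceil(w / stride)  -> [0, 1, 1, 2, 2, 3]
print([w // stride for w in range(6)])                 # floor(w / stride) -> [0, 0, 1, 1, 2, 2]
```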

##########
File path: python/tvm/topi/cuda/conv1d_transpose_ncw.py
##########
@@ -65,36 +66,43 @@ def conv1d_transpose_ncw(cfg, data, kernel, stride, padding, out_dtype, output_p
     out_width = (inp_width - 1) * stride + kernel_size - pad_left - pad_right + output_padding
     pad_left = kernel_size - 1 - pad_left
     pad_right = kernel_size - 1 - pad_right + output_padding
+    padded_width = pad_left + inp_width + pad_right
     dilated_width = stride * (inp_width - 1) + 1
-    data = te.compute(
-        (batch, inp_channels, pad_left + dilated_width + pad_right),
+    padded_dilated_width = pad_left + dilated_width + pad_right
+
+    padded_data = te.compute(
+        (batch, inp_channels, padded_width),
         lambda n, c, x: tvm.tir.if_then_else(
-            tvm.tir.all(
-                x >= pad_left,
-                x < pad_left + dilated_width,
-                tvm.tir.indexmod(x - pad_left, stride).equal(0),
-            ),
-            data[n, c, tvm.tir.indexdiv(x - pad_left, stride)],
-            tvm.tir.const(0.0, "float32"),
-        ),
-        name="data_pad",
-    )
-
-    dc = te.reduce_axis((0, inp_channels), name="dc")
-    dw = te.reduce_axis((0, kernel_size), name="dw")
+            tvm.tir.all(x >= pad_left,
+                        x < pad_left + inp_width),
+            data[n, c, x - pad_left],
+            tvm.tir.const(0., "float32")),
+        name='data_pad')
+
+    padded_kernel = te.compute(
+        (inp_channels, out_channels, kernel_size + stride - 1),
+        lambda ci, co, k: tvm.tir.if_then_else(
+            tvm.tir.all(k < kernel_size),
+            kernel[ci, co, kernel_size-k-1],
+            tvm.tir.const(0., "float32")),
+        name='kernel_pad')
+
+    ci = te.reduce_axis((0, inp_channels), name='ci')
+    k = te.reduce_axis((0, (kernel_size + stride - 1)//stride), name='k')

Review comment:
       Instead of writing `(kernel_size + stride - 1)//stride`, could you write `ceil(kernel_size / stride)`? In the end it would make the code more understandable and wouldn't hurt performance.
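   (For positive integers the two expressions are equal, so it is purely a readability change; a quick sanity check in plain Python, assuming both values are ordinary ints:)

```python
import math

for kernel_size in range(1, 16):
    for stride in range(1, 16):
        assert (kernel_size + stride - 1) // stride == math.ceil(kernel_size / stride)
```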




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org