Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2021/10/04 08:34:51 UTC

[GitHub] [tvm] jiangjiajun commented on a change in pull request #9126: [Frontend][PaddlePaddle][Part1] Add 100+ operators for PaddlePaddle

jiangjiajun commented on a change in pull request #9126:
URL: https://github.com/apache/tvm/pull/9126#discussion_r721150375



##########
File path: python/tvm/relay/frontend/paddlepaddle.py
##########
@@ -191,57 +307,80 @@ def convert_dropout(g, op, block):
     """Operator converter for dropout."""
 
     x = g.get_node(op.input("X")[0])
-    out = _op.copy(x)
+    g.add_node(op.output("Out")[0], x)
+
+
+def convert_dot(g, op, block):
+    """Operator converter for dot."""
+
+    # x and y should be 1D or 2D tensors; for 2D tensors,
+    # the first dimension is the batch dimension
+    x = g.get_node(op.input("X")[0])
+    y = g.get_node(op.input("Y")[0])
+
+    out = _op.sum(_op.multiply(x, y), axis=[-1], keepdims=True)
     g.add_node(op.output("Out")[0], out)
 
 
 def convert_elementwise_op(g, op, block):
     """Operator converter for all the elementwise operators."""
 
     op_map = {
-        "elementwise_div": lambda x, y: x / y,
-        "elementwise_add": lambda x, y: x + y,
-        "elementwise_mul": lambda x, y: x * y,
-        "elementwise_sub": lambda x, y: x - y,
-        "elementwise_mod": lambda x, y: x % y,
+        "elementwise_div": "divide",
+        "elementwise_add": "add",
+        "elementwise_mul": "multiply",
+        "elementwise_sub": "subtract",
+        "elementwise_mod": "mod",
+        "elementwise_max": "maximum",
+        "elementwise_min": "minimum",
+        "elementwise_pow": "power",
+        "elementwise_floordiv": "floor_divide",
+        "equal": "equal",
+        "greater_equal": "greater_equal",
+        "greater_than": "greater",
+        "less_equal": "less_equal",
+        "less_than": "less",
+        "not_equal": "not_equal",
     }
     op_func = op_map[op.type]
     ipt0 = g.get_node(op.input("X")[0])
     ipt1 = g.get_node(op.input("Y")[0])
-    ipt0_shape = block.var(op.input("X")[0]).shape
-    ipt1_shape = block.var(op.input("Y")[0]).shape
+    ipt0_shape = infer_shape(ipt0)
+    ipt1_shape = infer_shape(ipt1)
     axis = op.attr("axis")
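+    # Paddle broadcasts Y against X starting at dimension `axis`; padding
+    # Y with trailing axes lets Relay's right-aligned broadcasting match.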
     if len(ipt0_shape) != len(ipt1_shape):
         if axis < 0:
             axis = axis + len(ipt0_shape)
         if axis != len(ipt0_shape) - 1:
             ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))
+    op_func = get_relay_op(op_func)
     out = op_func(ipt0, ipt1)
     g.add_node(op.output("Out")[0], out)
 
 
-def convert_equal(g, op, block):
-    """Operator converter for equal."""
+def convert_expand(g, op, block):
+    """Operator converter for expand."""
 
     x = g.get_node(op.input("X")[0])
-    y = g.get_node(op.input("Y")[0])
-    out = _op.equal(x, y)
+    if op.input("Shape"):
+        sizes = g.get_node(op.input("Shape")[0])
+        sizes = try_infer_value(sizes, g.get_params())[0]
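+        # try_infer_value returns the shape as an np.ndarray when it can
+        # be computed statically, otherwise the relay expression unchanged.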
+    else:
+        sizes = op.attr("shape")
+
+    if isinstance(sizes, np.ndarray):
+        sizes = sizes.tolist()
+
+    out = _op.broadcast_to(x, sizes)
     g.add_node(op.output("Out")[0], out)
 
 
-def convert_activation(g, op, block):
-    """Operator converter for all the activation."""
+def convert_expand_as(g, op, block):
+    """Operator converter for expand_as."""
 
-    op_map = {
-        "exp": _op.exp,
-        "relu": _op.nn.relu,
-        "tanh": _op.tanh,
-        "sqrt": _op.sqrt,
-        "erf": _op.erf,
-        "abs": _op.abs,
-    }
-    act_func = op_map[op.type]
-    out = act_func(g.get_node(op.input("X")[0]))
+    x = g.get_node(op.input("X")[0])
+    target_shape = op.attr("target_shape")
+    out = _op.broadcast_to(x, target_shape)

Review comment:
       PaddlePaddle's `expand_as` doesn't support multi-directional broadcasting, so this problem will not arise in the PaddlePaddle frontend.
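
For intuition, paddle's one-directional `expand_as` maps straight onto `broadcast_to`, which is what `convert_expand_as` relies on. A small numpy sketch (numpy's `broadcast_to` follows the same rules as Relay's `_op.broadcast_to`; the shapes are illustrative):

    import numpy as np

    # expand_as only stretches size-1 dimensions of X toward the target
    # shape; the target itself is never reshaped.
    x = np.zeros((1, 3))
    target_shape = (4, 3)  # shape of the tensor X is expanded "as"
    print(np.broadcast_to(x, target_shape).shape)  # (4, 3)

A multi-directional case such as (4, 1) against (1, 3) would require stretching both operands, which `broadcast_to` alone cannot express; per the comment above, paddle rejects such inputs to `expand_as`, so the converter never sees them.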

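The reduction in `convert_dot` above works the same way for both the 1D and the batched 2D case. A numpy sketch of the computation (numpy ops stand in for the Relay ones; the values are illustrative):

    import numpy as np

    # One dot product per batch row: sum the elementwise products over
    # the last axis, keeping that axis so the output stays 2D.
    x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    y = np.array([[1.0, 0.0, 1.0], [0.5, 0.5, 0.5]])
    print(np.sum(x * y, axis=-1, keepdims=True))  # -> [[4.], [7.5]]
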
##########
File path: python/tvm/relay/frontend/paddlepaddle.py
##########
@@ -17,73 +17,178 @@
 # pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
 # pylint: disable=import-outside-toplevel
 """Paddle: PArallel Distributed Deep LEarning."""
-import warnings
 
 import numpy as np
 
 import tvm
 from tvm.ir import IRModule
 
 from .. import analysis
 from .. import expr as _expr
 from .. import function as _function
 from .. import ty as _ty
 from .. import op as _op
 from .common import (
     fold_constant,
+    get_relay_op,
     infer_shape,
     infer_type,
     infer_value,
+    try_infer_value,
     new_var,
 )
 
 __all__ = ["from_paddle"]
 
 
+def _get_pad_size(in_size, dilated_kernel_size, stride_size):
+    """Calculate the paddings size."""

Review comment:
       Done
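
The hunk is truncated right after the docstring. For reference, a sketch of the SAME-style padding computation that a helper with this signature typically performs (a reconstruction under that assumption, not necessarily the exact body merged in the PR):

    def _get_pad_size(in_size, dilated_kernel_size, stride_size):
        """Calculate the padding size (sketch, not the PR's exact body)."""
        # Total padding so the sliding window covers the whole input.
        if in_size % stride_size == 0:
            pad = max(dilated_kernel_size - stride_size, 0)
        else:
            pad = max(dilated_kernel_size - (in_size % stride_size), 0)
        # Split as evenly as possible; any extra element goes at the end.
        pad_before = pad // 2
        pad_after = pad - pad_before
        return [pad_before, pad_after]

With in_size=10, dilated_kernel_size=3 and stride_size=2 this yields [0, 1], matching ceil(10 / 2) = 5 output positions.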




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org