Posted to commits@mxnet.apache.org by zh...@apache.org on 2021/03/09 16:53:46 UTC

[incubator-mxnet] branch v1.x updated: add roberta tests (#19996)

This is an automated email from the ASF dual-hosted git repository.

zhasheng pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
     new 9561f2b  add roberta tests (#19996)
9561f2b is described below

commit 9561f2b418db299a4bbc07994fe230fcf5a7c94f
Author: Zhaoqi Zhu <zh...@gmail.com>
AuthorDate: Tue Mar 9 08:52:01 2021 -0800

    add roberta tests (#19996)
---
 tests/python-pytest/onnx/test_onnxruntime.py | 61 +++++++++++++++++++++++++++-
 1 file changed, 60 insertions(+), 1 deletion(-)
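
As a quick note for reviewers (not part of the commit itself): the new cases can be run in isolation with pytest's -k filter. A minimal sketch, assuming mxnet, gluonnlp and onnxruntime are installed locally and the snippet is run from the repository root:

    import pytest

    # Select only the RoBERTa ONNX export/inference tests added by this commit;
    # -v prints one line per parametrized model.
    pytest.main([
        "tests/python-pytest/onnx/test_onnxruntime.py",
        "-k", "test_roberta_inference_onnxruntime",
        "-v",
    ])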

diff --git a/tests/python-pytest/onnx/test_onnxruntime.py b/tests/python-pytest/onnx/test_onnxruntime.py
index 48a8a43..5cb4fe7 100644
--- a/tests/python-pytest/onnx/test_onnxruntime.py
+++ b/tests/python-pytest/onnx/test_onnxruntime.py
@@ -555,8 +555,67 @@ def test_action_recognition_model_inference_onnxruntime(tmp_path, model, act_rec
     finally:
         shutil.rmtree(tmp_path)
 
+
 @with_seed()
-@pytest.mark.parametrize('model', ['bert_12_768_12'])
+@pytest.mark.parametrize('model_name', ['roberta_24_1024_16', 'roberta_12_768_12'])
+def test_roberta_inference_onnxruntime(tmp_path, model_name):
+    tmp_path = str(tmp_path)
+    try:
+        import gluonnlp as nlp
+        ctx = mx.cpu(0)
+
+        dataset = 'openwebtext_ccnews_stories_books_cased'  # dataset the pretrained RoBERTa weights use
+        model, _ = nlp.model.get_model(
+            name=model_name,
+            ctx=ctx,
+            pretrained=True,
+            use_decoder=True,
+            dataset_name=dataset)
+
+        model.hybridize(static_alloc=False)
+
+        batch = 2
+        seq_length = 32
+        num_masked_positions = 1
+        inputs = mx.nd.random.uniform(0, 30522, shape=(batch, seq_length), dtype='float32', ctx=ctx)
+        valid_length = mx.nd.array([seq_length] * batch, dtype='float32', ctx=ctx)
+        masked_positions = mx.nd.random.uniform(0, seq_length, shape=(batch, num_masked_positions),
+                                                dtype='float32', ctx=ctx).astype('int32')
+
+        sequence_outputs, attention_outputs = model(inputs, valid_length, masked_positions)
+
+        model_dir = 'roberta_model'
+        if not os.path.isdir(model_dir):
+            os.mkdir(model_dir)
+
+        prefix = '%s/%s' % (model_dir, model_name)
+        model.export(prefix)
+
+        sym_file = "%s-symbol.json" % prefix
+        params_file = "%s-0000.params" % prefix
+        onnx_file = "%s.onnx" % prefix
+        input_shapes = [(batch, seq_length), (batch,), (batch, num_masked_positions)]
+        converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
+                                               [np.float32, np.float32, np.int32],
+                                               onnx_file, verbose=True)
+
+        sess_options = onnxruntime.SessionOptions()
+        sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+        sess = onnxruntime.InferenceSession(onnx_file, sess_options)
+
+        in_tensors = [inputs, valid_length, masked_positions]
+        input_dict = dict((sess.get_inputs()[i].name, in_tensors[i].asnumpy()) for i in range(len(in_tensors)))
+        pred = sess.run(None, input_dict)
+
+        assert_almost_equal(sequence_outputs, pred[0])
+        assert_almost_equal(attention_outputs, pred[1])
+
+    finally:
+        shutil.rmtree(tmp_path)
+
+
+@with_seed()
+@pytest.mark.parametrize('model', ['bert_12_768_12', 'bert_24_1024_16'])
 def test_bert_inference_onnxruntime(tmp_path, model):
     tmp_path = str(tmp_path)
     try: