Posted to commits@mxnet.apache.org by ta...@apache.org on 2020/04/22 03:18:15 UTC

[incubator-mxnet] branch v1.7.x updated: change error tolerance for bf16 bn (#18110)

This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
     new 6e956fd  change error tolerance for bf16 bn (#18110)
6e956fd is described below

commit 6e956fd2b78fcd20e732a1f1d915da630ea1d999
Author: rongzha1 <ro...@intel.com>
AuthorDate: Wed Apr 22 11:17:17 2020 +0800

    change error tolerance for bf16 bn (#18110)
---
 tests/python/mkl/test_bf16_operator.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/python/mkl/test_bf16_operator.py b/tests/python/mkl/test_bf16_operator.py
index e4f4a93..b275c96 100644
--- a/tests/python/mkl/test_bf16_operator.py
+++ b/tests/python/mkl/test_bf16_operator.py
@@ -126,8 +126,8 @@ def test_bf16_bn():
     bn_fp32 = mx.sym.BatchNorm(data_sym_fp32, **bn_params)
 
     bn_bf16 = mx.sym.BatchNorm(data_sym_bf16, **bn_params)
-    check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 32, 28, 28), bf16_use_fp32_params=True, etol=1e-3)
-    check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(32, 16, 64, 64), bf16_use_fp32_params=True, etol=1e-3)
+    check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 32, 28, 28), bf16_use_fp32_params=True, etol=1e-2)
+    check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(32, 16, 64, 64), bf16_use_fp32_params=True, etol=1e-2)
 
 @with_seed()
 def test_bf16_conv():
@@ -278,7 +278,7 @@ def test_bf16_fallback():
     bn_params = {"eps": 2e-05, "fix_gamma": False, "use_global_stats": True, "name": "bn"}
     bn_fp32 = mx.sym.BatchNorm(data_sym_fp32, **bn_params)
     bn_bf16=mx.sym.BatchNorm(data_sym_bf16, **bn_params)
-    check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 32, 28, 28, 3), bf16_use_fp32_params=True, etol=1e-3)
+    check_operator_accuracy(sym_fp32=bn_fp32, sym_bf16=bn_bf16, data_shape=(3, 32, 28, 28, 3), bf16_use_fp32_params=True, etol=1e-2)
 
     conv_params = {"kernel": (3, 3, 3), "num_filter": 128, "pad": (1, 1, 1), "stride": (1, 1, 1), "no_bias": True, "name": "conv"}
     conv_fp32 = mx.sym.Convolution(data_sym_fp32, **conv_params)
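
Why the looser tolerance is plausible: bfloat16 stores only 7 mantissa bits, so merely casting an fp32 value to bf16 introduces a relative error of up to about 2**-7 (~8e-3) under truncation, or roughly half that under round-to-nearest. That is already above the old etol of 1e-3 before BatchNorm does any arithmetic, so 1e-2 leaves headroom. A minimal sketch of this, assuming numpy and emulating bf16 by zeroing the low 16 bits of the fp32 representation (the helper to_bf16 is illustrative, not an MXNet API, and truncation overstates round-to-nearest by up to 2x):

    import numpy as np

    def to_bf16(x):
        # Emulate bfloat16: keep sign, 8 exponent bits, 7 mantissa bits
        # by zeroing the low 16 bits of each fp32 value (truncation).
        bits = np.asarray(x, dtype=np.float32).view(np.uint32)
        return (bits & np.uint32(0xFFFF0000)).view(np.float32)

    rng = np.random.default_rng(0)
    x = rng.standard_normal((32, 16, 64, 64)).astype(np.float32)
    rel_err = np.abs(to_bf16(x) - x) / np.abs(x)
    # Approaches 2**-7 ~= 8e-3 for truncation; hardware round-to-nearest
    # bf16 is about half that -- either way well above etol=1e-3.
    print(rel_err.max())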