You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2019/01/23 23:21:32 UTC

[GitHub] Fiend1213 opened a new issue #13976: Problem of exporting FP16 SyncBN model.

Fiend1213 opened a new issue #13976: Problem of exporting FP16 SyncBN model.
URL: https://github.com/apache/incubator-mxnet/issues/13976
 
 
   ## Description
   This is a problem when exporting fp16 model containing SyncBN.
   
   ```
   import mxnet as mx
   
   mx.random.seed(42)
   
   def data_xform(data):
       """Move channel axis to the beginning, cast to float32, and normalize to [0, 1]."""
       return mx.nd.moveaxis(data, 2, 0).astype('float32') / 255
   
   train_data = mx.gluon.data.vision.MNIST(train=True).transform_first(data_xform)
   val_data = mx.gluon.data.vision.MNIST(train=False).transform_first(data_xform)
   
   batch_size = 2000
   train_loader = mx.gluon.data.DataLoader(train_data, shuffle=True, batch_size=batch_size)
   val_loader = mx.gluon.data.DataLoader(val_data, shuffle=False, batch_size=batch_size)
   
   net = mx.gluon.nn.HybridSequential()
   with net.name_scope():
       net.add(mx.gluon.nn.Conv2D(64, (3, 3)))
       net.add(mx.gluon.contrib.nn.SyncBatchNorm())
       net.add(mx.gluon.nn.Conv2D(64, (3, 3)))
       net.add(mx.gluon.contrib.nn.SyncBatchNorm())
       net.add(mx.gluon.nn.Dense(128))
       net.add(mx.gluon.nn.Dense(10))
       net.hybridize()
       net.cast('float16')
   print('finish build the network')
   
   ctx =  [mx.gpu(int(id)) for id in [0,1,2,3,4,5,6,7]]
   net.initialize(mx.init.Normal(0.01), ctx=ctx)
   print('finish initializing')
   
   trainer = mx.gluon.Trainer(
       params=net.collect_params(),
       optimizer='sgd',
       optimizer_params={'learning_rate': 0.04, 'multi_precision':True},
   )
   
   loss_function = mx.gluon.loss.SoftmaxCrossEntropyLoss()
   metric = mx.metric.Accuracy()
   
   num_epochs = 10
   for epoch in range(num_epochs):
       print('start training')
       for inputs, labels in train_loader:
           data = mx.gluon.utils.split_and_load(inputs, ctx_list=ctx, batch_axis=0)
           label = mx.gluon.utils.split_and_load(labels, ctx_list=ctx, batch_axis=0)
           losses = []
           with mx.autograd.record():
               for X, Y in zip(data, label):
                   outputs = net(X.astype('float16'))
                   loss = loss_function(outputs.astype('float32'), Y)
                   losses.append(loss)
   
           for l in losses:
               l.backward()
           trainer.step(batch_size=inputs.shape[0])
   
       net.export('mnist_syncbn_fp16.params')
   ```
   
   ## Error Message:
   ```
   File "/home/ubuntu/Workspace/incubator-mxnet/python/mxnet/gluon/block.py", line 900, in export
       ndarray.save('%s-%04d.params'%(path, epoch), arg_dict)
     File "/home/ubuntu/Workspace/incubator-mxnet/python/mxnet/ndarray/utils.py", line 273, in save
       keys))
     File "/home/ubuntu/Workspace/incubator-mxnet/python/mxnet/base.py", line 255, in check_call
       raise MXNetError(py_str(_LIB.MXGetLastError()))
   mxnet.base.MXNetError: [18:43:24] include/mxnet/././tensor_blob.h:203: Check failed: mshadow::DataType<DType>::kFlag == type_flag_ TBlob.get_with_shape: data type do not match specified type.Expected: 2 v.s. given 0
   ```

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services