Posted to dev@singa.apache.org by GitBox <gi...@apache.org> on 2020/06/02 09:11:01 UTC

[GitHub] [singa] Shashankwer edited a comment on issue #707: Layer mismatch causes session to terminate abruptly

Shashankwer edited a comment on issue #707:
URL: https://github.com/apache/singa/issues/707#issuecomment-637404896


   Hi, 
   
   The issue reported here concerns error handling on the Python API side, and it is particularly noticeable with the autograd.backward function.
   
   Consider the example below:

   ```python
   from singa import autograd
   from singa import opt
   from singa import tensor
   from singa import device


   class MLP():
       # A two-layer perceptron built directly from autograd layers.
       def __init__(self):
           self.linear1 = autograd.Linear(3, 4)
           self.linear2 = autograd.Linear(4, 3)

       def forward(self, x):
           y = self.linear1(x)
           return self.linear2(y)

       def loss(self, out, ty):
           return autograd.softmax_cross_entropy(out, ty)

       def optim(self, loss):
           self.optimizer.backward_and_update(loss)

       def set_optimizer(self, optimizer):
           self.optimizer = optimizer


   def train(model, x, t, dev=device.get_default_device(), epochs=100):
       sgd = opt.SGD()  # create the optimizer once, not on every epoch
       for i in range(epochs):
           y = model.forward(x)
           loss = autograd.mse_loss(y, t)
           print("loss: ", loss)
           # autograd.backward yields (parameter, gradient) pairs
           for p, gp in autograd.backward(loss):
               sgd.update(p, gp)
           sgd.step()


   if __name__ == '__main__':
       # Tensor.gaussian() fills the tensor in place, so create and
       # initialise in two steps rather than chaining the calls.
       x = tensor.Tensor((3, 3))
       x.gaussian(1, 1)
       y = tensor.Tensor((3, 3))
       y.gaussian(1, 1)

       autograd.training = True
       m = MLP()
       sgd = opt.SGD()
       m.set_optimizer(sgd)
       out = m.forward(x)
       loss = m.loss(out, y)
       m.optim(loss)
       print(loss)
       train(m, x, y)
   ```
   
   The above code executes without any issues. However, if we change the dimensions of the target tensor so that they no longer match the model's output, the error appears: instead of raising a Python exception, the session terminates abruptly. For example, keeping the script identical except for the target shape in the main block:

   ```python
   if __name__ == '__main__':
       x = tensor.Tensor((3, 3))
       x.gaussian(1, 1)
       # Target shape (3, 4) no longer matches the model output shape (3, 3)
       y = tensor.Tensor((3, 4))
       y.gaussian(1, 1)

       autograd.training = True
       m = MLP()
       sgd = opt.SGD()
       m.set_optimizer(sgd)
       out = m.forward(x)
       loss = m.loss(out, y)
       m.optim(loss)
       print(loss)
       train(m, x, y)
   ```
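
   Until the library validates this itself, a user-side workaround is to compare the shapes in Python before the tensors reach the backend, so the mismatch surfaces as a catchable exception rather than an abrupt termination. The following is only a sketch of a hypothetical guard (checked_loss is not part of the SINGA API); it assumes Tensor.shape exposes the dimensions as a tuple:

   ```python
   def checked_loss(loss_fn, pred, target):
       # Hypothetical helper, not part of SINGA: compare shapes in Python
       # first, so a mismatch raises a ValueError that the caller can
       # catch, instead of aborting inside the C++ backend.
       if pred.shape != target.shape:
           raise ValueError("shape mismatch: model output %s vs target %s"
                            % (pred.shape, target.shape))
       return loss_fn(pred, target)
   ```

   Calling, say, checked_loss(autograd.softmax_cross_entropy, out, y) in the second script would then raise a ValueError at the first loss computation instead of killing the whole session.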


----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org