Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/08/27 04:25:44 UTC

[GitHub] bowenroom opened a new issue #12366: MXNetError: [12:17:04] src/imperative/imperative.cc:285: Check failed: !AGInfo::IsNone(*i) Cannot differentiate node because it is not in a computational graph.

URL: https://github.com/apache/incubator-mxnet/issues/12366
 
 
    # I have an MXNet error, can anyone help me fix it? Thanks a lot!
   
   from __future__ import print_function
   import matplotlib as mpl
   from matplotlib import pyplot as plt
   import mxnet as mx
   from mxnet import gluon, autograd, nd
   from mxnet.gluon import nn
   import numpy as np
   
   # In[2]:
   
   
   # Hyper Parameters
   BATCH_SIZE = 64
   LR_G = 0.0001  # learning rate for generator
   LR_D = 0.0001  # learning rate for discriminator
    N_IDEAS = 55  # think of this as the number of ideas G uses to generate an artwork (Generator)
    ART_COMPONENTS = 15  # the total number of points G can draw on the canvas
   PAINT_POINTS = np.vstack([np.linspace(-1, 1, ART_COMPONENTS) for _ in range(BATCH_SIZE)])
    PAINT_POINTS = nd.array(PAINT_POINTS)  # 64*15: the total number of points to be drawn
   
   # In[3]:
   
   
   # show our beautiful painting range
   plt.plot(PAINT_POINTS[0].asnumpy(), (2 * PAINT_POINTS[0] ** 2 + 1).asnumpy(), c='#74BCFF', lw=3, label='upper bound')
   plt.plot(PAINT_POINTS[0].asnumpy(), (1 * PAINT_POINTS[0] ** 2 + 0).asnumpy(), c='#FF9359', lw=3, label='lower bound')
   plt.legend(loc='upper right')
   plt.show()
   print(PAINT_POINTS.shape)
   
   
   # In[4]:
   def artist_works():  # painting from the famous artist (real target)
       a = np.random.uniform(1, 2, size=BATCH_SIZE)[:, np.newaxis]
       a = nd.array(a)
   
       paintings = a * PAINT_POINTS[0] ** 2 + (a - 1)
       # paintings = torch.from_numpy(paintings).float()
       return paintings
   
   
    
   
    
   G = nn.Sequential()
   with G.name_scope():
       G.add(nn.Dense(128, activation='relu'), nn.Dense(ART_COMPONENTS))
    
   D = nn.Sequential()
   with D.name_scope():
       D.add(nn.Dense(128, activation='relu'), nn.Dense(1, activation='sigmoid'))
   
   ctx = mx.cpu()
   # initialize the generator and the discriminator
   G.initialize(mx.init.Normal(0.02), ctx=ctx)
   D.initialize(mx.init.Normal(0.02), ctx=ctx)
   loss = gluon.loss.SoftmaxCrossEntropyLoss()
   # trainer for the generator and the discriminator
   trainerG = gluon.Trainer(G.collect_params(), 'adam', {'learning_rate': LR_G})
   trainerD = gluon.Trainer(D.collect_params(), 'adam', {'learning_rate': LR_D})
   
   # In[6]:
   
   for step in range(10000):
        artist_paintings = artist_works()  # real paintings from the artist, shape 64*15
       G_ideas = nd.random_normal(shape=(BATCH_SIZE, N_IDEAS))
       G_paintings = G(G_ideas)  # fake painting from G (random ideas)
   
        prob_artist0 = D(artist_paintings)  # D tries to increase this prob
        prob_artist1 = D(G_paintings)  # D tries to decrease this prob
   
   
       # start the process of training
       with autograd.record():
           D_loss = -nd.mean(nd.log(prob_artist0) + nd.log((1 - prob_artist1)))
           D_loss.backward()
       trainerD.step(N_IDEAS)
   
       with autograd.record():
           G_loss = nd.mean(nd.log(1. - prob_artist1))
           G_loss.backward()
       trainerG.step(N_IDEAS)
   
       if step % 1000 == 0:  # plotting
           plt.cla()
           plt.plot(PAINT_POINTS[0], G_paintings.data.numpy()[0], c='#4AD631', lw=3, label='Generated painting', )
           plt.plot(PAINT_POINTS[0].asnumpy(), (2 * PAINT_POINTS[0] ** 2 + 1).asnumpy(), c='#74BCFF', lw=3,
                    label='upper bound')
           plt.plot(PAINT_POINTS[0].asnumpy(), (1 * PAINT_POINTS[0] ** 2 + 0).asnumpy(), c='#FF9359', lw=3,
                    label='lower bound')
           plt.legend(loc='upper right')
           plt.text(-.5, 2.3, 'D accuracy=%.2f (0.5 for D to converge)' % prob_artist0.data.numpy().mean(),
                    fontdict={'size': 15})
           plt.text(-.5, 2, 'D score= %.2f (-1.38 for G to converge)' % -D_loss.data.numpy(), fontdict={'size': 15})
           plt.ylim((0, 3));
           plt.legend(loc='upper right', fontsize=12);
           plt.draw();
           plt.pause(0.01)
           plt.show()
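
For reference: MXNet's imperative autograd only records operations that run inside an `autograd.record()` scope, and `.backward()` can only be called on an NDArray that belongs to such a recorded graph. In the snippet above, `G_paintings`, `prob_artist0` and `prob_artist1` are computed before the `with autograd.record():` blocks, so the losses built from them are not attached to any graph and the "Cannot differentiate node because it is not in a computational graph" check fires. Below is a minimal sketch of how the training loop is usually structured in Gluon, reusing the names from the code above; the `.detach()` call and the `step(BATCH_SIZE)` arguments are my additions, not part of the original code.

    # Sketch only: move the forward passes inside autograd.record() so the
    # graph is recorded before calling backward().
    for step in range(10000):
        artist_paintings = artist_works()                      # real samples, shape 64*15
        G_ideas = nd.random_normal(shape=(BATCH_SIZE, N_IDEAS))

        # train the discriminator
        with autograd.record():
            prob_artist0 = D(artist_paintings)
            prob_artist1 = D(G(G_ideas).detach())              # detach: no gradient flows into G here
            D_loss = -nd.mean(nd.log(prob_artist0) + nd.log(1. - prob_artist1))
        D_loss.backward()                                      # backward() may be called outside record()
        trainerD.step(BATCH_SIZE)                              # step by the batch size, not N_IDEAS

        # train the generator
        with autograd.record():
            G_paintings = G(G_ideas)
            prob_artist1 = D(G_paintings)
            G_loss = nd.mean(nd.log(1. - prob_artist1))
        G_loss.backward()
        trainerG.step(BATCH_SIZE)

A separate problem in the plotting code: `.data.numpy()` is a PyTorch idiom; MXNet NDArrays use `.asnumpy()` instead (e.g. `G_paintings.asnumpy()[0]` and `prob_artist0.asnumpy().mean()`), and `PAINT_POINTS[0]` should likewise be converted with `.asnumpy()` before being passed to `plt.plot`.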
   
