You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/06/06 23:49:18 UTC
[GitHub] RoacherM commented on issue #11177: mxnet.base.MXNetError
RoacherM commented on issue #11177: mxnet.base.MXNetError
URL: https://github.com/apache/incubator-mxnet/issues/11177#issuecomment-395248719
Hello @marcoabreu. My network design is here:
class ConcatNet(nn.HybridBlock):
    """Two-branch feature fuser.

    Each branch wraps a backbone followed by global average pooling;
    the pooled outputs of the two branches are concatenated along the
    default (channel) axis.
    """

    def __init__(self, net1, net2, **kwargs):
        super(ConcatNet, self).__init__(**kwargs)
        # Branch 1: backbone + global average pooling.
        self.net1 = nn.HybridSequential()
        self.net1.add(net1)
        self.net1.add(nn.GlobalAvgPool2D())
        # Branch 2: same structure around the second backbone.
        self.net2 = nn.HybridSequential()
        self.net2.add(net2)
        self.net2.add(nn.GlobalAvgPool2D())

    def hybrid_forward(self, F, x1, x2):
        # Pool each branch, then join the two pooled feature vectors.
        left = self.net1(x1)
        right = self.net2(x2)
        return F.concat(left, right)
class OneNet(nn.HybridBlock):
    """Compose a two-input feature extractor with an output head.

    ``features`` must accept two inputs (e.g. :class:`ConcatNet`);
    its result is fed through ``output`` to produce the final scores.
    """

    def __init__(self, features, output, **kwargs):
        super(OneNet, self).__init__(**kwargs)
        self.features = features
        self.output = output

    def hybrid_forward(self, F, x1, x2):
        fused = self.features(x1, x2)
        return self.output(fused)
class Net():
    """Build the full two-branch model.

    Combines pretrained DenseNet-121 (224x224 input) and Inception-v3
    (299x299 input) backbones via :class:`ConcatNet`, then attaches a
    small 2-class dense head. The assembled network is exposed as
    ``self.net``.
    """

    def __init__(self, ctx, nameparams=None):
        # Inception-v3 expects 299x299 input (shape comment in the
        # original was (255,299,299) — presumably 3 channels was meant).
        inception = vision.inception_v3(pretrained=True, ctx=ctx).features
        # DenseNet-121 expects 224x224 input.
        # BUG FIX: the model-zoo function is `densenet121`; the original
        # `vision.densnet121` raises AttributeError at construction time.
        resnet = vision.densenet121(pretrained=True, ctx=ctx).features
        self.features = ConcatNet(resnet, inception)
        self.output = self.__get_output(ctx, nameparams)
        self.net = OneNet(self.features, self.output)

    def __get_output(self, ctx, ParamsName=None):
        """Create the classification head.

        Loads parameters from ``ParamsName`` when given; otherwise
        Xavier-initializes the head on ``ctx``.
        """
        net = nn.HybridSequential("output")
        with net.name_scope():
            # Extra layers (e.g. BatchNorm) could be inserted here.
            net.add(nn.Dense(256, activation='relu'))
            net.add(nn.Dropout(.5))
            # Final 2-way classifier.
            net.add(nn.Dense(2))
        if ParamsName is not None:
            net.collect_params().load(ParamsName, ctx)
        else:
            net.initialize(init=init.Xavier(), ctx=ctx)
        return net
class Pre():
    """Inference helper: load the model once, then predict per image.

    Parameters
    ----------
    nameparams : str
        Path of the saved head parameters, forwarded to :class:`Net`.
    idx : sequence
        Maps the argmax class index to a human-readable label.
    ctx : int, optional
        0 selects CPU (default), 1 selects GPU.
    """

    def __init__(self, nameparams, idx, ctx=0):
        self.idx = idx
        if ctx == 0:
            self.ctx = mx.cpu()
        elif ctx == 1:
            self.ctx = mx.gpu()
        else:
            # BUG FIX: originally an unrecognized `ctx` left self.ctx
            # unset, producing a confusing AttributeError on the next
            # line; fail fast with a clear message instead.
            raise ValueError("ctx must be 0 (cpu) or 1 (gpu), got %r" % (ctx,))
        self.net = Net(self.ctx, nameparams=nameparams).net
        self.Timg = transform_test

    def PreImg(self, img):
        """Return the predicted label for a decoded image array."""
        imgs = self.Timg(img, None)
        # Add the batch dimension and move both views onto self.ctx
        # before the forward pass; softmax the 2-class scores.
        x1 = nd.reshape(imgs[0], (1, 3, 224, 224)).as_in_context(self.ctx)
        x2 = nd.reshape(imgs[1], (1, 3, 299, 299)).as_in_context(self.ctx)
        out = nd.softmax(self.net(x1, x2)).asnumpy()
        return self.idx[np.where(out == out.max())[1][0]]

    def PreName(self, Name):
        """Read the image file at `Name` and return its predicted label."""
        img = image.imread(Name)
        return self.PreImg(img)
def transform_train(data, label):
    """Training-time transform.

    Produces two augmented, normalized, channel-first views of `data`
    (224x224 and 299x299) plus the label as a float32 scalar.
    """
    view224 = image.imresize(data, 224, 224).astype('float32')
    view299 = image.imresize(data, 299, 299).astype('float32')
    # ImageNet normalization statistics, shared by both augmenter lists.
    # NOTE(review): rand_gray=1 applies the grayscale augmenter with
    # probability 1 on every sample — confirm this is intentional.
    aug_kwargs = dict(resize=0,
                      rand_crop=True, rand_resize=True, rand_mirror=True,
                      mean=np.array([0.485, 0.456, 0.406]),
                      std=np.array([0.229, 0.224, 0.225]),
                      brightness=0.125, contrast=0.5,
                      saturation=0, hue=0,
                      pca_noise=0, rand_gray=1, inter_method=2)
    for aug in image.CreateAugmenter(data_shape=(3, 224, 224), **aug_kwargs):
        view224 = aug(view224)
    for aug in image.CreateAugmenter(data_shape=(3, 299, 299), **aug_kwargs):
        view299 = aug(view299)
    # HWC -> CHW for the network.
    view224 = nd.transpose(view224, (2, 0, 1))
    view299 = nd.transpose(view299, (2, 0, 1))
    return (view224, view299, nd.array([label]).asscalar().astype('float32'))
def transform_test(data, label):
    """Test-time transform.

    Like `transform_train` but with normalization only (no random
    augmentation): returns 224x224 and 299x299 channel-first views of
    `data` plus the label as a float32 scalar.
    """
    view224 = image.imresize(data, 224, 224).astype('float32')
    view299 = image.imresize(data, 299, 299).astype('float32')
    # ImageNet normalization statistics, shared by both augmenter lists.
    norm_kwargs = dict(mean=np.array([0.485, 0.456, 0.406]),
                       std=np.array([0.229, 0.224, 0.225]))
    for aug in image.CreateAugmenter(data_shape=(3, 224, 224), **norm_kwargs):
        view224 = aug(view224)
    for aug in image.CreateAugmenter(data_shape=(3, 299, 299), **norm_kwargs):
        view299 = aug(view299)
    # HWC -> CHW for the network.
    view224 = nd.transpose(view224, (2, 0, 1))
    view299 = nd.transpose(view299, (2, 0, 1))
    return (view224, view299, nd.array([label]).asscalar().astype('float32'))
And when I run my code:
def SaveNd(data, net, name, ctx=None):
    """Extract features for a whole dataset and save them to disk.

    Runs `net` over every (feature1, feature2, label) batch in `data`,
    concatenates the outputs and labels along the batch axis, and
    writes them to `name` with `nd.save`.

    Parameters
    ----------
    data : iterable
        Yields (x1, x2, label) batches.
    net : callable
        The two-input feature network.
    name : str
        Output file path for `nd.save`.
    ctx : mx.Context, optional
        Device to run the network on; defaults to ``mx.gpu()``,
        preserving the original hard-coded behavior.
    """
    if ctx is None:
        ctx = mx.gpu()
    outs = []
    labels = []
    for fear1, fear2, label in tqdm(data):
        out = net(fear1.as_in_context(ctx), fear2.as_in_context(ctx))
        # Move results back to the CPU so device memory is not exhausted
        # while accumulating the whole dataset.
        outs.append(out.as_in_context(mx.cpu()))
        labels.append(label)
    nd.save(name, [nd.concat(*outs, dim=0), nd.concat(*labels, dim=0)])

SaveNd(train_data, net, 'train.nd')
SaveNd(valid_data, net, 'valid.nd')
SaveNd(train_valid_data, net, 'input.nd')
I come across the error:
File "C:\Python_3.6.2\lib\site-packages\mxnet\base.py", line 210, in check_call
raise MXNetError(py_str(_LIB.MXGetLastError()))
mxnet.base.MXNetError: Error in operator pool0_fwd: [02:02:07] C:\Jenkins\workspace\mxnet\mxnet\src\operator\nn\pooling.cc:99: Check failed: dshape.ndim() >= 3U (2 vs. 3) Pooling: Input data should be 3D in (batch, channel, x) Or 4D in (batch, channel, y, x) Or 5D in (batch, channel, d, y, x)
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
users@infra.apache.org
With regards,
Apache Git Services