You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by jx...@apache.org on 2017/12/14 19:22:01 UTC

[incubator-mxnet] branch master updated: [example] fix neural-style end2end example (#9063)

This is an automated email from the ASF dual-hosted git repository.

jxie pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 49c60fd  [example] fix neural-style end2end example (#9063)
49c60fd is described below

commit 49c60fd485667fe267310c11c4950bd406430ac2
Author: Yizhi Liu <ja...@gmail.com>
AuthorDate: Thu Dec 14 11:21:57 2017 -0800

    [example] fix neural-style end2end example (#9063)
---
 example/neural-style/end_to_end/README.md          | 15 ++++++---------
 example/neural-style/end_to_end/basic.py           |  4 ----
 example/neural-style/end_to_end/boost_inference.py | 13 +------------
 example/neural-style/end_to_end/boost_train.py     | 13 ++++++-------
 example/neural-style/end_to_end/gen_v3.py          | 10 ----------
 example/neural-style/end_to_end/gen_v4.py          | 10 ----------
 6 files changed, 13 insertions(+), 52 deletions(-)

diff --git a/example/neural-style/end_to_end/README.md b/example/neural-style/end_to_end/README.md
index 2f19bf5..4a228c1 100644
--- a/example/neural-style/end_to_end/README.md
+++ b/example/neural-style/end_to_end/README.md
@@ -1,20 +1,17 @@
 # End to End Neural Art
 
-This is an implementation of blog: [http://dmlc.ml/mxnet/2016/06/20/end-to-end-neural-style.html](http://dmlc.ml/mxnet/2016/06/20/end-to-end-neural-style.html)
-
-
-We will release a Multi-GPU training code soon.
+Please refer to this [blog](http://dmlc.ml/mxnet/2016/06/20/end-to-end-neural-style.html) for details of how it is implemented.
 
 ## How to use
 
 
-1. First use `download.sh` to download pre-trained model and sample inputs
+1. First use `../download.sh` to download pre-trained model and sample inputs.
 
-2. Then prepare training dataset according to the blog
+2. Prepare training dataset. Put image samples to `../data/` (one file for each image sample). The pretrained model here was trained by 26k images sampled from [MIT Place dataset](http://places.csail.mit.edu).
 
-3. Modify [boost_train.py](boost_train.py)
+3. Use `boost_train.py` for training.
 
 ## Pretrained Model
 
-Weight [https://github.com/dmlc/web-data/raw/master/mxnet/art/model.zip](https://github.com/dmlc/web-data/raw/master/mxnet/art/model.zip)
-Inference [boost_inference.py](boost_inference.py)
+- Model: [https://github.com/dmlc/web-data/raw/master/mxnet/art/model.zip](https://github.com/dmlc/web-data/raw/master/mxnet/art/model.zip)
+- Inference script: `boost_inference.py`
diff --git a/example/neural-style/end_to_end/basic.py b/example/neural-style/end_to_end/basic.py
index 1763e88..eae64a6 100644
--- a/example/neural-style/end_to_end/basic.py
+++ b/example/neural-style/end_to_end/basic.py
@@ -14,10 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-
-import sys
-sys.path.insert(0, "../../mxnet/python/")
-
 import mxnet as mx
 import numpy as np
 import model_vgg19 as vgg
diff --git a/example/neural-style/end_to_end/boost_inference.py b/example/neural-style/end_to_end/boost_inference.py
index 0ec8308..86ab000 100644
--- a/example/neural-style/end_to_end/boost_inference.py
+++ b/example/neural-style/end_to_end/boost_inference.py
@@ -14,10 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-
-import sys
-sys.path.insert(0, "../mxnet/python")
-
 import mxnet as mx
 import numpy as np
 
@@ -31,8 +27,6 @@ clip_norm = 1.0 * np.prod(dshape)
 model_prefix = "./model/"
 ctx = mx.gpu(0)
 
-
-
 # generator
 gens = [gen_v4.get_module("g0", dshape, ctx),
         gen_v3.get_module("g1", dshape, ctx),
@@ -41,15 +35,10 @@ gens = [gen_v4.get_module("g0", dshape, ctx),
 for i in range(len(gens)):
     gens[i].load_params("./model/%d/v3_0002-0026000.params" % i)
 
-content_np = data_processing.PreprocessContentImage("../IMG_4343.jpg", min(dshape[2:]), dshape)
+content_np = data_processing.PreprocessContentImage("../input/IMG_4343.jpg", min(dshape[2:]), dshape)
 data = [mx.nd.array(content_np)]
 for i in range(len(gens)):
     gens[i].forward(mx.io.DataBatch([data[-1]], [0]), is_train=False)
     new_img = gens[i].get_outputs()[0]
     data.append(new_img.copyto(mx.cpu()))
     data_processing.SaveImage(new_img.asnumpy(), "out_%d.jpg" % i)
-
-
-import os
-os.system("rm -rf out.zip")
-os.system("zip out.zip out_*")
diff --git a/example/neural-style/end_to_end/boost_train.py b/example/neural-style/end_to_end/boost_train.py
index fa525e7..4f25b43 100644
--- a/example/neural-style/end_to_end/boost_train.py
+++ b/example/neural-style/end_to_end/boost_train.py
@@ -14,10 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-
-import sys
-sys.path.insert(0, "../../mxnet/python")
-
 import mxnet as mx
 import numpy as np
 
@@ -27,7 +23,7 @@ import gen_v3
 import gen_v4
 
 # params
-vgg_params = mx.nd.load("./vgg19.params")
+vgg_params = mx.nd.load("../model/vgg19.params")
 style_weight = 1.2
 content_weight = 10
 dshape = (1, 3, 384, 384)
@@ -36,7 +32,7 @@ model_prefix = "v3"
 ctx = mx.gpu(0)
 
 # init style
-style_np = data_processing.PreprocessStyleImage("../starry_night.jpg", shape=dshape)
+style_np = data_processing.PreprocessStyleImage("../input/starry_night.jpg", shape=dshape)
 style_mod = basic.get_style_module("style", dshape, ctx, vgg_params)
 style_mod.forward(mx.io.DataBatch([mx.nd.array(style_np)], [0]), is_train=False)
 style_array = [arr.copyto(mx.cpu()) for arr in style_mod.get_outputs()]
@@ -119,7 +115,10 @@ for i in range(start_epoch, end_epoch):
         loss_grad_array = []
         data_array = []
         path = data_root + file_list[idx]
-        content_np = data_processing.PreprocessContentImage(path, min(dshape[2:]), dshape)
+        try:
+            content_np = data_processing.PreprocessContentImage(path, min(dshape[2:]), dshape)
+        except:
+            logging.warn("Fail to load an input image. Skip.")
         data = mx.nd.array(content_np)
         data_array.append(data)
         # get content
diff --git a/example/neural-style/end_to_end/gen_v3.py b/example/neural-style/end_to_end/gen_v3.py
index 7962e68..a11e598 100644
--- a/example/neural-style/end_to_end/gen_v3.py
+++ b/example/neural-style/end_to_end/gen_v3.py
@@ -18,14 +18,6 @@
 
 # coding: utf-8
 
-# In[1]:
-
-import sys
-sys.path.insert(0, "../../mxnet/python")
-
-
-# In[2]:
-
 import mxnet as mx
 import numpy as np
 
@@ -48,8 +40,6 @@ def Deconv(data, num_filter, im_hw, kernel=(7, 7), pad=(2, 2), stride=(2, 2), cr
         sym = mx.sym.Activation(sym, act_type="tanh")
     return sym
 
-# In[70]:
-
 def get_generator(prefix, im_hw):
     data = mx.sym.Variable("%s_data" % prefix)
     conv1 = Conv(data, 64) # 192
diff --git a/example/neural-style/end_to_end/gen_v4.py b/example/neural-style/end_to_end/gen_v4.py
index fb4e6d1..30f534c 100644
--- a/example/neural-style/end_to_end/gen_v4.py
+++ b/example/neural-style/end_to_end/gen_v4.py
@@ -18,14 +18,6 @@
 
 # coding: utf-8
 
-# In[1]:
-
-import sys
-sys.path.insert(0, "../mxnet/python")
-
-
-# In[2]:
-
 import mxnet as mx
 import numpy as np
 
@@ -46,8 +38,6 @@ def Deconv(data, num_filter, kernel=(6, 6), pad=(2, 2), stride=(2, 2), out=False
         sym = mx.sym.Activation(sym, act_type="tanh")
     return sym
 
-# In[70]:
-
 def get_generator(prefix, im_hw):
     data = mx.sym.Variable("%s_data" % prefix)
 

-- 
To stop receiving notification emails like this one, please contact
['"commits@mxnet.apache.org" <commits@mxnet.apache.org>'].