Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2017/12/14 19:22:01 UTC

[GitHub] piiswrong closed pull request #9063: [example] fix neural-style end2end example

URL: https://github.com/apache/incubator-mxnet/pull/9063

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

diff --git a/example/neural-style/end_to_end/README.md b/example/neural-style/end_to_end/README.md
index 2f19bf51ab..4a228c199b 100644
--- a/example/neural-style/end_to_end/README.md
+++ b/example/neural-style/end_to_end/README.md
@@ -1,20 +1,17 @@
 # End to End Neural Art
 
-This is an implementation of blog: [http://dmlc.ml/mxnet/2016/06/20/end-to-end-neural-style.html](http://dmlc.ml/mxnet/2016/06/20/end-to-end-neural-style.html)
-
-
-We will release a Multi-GPU training code soon.
+Please refer to this [blog](http://dmlc.ml/mxnet/2016/06/20/end-to-end-neural-style.html) for details on how it is implemented.
 
 ## How to use
 
 
-1. First use `download.sh` to download pre-trained model and sample inputs
+1. First use `../download.sh` to download the pre-trained model and sample inputs.
 
-2. Then prepare training dataset according to the blog
+2. Prepare the training dataset: put image samples into `../data/` (one file per image sample). The pretrained model here was trained on 26k images sampled from the [MIT Places dataset](http://places.csail.mit.edu).
 
-3. Modify [boost_train.py](boost_train.py)
+3. Use `boost_train.py` for training.
 
 ## Pretrained Model
 
-Weight [https://github.com/dmlc/web-data/raw/master/mxnet/art/model.zip](https://github.com/dmlc/web-data/raw/master/mxnet/art/model.zip)
-Inference [boost_inference.py](boost_inference.py)
+- Model: [https://github.com/dmlc/web-data/raw/master/mxnet/art/model.zip](https://github.com/dmlc/web-data/raw/master/mxnet/art/model.zip)
+- Inference script: `boost_inference.py`
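
As a side note on step 2 of the README above: boost_train.py (patched
further below) indexes training images as `data_root + file_list[idx]`,
one file per sample. A minimal sketch, assuming plain JPEG/PNG files
sitting directly in `../data/`, of how such a file list could be
assembled (not part of this PR):

    import os

    data_root = "../data/"
    # One file per image sample, as step 2 of the README describes.
    file_list = [f for f in sorted(os.listdir(data_root))
                 if f.lower().endswith((".jpg", ".jpeg", ".png"))]
    print("found %d training images" % len(file_list))
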
diff --git a/example/neural-style/end_to_end/basic.py b/example/neural-style/end_to_end/basic.py
index 1763e884b9..eae64a6e68 100644
--- a/example/neural-style/end_to_end/basic.py
+++ b/example/neural-style/end_to_end/basic.py
@@ -14,10 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-
-import sys
-sys.path.insert(0, "../../mxnet/python/")
-
 import mxnet as mx
 import numpy as np
 import model_vgg19 as vgg
diff --git a/example/neural-style/end_to_end/boost_inference.py b/example/neural-style/end_to_end/boost_inference.py
index 0ec8308f30..86ab000b08 100644
--- a/example/neural-style/end_to_end/boost_inference.py
+++ b/example/neural-style/end_to_end/boost_inference.py
@@ -14,10 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-
-import sys
-sys.path.insert(0, "../mxnet/python")
-
 import mxnet as mx
 import numpy as np
 
@@ -31,8 +27,6 @@
 model_prefix = "./model/"
 ctx = mx.gpu(0)
 
-
-
 # generator
 gens = [gen_v4.get_module("g0", dshape, ctx),
         gen_v3.get_module("g1", dshape, ctx),
@@ -41,15 +35,10 @@
 for i in range(len(gens)):
     gens[i].load_params("./model/%d/v3_0002-0026000.params" % i)
 
-content_np = data_processing.PreprocessContentImage("../IMG_4343.jpg", min(dshape[2:]), dshape)
+content_np = data_processing.PreprocessContentImage("../input/IMG_4343.jpg", min(dshape[2:]), dshape)
 data = [mx.nd.array(content_np)]
 for i in range(len(gens)):
     gens[i].forward(mx.io.DataBatch([data[-1]], [0]), is_train=False)
     new_img = gens[i].get_outputs()[0]
     data.append(new_img.copyto(mx.cpu()))
     data_processing.SaveImage(new_img.asnumpy(), "out_%d.jpg" % i)
-
-
-import os
-os.system("rm -rf out.zip")
-os.system("zip out.zip out_*")
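
The loop above daisy-chains the three generator modules: each stage
consumes the previous stage's output and writes an out_%d.jpg. The
removed os.system lines bundled those outputs by shelling out to zip; a
portable sketch of the same bundling step using only the Python standard
library (not part of this PR) would be:

    import glob
    import zipfile

    # Collect the generated out_*.jpg images into a single archive.
    with zipfile.ZipFile("out.zip", "w", zipfile.ZIP_DEFLATED) as zf:
        for name in sorted(glob.glob("out_*.jpg")):
            zf.write(name)
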
diff --git a/example/neural-style/end_to_end/boost_train.py b/example/neural-style/end_to_end/boost_train.py
index fa525e7e52..4f25b4304c 100644
--- a/example/neural-style/end_to_end/boost_train.py
+++ b/example/neural-style/end_to_end/boost_train.py
@@ -14,10 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-
-import sys
-sys.path.insert(0, "../../mxnet/python")
-
 import mxnet as mx
 import numpy as np
 
@@ -27,7 +23,7 @@
 import gen_v4
 
 # params
-vgg_params = mx.nd.load("./vgg19.params")
+vgg_params = mx.nd.load("../model/vgg19.params")
 style_weight = 1.2
 content_weight = 10
 dshape = (1, 3, 384, 384)
@@ -36,7 +32,7 @@
 ctx = mx.gpu(0)
 
 # init style
-style_np = data_processing.PreprocessStyleImage("../starry_night.jpg", shape=dshape)
+style_np = data_processing.PreprocessStyleImage("../input/starry_night.jpg", shape=dshape)
 style_mod = basic.get_style_module("style", dshape, ctx, vgg_params)
 style_mod.forward(mx.io.DataBatch([mx.nd.array(style_np)], [0]), is_train=False)
 style_array = [arr.copyto(mx.cpu()) for arr in style_mod.get_outputs()]
@@ -119,7 +115,12 @@ def get_tv_grad_executor(img, ctx, tv_weight):
         loss_grad_array = []
         data_array = []
         path = data_root + file_list[idx]
-        content_np = data_processing.PreprocessContentImage(path, min(dshape[2:]), dshape)
+        try:
+            content_np = data_processing.PreprocessContentImage(path, min(dshape[2:]), dshape)
+        except Exception:
+            # Skip unreadable images instead of crashing mid-epoch.
+            logging.warning("Failed to load input image %s, skipping.", path)
+            continue
         data = mx.nd.array(content_np)
         data_array.append(data)
         # get content
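
For readers following the training hunks above: the style branch
forwarded through basic.get_style_module feeds losses that, in this
family of models, compare Gram matrices of VGG feature maps (see the
linked blog). A minimal, self-contained NumPy sketch of that statistic,
illustrative only and not code from this PR:

    import numpy as np

    def gram_matrix(feat):
        # feat: one feature map of shape (channels, height, width)
        c, h, w = feat.shape
        flat = feat.reshape(c, h * w)          # one row per channel
        return flat.dot(flat.T) / (c * h * w)  # normalized co-activations

    # Toy usage: a random "feature map" with 16 channels.
    print(gram_matrix(np.random.rand(16, 12, 12)).shape)  # (16, 16)
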
diff --git a/example/neural-style/end_to_end/gen_v3.py b/example/neural-style/end_to_end/gen_v3.py
index 7962e68da2..a11e5989b0 100644
--- a/example/neural-style/end_to_end/gen_v3.py
+++ b/example/neural-style/end_to_end/gen_v3.py
@@ -18,14 +18,6 @@
 
 # coding: utf-8
 
-# In[1]:
-
-import sys
-sys.path.insert(0, "../../mxnet/python")
-
-
-# In[2]:
-
 import mxnet as mx
 import numpy as np
 
@@ -48,8 +40,6 @@ def Deconv(data, num_filter, im_hw, kernel=(7, 7), pad=(2, 2), stride=(2, 2), cr
         sym = mx.sym.Activation(sym, act_type="tanh")
     return sym
 
-# In[70]:
-
 def get_generator(prefix, im_hw):
     data = mx.sym.Variable("%s_data" % prefix)
     conv1 = Conv(data, 64) # 192
diff --git a/example/neural-style/end_to_end/gen_v4.py b/example/neural-style/end_to_end/gen_v4.py
index fb4e6d1e16..30f534cd76 100644
--- a/example/neural-style/end_to_end/gen_v4.py
+++ b/example/neural-style/end_to_end/gen_v4.py
@@ -18,14 +18,6 @@
 
 # coding: utf-8
 
-# In[1]:
-
-import sys
-sys.path.insert(0, "../mxnet/python")
-
-
-# In[2]:
-
 import mxnet as mx
 import numpy as np
 
@@ -46,8 +38,6 @@ def Deconv(data, num_filter, kernel=(6, 6), pad=(2, 2), stride=(2, 2), out=False
         sym = mx.sym.Activation(sym, act_type="tanh")
     return sym
 
-# In[70]:
-
 def get_generator(prefix, im_hw):
     data = mx.sym.Variable("%s_data" % prefix)
 
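After trimming the notebook residue from gen_v3.py/gen_v4.py as above, a
quick sanity check of a generator symbol could look like the sketch
below. The prefix, image size, and input shape are assumptions; only
get_generator(prefix, im_hw) and the "%s_data" input naming are visible
in the diff:

    import mxnet as mx
    import gen_v3

    # Build a generator symbol named "g1" for 256x256 images.
    sym = gen_v3.get_generator("g1", (256, 256))
    print(sym.list_arguments())
    # Infer output shapes for a single 3-channel 256x256 input.
    arg_shapes, out_shapes, aux_shapes = sym.infer_shape(g1_data=(1, 3, 256, 256))
    print(out_shapes)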


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services