Posted to commits@mxnet.apache.org by je...@apache.org on 2021/08/11 05:59:45 UTC

[incubator-mxnet] branch v1.x updated: [v1.x]Update git repo reference (#20510)

This is an automated email from the ASF dual-hosted git repository.

jevans pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
     new 9417bbd  [v1.x]Update git repo reference (#20510)
9417bbd is described below

commit 9417bbdde18f59daf825403bddf783753999776e
Author: waytrue17 <52...@users.noreply.github.com>
AuthorDate: Tue Aug 10 22:58:02 2021 -0700

    [v1.x]Update git repo reference (#20510)
    
    * update git repo url
    
    * remove incubator
    
    * fix typo
    
    Co-authored-by: Wei Chu <we...@amazon.com>
---
 3rdparty/mshadow/README.md                                 |  2 +-
 NEWS.md                                                    |  8 ++++----
 R-package/R/zzz.R                                          |  2 +-
 R-package/vignettes/CallbackFunction.Rmd                   |  2 +-
 R-package/vignettes/CustomIterator.Rmd                     |  2 +-
 R-package/vignettes/mnistCompetition.Rmd                   |  2 +-
 README.md                                                  |  4 ++--
 docker/Dockerfiles/Dockerfile.in.lib.cpu                   |  2 +-
 docker/Dockerfiles/Dockerfile.in.lib.gpu                   |  2 +-
 docs/static_site/src/pages/api/faq/caffe.md                |  4 ++--
 docs/static_site/src/pages/api/faq/cloud.md                |  4 ++--
 docs/static_site/src/pages/api/faq/multi_devices.md        |  6 +++---
 docs/static_site/src/pages/api/faq/new_op.md               |  6 +++---
 docs/static_site/src/pages/api/faq/perf.md                 | 10 +++++-----
 docs/static_site/src/pages/api/faq/recordio.md             |  4 ++--
 docs/static_site/src/pages/api/faq/s3_integration.md       |  2 +-
 docs/static_site/src/pages/api/faq/smart_device.md         |  6 +++---
 docs/static_site/src/pages/api/faq/visualize_graph.md      |  2 +-
 .../src/pages/api/r/docs/tutorials/callback_function.md    |  2 +-
 .../src/pages/api/r/docs/tutorials/custom_iterator.md      |  2 +-
 .../api/r/docs/tutorials/five_minutes_neural_network.md    |  2 +-
 .../src/pages/api/r/docs/tutorials/mnist_competition.md    |  2 +-
 .../src/pages/api/scala/docs/tutorials/char_lstm.md        |  4 ++--
 .../src/pages/api/scala/docs/tutorials/mnist.md            |  6 +++---
 example/README.md                                          |  8 ++++----
 example/caffe/README.md                                    |  4 ++--
 example/image-classification/README.md                     | 14 +++++++-------
 example/kaggle-ndsb1/training_curves.py                    |  2 +-
 example/rcnn/README.md                                     |  2 +-
 example/reinforcement-learning/dqn/operators.py            |  2 +-
 example/ssd/README.md                                      |  2 +-
 julia/docs/src/index.md                                    |  2 +-
 julia/docs/src/user-guide/install.md                       |  2 +-
 perl-package/AI-MXNet/lib/AI/MXNet.pm                      |  2 +-
 perl-package/AI-MXNet/lib/AI/MXNet/LRScheduler.pm          |  2 +-
 plugin/caffe/README.md                                     |  4 ++--
 .../scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala   |  2 +-
 .../scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala  |  2 +-
 src/engine/naive_engine.cc                                 |  2 +-
 src/engine/threaded_engine.h                               |  2 +-
 src/operator/svm_output.cc                                 |  2 +-
 tools/coreml/converter/_layers.py                          |  4 ++--
 42 files changed, 74 insertions(+), 74 deletions(-)

diff --git a/3rdparty/mshadow/README.md b/3rdparty/mshadow/README.md
index 6ff6cad..a645ef6 100644
--- a/3rdparty/mshadow/README.md
+++ b/3rdparty/mshadow/README.md
@@ -50,5 +50,5 @@ Version
 
 Projects Using MShadow
 ----------------------
-* [MXNet: Efficient and Flexible Distributed Deep Learning Framework](https://github.com/dmlc/mxnet)
+* [MXNet: Efficient and Flexible Distributed Deep Learning Framework](https://github.com/apache/mxnet)
 * [CXXNet: A lightweight  C++ based deep learnig framework](https://github.com/dmlc/cxxnet)
diff --git a/NEWS.md b/NEWS.md
index c909d1b..7c16eaa 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -3572,7 +3572,7 @@ For more information and examples, see [full release notes](https://cwiki.apache
   - ImageRecordIter now stores data in pinned memory to improve GPU memcopy speed.
 ### Bugfixes
   - Cython interface is fixed. `make cython` and `python setup.py install --with-cython` should install the cython interface and reduce overhead in applications that use imperative/bucketing.
-  - Fixed various bugs in Faster-RCNN example: https://github.com/dmlc/mxnet/pull/6486
+  - Fixed various bugs in Faster-RCNN example: https://github.com/apache/mxnet/pull/6486
   - Fixed various bugs in SSD example.
   - Fixed `out` argument not working for `zeros`, `ones`, `full`, etc.
   - `expand_dims` now supports backward shape inference.
@@ -3648,9 +3648,9 @@ This is the last release before the NNVM refactor.
 - Support CuDNN v5 by @antinucleon
 - More applications
   - Speech recognition by @yzhang87
-  - [Neural art](https://github.com/dmlc/mxnet/tree/master/example/neural-style) by @antinucleon
-  - [Detection](https://github.com/dmlc/mxnet/tree/master/example/rcnn), RCNN bt @precedenceguo
-  - [Segmentation](https://github.com/dmlc/mxnet/tree/master/example/fcn-xs), FCN by @tornadomeet
+  - [Neural art](https://github.com/apache/mxnet/tree/v0.7.0/example/neural-style) by @antinucleon
+  - [Detection](https://github.com/apache/mxnet/tree/v0.7.0/example/rcnn), RCNN bt @precedenceguo
+  - [Segmentation](https://github.com/apache/mxnet/tree/v0.7.0/example/fcn-xs), FCN by @tornadomeet
   - [Face identification](https://github.com/tornadomeet/mxnet-face) by @tornadomeet
   - More on the example
 
diff --git a/R-package/R/zzz.R b/R-package/R/zzz.R
index 1b18597..8ee4243 100644
--- a/R-package/R/zzz.R
+++ b/R-package/R/zzz.R
@@ -63,7 +63,7 @@ NULL
   if (!interactive() || stats::runif(1) > 0.1) return()
 
   tips <- c(
-    "Need help? Feel free to open an issue on https://github.com/dmlc/mxnet/issues",
+    "Need help? Feel free to open an issue on https://github.com/apache/mxnet/issues",
     "For more documents, please visit https://mxnet.io",
     "Use suppressPackageStartupMessages() to eliminate package startup messages."
   )
diff --git a/R-package/vignettes/CallbackFunction.Rmd b/R-package/vignettes/CallbackFunction.Rmd
index 12b7e28..0d3ad44 100644
--- a/R-package/vignettes/CallbackFunction.Rmd
+++ b/R-package/vignettes/CallbackFunction.Rmd
@@ -75,7 +75,7 @@ head(logger$eval)
 ## How to write your own callback functions
 
 
-You can find the source code for two callback functions from [here](https://github.com/dmlc/mxnet/blob/master/R-package/R/callback.R) and they can be used as your template:
+You can find the source code for two callback functions from [here](https://github.com/apache/mxnet/blob/v1.x/R-package/R/callback.R) and they can be used as your template:
 
 Basically, all callback functions follow the structure below:
 
diff --git a/R-package/vignettes/CustomIterator.Rmd b/R-package/vignettes/CustomIterator.Rmd
index b5a6576..d6dd441 100644
--- a/R-package/vignettes/CustomIterator.Rmd
+++ b/R-package/vignettes/CustomIterator.Rmd
@@ -19,7 +19,7 @@ You'll get two files, `mnist_train.csv` that contains 60.000 examples of hand wr
 
 ## Custom CSV Iterator
 
-Next we are going to create a custom CSV Iterator based on the [C++ CSVIterator class](https://github.com/dmlc/mxnet/blob/master/src/io/iter_csv.cc).
+Next we are going to create a custom CSV Iterator based on the [C++ CSVIterator class](https://github.com/apache/mxnet/blob/master/src/io/iter_csv.cc).
 
 For that we are going to use the R function `mx.io.CSVIter` as a base class. This class has as parameters `data.csv, data.shape, batch.size` and two main functions, `iter.next()` that calls the iterator in the next batch of data and `value()` that returns the train data and the label.
 
diff --git a/R-package/vignettes/mnistCompetition.Rmd b/R-package/vignettes/mnistCompetition.Rmd
index 055f1ae..d06529c 100644
--- a/R-package/vignettes/mnistCompetition.Rmd
+++ b/R-package/vignettes/mnistCompetition.Rmd
@@ -1,7 +1,7 @@
 # Handwritten Digits Classification Competition
 
 [MNIST](http://yann.lecun.com/exdb/mnist/) is a handwritten digits image data set created by Yann LeCun. Every digit is represented by a 28x28 image. It has become a standard data set to test classifiers on simple image input. Neural network is no doubt a strong model for image classification tasks. There's a [long-term hosted competition](https://www.kaggle.com/c/digit-recognizer) on Kaggle using this data set.
-We will present the basic usage of [mxnet](https://github.com/dmlc/mxnet/tree/master/R-package) to compete in this challenge.
+We will present the basic usage of [mxnet](https://github.com/apache/mxnet/tree/v1.x/R-package) to compete in this challenge.
 
 ## Data Loading
 
diff --git a/README.md b/README.md
index 26b5be7..d0ffd39 100644
--- a/README.md
+++ b/README.md
@@ -81,10 +81,10 @@ What's New
 * [0.12.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/0.12.0) - MXNet 0.12.0 Release.
 * [0.11.0 Release](https://github.com/apache/incubator-mxnet/releases/tag/0.11.0) - MXNet 0.11.0 Release.
 * [Apache Incubator](http://incubator.apache.org/projects/mxnet.html) - We are now an Apache Incubator project.
-* [0.10.0 Release](https://github.com/dmlc/mxnet/releases/tag/v0.10.0) - MXNet 0.10.0 Release.
+* [0.10.0 Release](https://github.com/apache/mxnet/releases/tag/v0.10.0) - MXNet 0.10.0 Release.
 * [0.9.3 Release](./docs/architecture/release_note_0_9.md) - First 0.9 official release.
 * [0.9.1 Release (NNVM refactor)](./docs/architecture/release_note_0_9.md) - NNVM branch is merged into master now. An official release will be made soon.
-* [0.8.0 Release](https://github.com/dmlc/mxnet/releases/tag/v0.8.0)
+* [0.8.0 Release](https://github.com/apache/mxnet/releases/tag/v0.8.0)
 
 ### Ecosystem News
 
diff --git a/docker/Dockerfiles/Dockerfile.in.lib.cpu b/docker/Dockerfiles/Dockerfile.in.lib.cpu
index c6de40c..38f47db 100644
--- a/docker/Dockerfiles/Dockerfile.in.lib.cpu
+++ b/docker/Dockerfiles/Dockerfile.in.lib.cpu
@@ -24,6 +24,6 @@ FROM ubuntu:14.04
 COPY install/cpp.sh install/
 RUN install/cpp.sh
 
-RUN git clone --recursive https://github.com/dmlc/mxnet && cd mxnet && \
+RUN git clone --recursive https://github.com/apache/mxnet && cd mxnet && \
     make -j$(nproc) && \
     rm -r build
diff --git a/docker/Dockerfiles/Dockerfile.in.lib.gpu b/docker/Dockerfiles/Dockerfile.in.lib.gpu
index 03b920a..a6eb80f 100644
--- a/docker/Dockerfiles/Dockerfile.in.lib.gpu
+++ b/docker/Dockerfiles/Dockerfile.in.lib.gpu
@@ -25,5 +25,5 @@ COPY install/cpp.sh install/
 RUN install/cpp.sh
 
 ENV BUILD_OPTS "USE_CUDA=1 USE_CUDA_PATH=/usr/local/cuda USE_CUDNN=1"
-RUN git clone --recursive https://github.com/dmlc/mxnet && cd mxnet && \
+RUN git clone --recursive https://github.com/apache/mxnet && cd mxnet && \
     make -j$(nproc) $BUILD_OPTS
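Once either image builds, a quick smoke test of the resulting library from Python might look like the sketch below (this assumes a recent MXNet 1.x build; the GPU branch only applies to the CUDA image):

```python
import mxnet as mx

# Report the library version and the compile-time feature flags
# (USE_CUDA, USE_CUDNN, ...) baked into this build.
print(mx.__version__)
print(mx.runtime.Features())

# A tiny NDArray computation to confirm the engine works; run it on
# GPU 0 when the CUDA build detects a device, otherwise on CPU.
ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()
a = mx.nd.ones((2, 3), ctx=ctx)
print((a * 2).asnumpy())
```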
diff --git a/docs/static_site/src/pages/api/faq/caffe.md b/docs/static_site/src/pages/api/faq/caffe.md
index f05b908..1928d48 100644
--- a/docs/static_site/src/pages/api/faq/caffe.md
+++ b/docs/static_site/src/pages/api/faq/caffe.md
@@ -32,7 +32,7 @@ Key topics covered include the following:
 ## Converting Caffe trained models to MXNet
 
 The converting tool is available at
-[tools/caffe_converter](https://github.com/dmlc/mxnet/tree/master/tools/caffe_converter). On
+[tools/caffe_converter](https://github.com/apache/mxnet/tree/v1.x/tools/caffe_converter). On
 the remaining of this section, we assume we are on the `tools/caffe_converter`
 directory.
 
@@ -205,4 +205,4 @@ train = mx.io.CaffeDataIter(
 ### Put it all together
 
 The complete example is available at
-[example/caffe](https://github.com/dmlc/mxnet/blob/master/example/caffe/)
+[example/caffe](https://github.com/apache/mxnet/blob/v1.x/example/caffe/)
diff --git a/docs/static_site/src/pages/api/faq/cloud.md b/docs/static_site/src/pages/api/faq/cloud.md
index 4807735..2a8c01f 100644
--- a/docs/static_site/src/pages/api/faq/cloud.md
+++ b/docs/static_site/src/pages/api/faq/cloud.md
@@ -113,7 +113,7 @@ The following commands build _MXNet_ with CUDA/CUDNN, Amazon S3, and distributed
 training.
 
 ```bash
-git clone --recursive https://github.com/dmlc/mxnet
+git clone --recursive https://github.com/apache/mxnet
 cd mxnet; cp make/config.mk .
 echo "USE_CUDA=1" >>config.mk
 echo "USE_CUDA_PATH=/usr/local/cuda" >>config.mk
@@ -192,7 +192,7 @@ cat hosts | xargs -I{} ssh -o StrictHostKeyChecking=no {} 'uname -a; pgrep pytho
 ```
 
 ***Note:*** The preceding example is very simple to train and therefore isn't a good
-benchmark for distributed training. Consider using other [examples](https://github.com/dmlc/mxnet/tree/master/example/image-classification).
+benchmark for distributed training. Consider using other [examples](https://github.com/apache/mxnet/tree/v1.x/example/image-classification).
 
 ### More Options
 #### Use Multiple Data Shards
diff --git a/docs/static_site/src/pages/api/faq/multi_devices.md b/docs/static_site/src/pages/api/faq/multi_devices.md
index d8bc81c..fbd6cb1 100644
--- a/docs/static_site/src/pages/api/faq/multi_devices.md
+++ b/docs/static_site/src/pages/api/faq/multi_devices.md
@@ -71,7 +71,7 @@ import mxnet as mx
 module = mx.module.Module(context=[mx.gpu(0), mx.gpu(2)], ...)
 ```
 while if the program accepts a `--gpus` flag (as seen in
-[example/image-classification](https://github.com/dmlc/mxnet/tree/master/example/image-classification)),
+[example/image-classification](https://github.com/apache/mxnet/tree/v1.x/example/image-classification)),
 then we can try
 ```bash
 python train_mnist.py --gpus 0,2 ...
@@ -130,7 +130,7 @@ When using a large number of GPUs, e.g. >=4, we suggest using `device` for bette
 
 Launching a distributed job is a bit different from running on a single
 machine. MXNet provides
-[tools/launch.py](https://github.com/dmlc/mxnet/blob/master/tools/launch.py) to
+[tools/launch.py](https://github.com/apache/mxnet/blob/v1.x/tools/launch.py) to
 start a job by using `ssh`, `mpi`, `sge`, or `yarn`.
 
 An easy way to set up a cluster of EC2 instances for distributed deep learning
@@ -139,7 +139,7 @@ If you do not have a cluster, you can check the repository before you continue.
 
 Assume we are at the directory `mxnet/example/image-classification`
 and want to train LeNet to classify MNIST images, as demonstrated here:
-[train_mnist.py](https://github.com/dmlc/mxnet/blob/master/example/image-classification/train_mnist.py).
+[train_mnist.py](https://github.com/apache/mxnet/blob/v1.x/example/image-classification/train_mnist.py).
 
 On a single machine, we can run:
 
diff --git a/docs/static_site/src/pages/api/faq/new_op.md b/docs/static_site/src/pages/api/faq/new_op.md
index 053182b..ffacef6 100644
--- a/docs/static_site/src/pages/api/faq/new_op.md
+++ b/docs/static_site/src/pages/api/faq/new_op.md
@@ -144,12 +144,12 @@ To use the custom operator, create a mx.sym.Custom symbol with op_type as the re
 mlp = mx.symbol.Custom(data=fc3, name='softmax', op_type='softmax')
 ```
 
-Please see the full code for this example [here](https://github.com/dmlc/mxnet/blob/master/example/numpy-ops/custom_softmax.py).
+Please see the full code for this example [here](https://github.com/apache/mxnet/blob/v1.x/example/numpy-ops/custom_softmax.py).
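For context, a condensed sketch of the `CustomOp`/`CustomOpProp` pattern that the linked `custom_softmax.py` example follows (MXNet 1.x Python API; the class names here are illustrative):

```python
import mxnet as mx
import numpy as np

class Softmax(mx.operator.CustomOp):
    def forward(self, is_train, req, in_data, out_data, aux):
        x = in_data[0].asnumpy()
        y = np.exp(x - x.max(axis=1, keepdims=True))
        y /= y.sum(axis=1, keepdims=True)
        self.assign(out_data[0], req[0], mx.nd.array(y))

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        label = in_data[1].asnumpy().ravel().astype('int64')
        y = out_data[0].asnumpy()
        y[np.arange(label.shape[0]), label] -= 1.0  # dL/dx for softmax + cross-entropy
        self.assign(in_grad[0], req[0], mx.nd.array(y))

@mx.operator.register("softmax")
class SoftmaxProp(mx.operator.CustomOpProp):
    def __init__(self):
        # The loss is computed inside backward(), so no gradient is needed from above.
        super(SoftmaxProp, self).__init__(need_top_grad=False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        data_shape = in_shape[0]
        label_shape = (in_shape[0][0],)
        return [data_shape, label_shape], [data_shape], []

    def create_operator(self, ctx, shapes, dtypes):
        return Softmax()

# Used exactly as in the snippet above:
# mlp = mx.symbol.Custom(data=fc3, name='softmax', op_type='softmax')
```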
 
 ## C++
 With MXNet v0.9 (the NNVM refactor) or later, creating new operators has become easier.
 Operators are now registered with NNVM.
-The following code is an example on how to register an operator (checkout [src/operator/tensor](https://github.com/dmlc/mxnet/tree/master/src/operator/tensor) for more examples):
+The following code is an example on how to register an operator (checkout [src/operator/tensor](https://github.com/apache/mxnet/tree/v1.x/src/operator/tensor) for more examples):
 
 ```c++
 NNVM_REGISTER_OP(abs)
@@ -189,7 +189,7 @@ In this section, we will go through the basic attributes MXNet expect for all op
 You can find the definition for them in the following two files:
 
 - [nnvm/op_attr_types.h](https://github.com/dmlc/nnvm/blob/master/include/nnvm/op_attr_types.h)
-- [mxnet/op_attr_types.h](https://github.com/dmlc/mxnet/blob/master/include/mxnet/op_attr_types.h)
+- [mxnet/op_attr_types.h](https://github.com/apache/mxnet/blob/v1.x/include/mxnet/op_attr_types.h)
 
 #### Descriptions (Optional)
 
diff --git a/docs/static_site/src/pages/api/faq/perf.md b/docs/static_site/src/pages/api/faq/perf.md
index 527346d..28c70a8 100644
--- a/docs/static_site/src/pages/api/faq/perf.md
+++ b/docs/static_site/src/pages/api/faq/perf.md
@@ -66,7 +66,7 @@ So whether you specify `cpu(0)` or `cpu()`, _MXNet_ will use all CPU cores on th
 ### Scoring results
 The following table shows performance of MXNet-1.2.0.rc1,
 namely number of images that can be predicted per second.
-We used [example/image-classification/benchmark_score.py](https://github.com/dmlc/mxnet/blob/master/example/image-classification/benchmark_score.py)
+We used [example/image-classification/benchmark_score.py](https://github.com/apache/mxnet/blob/v1.x/example/image-classification/benchmark_score.py)
 to measure the performance on different AWS EC2 machines.
 
 AWS EC2 C5.18xlarge:
@@ -150,7 +150,7 @@ and V100 (EC2 p3.2xlarge).
 ### Scoring results
 
 Based on
-[example/image-classification/benchmark_score.py](https://github.com/dmlc/mxnet/blob/master/example/image-classification/benchmark_score.py)
+[example/image-classification/benchmark_score.py](https://github.com/apache/mxnet/blob/v1.x/example/image-classification/benchmark_score.py)
 and  MXNet-1.2.0.rc1, with cuDNN 7.0.5
 
 - K80 (single GPU)
@@ -213,7 +213,7 @@ Below is the performance result on V100 using float 16.
 ### Training results
 
 Based on
-[example/image-classification/train_imagenet.py](https://github.com/dmlc/mxnet/blob/master/example/image-classification/train_imagenet.py)
+[example/image-classification/train_imagenet.py](https://github.com/apache/mxnet/blob/v1.x/example/image-classification/train_imagenet.py)
 and  MXNet-1.2.0.rc1, with CUDNN 7.0.5. The benchmark script is available at
 [here](https://github.com/mli/mxnet-benchmark/blob/master/run_vary_batch.sh),
 where the batch size for Alexnet is increased by 16x.
@@ -260,7 +260,7 @@ It's critical to use the proper type of `kvstore` to get the best performance.
 Refer to [multi_device.md](https://mxnet.io/api/faq/distributed_training.html) for more
 details.
 
-Besides, we can use [tools/bandwidth](https://github.com/dmlc/mxnet/tree/master/tools/bandwidth)
+Besides, we can use [tools/bandwidth](https://github.com/apache/mxnet/tree/v1.x/tools/bandwidth)
 to find the communication cost per batch.
 Ideally, the communication cost should be less than the time to compute a batch.
 To reduce the communication cost, we can consider:
@@ -293,7 +293,7 @@ by summarizing at the operator level, instead of a function, kernel, or instruct
 
 The profiler can be turned on with an [environment variable]({{'/api/faq/env_var#control-the-profiler' | relative_url}})
 for an entire program run, or programmatically for just part of a run. Note that by default the profiler hides the details of each individual operator, and you can reveal the details by setting environment variables `MXNET_EXEC_BULK_EXEC_INFERENCE`, `MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN` and `MXNET_EXEC_BULK_EXEC_TRAIN` to 0.
-See [example/profiler](https://github.com/dmlc/mxnet/tree/master/example/profiler)
+See [example/profiler](https://github.com/apache/mxnet/tree/v1.x/example/profiler)
 for complete examples of how to use the profiler in code, or [this tutorial](https://mxnet.apache.org/api/python/docs/tutorials/performance/backend/profiler.html) on how to profile MXNet performance.
 
 Briefly, the Python code looks like:
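For context, typical profiler usage through the MXNet 1.x Python API is sketched below (the profiled workload and the output file name are placeholders):

```python
import mxnet as mx

# Configure the profiler before any work is scheduled; aggregate_stats
# enables a per-operator summary at the end of the run.
mx.profiler.set_config(profile_all=True, aggregate_stats=True,
                       filename='profile_output.json')

mx.profiler.set_state('run')      # start collecting

# ... code to be profiled goes here, e.g. a few matrix multiplies ...
x = mx.nd.random.uniform(shape=(1024, 1024))
y = mx.nd.dot(x, x)
y.wait_to_read()                  # make sure the async work has finished

mx.profiler.set_state('stop')     # stop collecting
print(mx.profiler.dumps())        # aggregated per-operator statistics
mx.profiler.dump()                # write the chrome://tracing JSON file
```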
diff --git a/docs/static_site/src/pages/api/faq/recordio.md b/docs/static_site/src/pages/api/faq/recordio.md
index 2e8fcdd..180b0cf 100644
--- a/docs/static_site/src/pages/api/faq/recordio.md
+++ b/docs/static_site/src/pages/api/faq/recordio.md
@@ -34,8 +34,8 @@ RecordIO implements a file format for a sequence of records. We recommend storin
 
 We provide two tools for creating a RecordIO dataset.
 
-* [im2rec.cc](https://github.com/dmlc/mxnet/blob/master/tools/im2rec.cc) - implements the tool using the C++ API.
-* [im2rec.py](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) - implements the tool using the Python API.
+* [im2rec.cc](https://github.com/apache/mxnet/blob/v1.x/tools/im2rec.cc) - implements the tool using the C++ API.
+* [im2rec.py](https://github.com/apache/mxnet/blob/v1.x/tools/im2rec.py) - implements the tool using the Python API.
 
 Both provide the same output: a RecordIO dataset.
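For context, the resulting dataset is read and written from Python through `mxnet.recordio`; a minimal sketch using plain string payloads rather than packed images:

```python
import mxnet as mx

# Write a few records sequentially.
writer = mx.recordio.MXRecordIO('tmp.rec', 'w')
for i in range(3):
    header = mx.recordio.IRHeader(flag=0, label=float(i), id=i, id2=0)
    writer.write(mx.recordio.pack(header, b'record-%d' % i))
writer.close()

# Read them back and unpack each record into header + payload.
reader = mx.recordio.MXRecordIO('tmp.rec', 'r')
while True:
    item = reader.read()
    if item is None:          # end of file
        break
    header, payload = mx.recordio.unpack(item)
    print(header.label, payload)
reader.close()
```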
 
diff --git a/docs/static_site/src/pages/api/faq/s3_integration.md b/docs/static_site/src/pages/api/faq/s3_integration.md
index de617e4..ecadaa5 100644
--- a/docs/static_site/src/pages/api/faq/s3_integration.md
+++ b/docs/static_site/src/pages/api/faq/s3_integration.md
@@ -70,7 +70,7 @@ aws s3 sync ./training-data s3://bucket-name/training-data
 
 Once the data is in S3, it is very straightforward to use it from MXNet. Any data iterator that can read/write data from a local drive can also read/write data from S3.
 
-Let's modify an existing example code in MXNet repository to read data from S3 instead of local disk. [`mxnet/tests/python/train/test_conv.py`](https://github.com/dmlc/mxnet/blob/master/tests/python/train/test_conv.py) trains a convolutional network using MNIST data from local disk. We'll do the following change to read the data from S3 instead.
+Let's modify an existing example code in MXNet repository to read data from S3 instead of local disk. [`mxnet/tests/python/train/test_conv.py`](https://github.com/apache/mxnet/blob/v1.x/tests/python/train/test_conv.py) trains a convolutional network using MNIST data from local disk. We'll do the following change to read the data from S3 instead.
 
 ```
 ~/mxnet$ sed -i -- 's/data\//s3:\/\/bucket-name\/training-data\//g' ./tests/python/train/test_conv.py
diff --git a/docs/static_site/src/pages/api/faq/smart_device.md b/docs/static_site/src/pages/api/faq/smart_device.md
index 7fe8ddd..96c4468 100644
--- a/docs/static_site/src/pages/api/faq/smart_device.md
+++ b/docs/static_site/src/pages/api/faq/smart_device.md
@@ -39,7 +39,7 @@ All that's necessary to create the library is to compile that single file.
 This simplifies the problem of porting to various platforms.
 
 Thanks to [Jack Deng](https://github.com/jdeng),
-MXNet provides an [amalgamation](https://github.com/dmlc/mxnet/tree/master/amalgamation) script
+MXNet provides an [amalgamation](https://github.com/apache/mxnet/tree/v1.x/amalgamation) script
 that compiles all code needed for prediction based on trained DL models into a single `.cc` file,
 containing approximately 30K lines of code. This code only depends on the BLAS library.
 Moreover, we've also created an even more minimal version,
@@ -53,8 +53,8 @@ Porting to another language with a C foreign function interface requires little
 For examples, see the following examples on GitHub:
 
 - Go: [https://github.com/jdeng/gomxnet](https://github.com/jdeng/gomxnet)
-- Java: [https://github.com/dmlc/mxnet/tree/master/amalgamation/jni](https://github.com/dmlc/mxnet/tree/master/amalgamation/jni)
-- Python: [https://github.com/dmlc/mxnet/tree/master/amalgamation/python](https://github.com/dmlc/mxnet/tree/master/amalgamation/python)
+- Java: [https://github.com/apache/mxnet/tree/v1.x/amalgamation/jni](https://github.com/apache/mxnet/tree/v1.x/amalgamation/jni)
+- Python: [https://github.com/apache/mxnet/tree/v1.x/amalgamation/python](https://github.com/apache/mxnet/tree/v1.x/amalgamation/python)
 
 
 If you plan to amalgamate your system, there are a few guidelines you ought to observe when building the project:
diff --git a/docs/static_site/src/pages/api/faq/visualize_graph.md b/docs/static_site/src/pages/api/faq/visualize_graph.md
index 8d47777..0df6a17 100644
--- a/docs/static_site/src/pages/api/faq/visualize_graph.md
+++ b/docs/static_site/src/pages/api/faq/visualize_graph.md
@@ -84,5 +84,5 @@ You should see computation graph something like the following image:
 width=400/>
 
 # References
-* [Example MXNet Matrix Factorization](https://github.com/dmlc/mxnet/blob/master/example/recommenders/demo1-MF.ipynb)
+* [Example MXNet Matrix Factorization](https://github.com/apache/mxnet/blob/v1.x/example/recommenders/demo1-MF.ipynb)
 * [Visualizing CNN Architecture of MXNet Tutorials](http://josephpcohen.com/w/visualizing-cnn-architectures-side-by-side-with-mxnet/)
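For context, producing such a plot from the Python API takes only a couple of calls; a minimal sketch (assumes the `graphviz` Python package and the Graphviz binaries are installed, and the two-layer network is a throwaway example):

```python
import mxnet as mx

# A small symbolic network to visualize.
data = mx.sym.Variable('data')
fc1  = mx.sym.FullyConnected(data=data, num_hidden=128, name='fc1')
act1 = mx.sym.Activation(data=fc1, act_type='relu', name='relu1')
fc2  = mx.sym.FullyConnected(data=act1, num_hidden=10, name='fc2')
net  = mx.sym.SoftmaxOutput(data=fc2, name='softmax')

# plot_network returns a graphviz.Digraph; passing the input shape lets
# the plot annotate each node with its inferred output shape.
graph = mx.viz.plot_network(net, shape={'data': (1, 784)})
graph.render('network_graph')   # writes the DOT source and a rendered PDF
```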
diff --git a/docs/static_site/src/pages/api/r/docs/tutorials/callback_function.md b/docs/static_site/src/pages/api/r/docs/tutorials/callback_function.md
index d74112d..23abca2 100644
--- a/docs/static_site/src/pages/api/r/docs/tutorials/callback_function.md
+++ b/docs/static_site/src/pages/api/r/docs/tutorials/callback_function.md
@@ -175,7 +175,7 @@ You also can save the training and evaluation errors for later use by passing a
 How to Write Your Own Callback Functions
 ----------
 
-You can find the source code for the two callback functions on [GitHub](https://github.com/dmlc/mxnet/blob/master/R-package/R/callback.R) and use it as a template:
+You can find the source code for the two callback functions on [GitHub](https://github.com/apache/mxnet/blob/v1.x/R-package/R/callback.R) and use it as a template:
 
 Basically, all callback functions follow the following structure:
 
diff --git a/docs/static_site/src/pages/api/r/docs/tutorials/custom_iterator.md b/docs/static_site/src/pages/api/r/docs/tutorials/custom_iterator.md
index bb4cfc2..a1f8008 100644
--- a/docs/static_site/src/pages/api/r/docs/tutorials/custom_iterator.md
+++ b/docs/static_site/src/pages/api/r/docs/tutorials/custom_iterator.md
@@ -43,7 +43,7 @@ You'll get two files, `mnist_train.csv` that contains 60.000 examples of hand wr
 
 Custom CSV Iterator
 ----------
-Next we are going to create a custom CSV Iterator based on the [C++ CSVIterator class](https://github.com/dmlc/mxnet/blob/master/src/io/iter_csv.cc).
+Next we are going to create a custom CSV Iterator based on the [C++ CSVIterator class](https://github.com/apache/mxnet/blob/v1.x/src/io/iter_csv.cc).
 
 For that we are going to use the R function `mx.io.CSVIter` as a base class. This class has as parameters `data.csv, data.shape, batch.size` and two main functions, `iter.next()` that calls the iterator in the next batch of data and `value()` that returns the train data and the label.
 
diff --git a/docs/static_site/src/pages/api/r/docs/tutorials/five_minutes_neural_network.md b/docs/static_site/src/pages/api/r/docs/tutorials/five_minutes_neural_network.md
index f712140..cabd378 100644
--- a/docs/static_site/src/pages/api/r/docs/tutorials/five_minutes_neural_network.md
+++ b/docs/static_site/src/pages/api/r/docs/tutorials/five_minutes_neural_network.md
@@ -116,7 +116,7 @@ To get an idea of what is happening, view the computation graph from R:
     graph.viz(model$symbol)
  ```
 
-[<img src="https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/knitr/graph.computation.png">](https://github.com/dmlc/mxnet)
+[<img src="https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/knitr/graph.computation.png">](https://github.com/apache/mxnet)
 
  ```r
     preds = predict(model, test.x)
diff --git a/docs/static_site/src/pages/api/r/docs/tutorials/mnist_competition.md b/docs/static_site/src/pages/api/r/docs/tutorials/mnist_competition.md
index e34819d..898e80f 100644
--- a/docs/static_site/src/pages/api/r/docs/tutorials/mnist_competition.md
+++ b/docs/static_site/src/pages/api/r/docs/tutorials/mnist_competition.md
@@ -26,7 +26,7 @@ Handwritten Digits Classification Competition
 =============================================
 
 [MNIST](http://yann.lecun.com/exdb/mnist/) is a handwritten digits image data set created by Yann LeCun. Every digit is represented by a 28 x 28 pixel image. It's become a standard data set for testing classifiers on simple image input. A neural network is a strong model for image classification tasks. There's a [long-term hosted competition](https://www.kaggle.com/c/digit-recognizer) on Kaggle using this data set.
-This tutorial shows how to use [MXNet](https://github.com/dmlc/mxnet/tree/master/R-package) to compete in this challenge.
+This tutorial shows how to use [MXNet](https://github.com/apache/mxnet/tree/v1.x/R-package) to compete in this challenge.
 
 ## Loading the Data
 
diff --git a/docs/static_site/src/pages/api/scala/docs/tutorials/char_lstm.md b/docs/static_site/src/pages/api/scala/docs/tutorials/char_lstm.md
index bb23fd0..b4b106d 100644
--- a/docs/static_site/src/pages/api/scala/docs/tutorials/char_lstm.md
+++ b/docs/static_site/src/pages/api/scala/docs/tutorials/char_lstm.md
@@ -56,7 +56,7 @@ Edit the CLASS_PATH variable in the script to include your operating system-spec
 
 ```
 
-- Run inference with the [run_test_charrnn.sh script](https://github.com/dmlc/mxnet/blob/master/scala-package/examples/scripts/rnn/run_test_charrnn.sh). Edit the script as follows:
+- Run inference with the [run_test_charrnn.sh script](https://github.com/apache/mxnet/blob/v1.x/scala-package/examples/scripts/rnn/run_test_charrnn.sh). Edit the script as follows:
 
 Edit the CLASS_PATH variable in the script to include your operating system-specific folder (e.g., linux-x86_64-cpu/linux-x86_64-gpu/osx-x86_64-cpu) in the path. Run the script with the following command:
 
@@ -526,5 +526,5 @@ You can see the output generated from Obama's speeches. All of the line breaks,
 
 ## Next Steps
 * [Scala API]({{'/api/scala'|relative_url}})
-* [More Scala Examples](https://github.com/dmlc/mxnet/tree/master/scala-package/examples/)
+* [More Scala Examples](https://github.com/apache/mxnet/tree/v1.x/scala-package/examples/)
 * [MXNet tutorials index]({{'/api'|relative_url}})
diff --git a/docs/static_site/src/pages/api/scala/docs/tutorials/mnist.md b/docs/static_site/src/pages/api/scala/docs/tutorials/mnist.md
index d121b41..071269e 100644
--- a/docs/static_site/src/pages/api/scala/docs/tutorials/mnist.md
+++ b/docs/static_site/src/pages/api/scala/docs/tutorials/mnist.md
@@ -32,7 +32,7 @@ Let's train a 3-layer network (i.e multilayer perceptron network) on the MNIST d
 To complete this tutorial, we need:
 
 - to compile the latest MXNet version. See the MXNet installation instructions for your operating system in [Setup and Installation]({{'/get_started'|relative_url}}).
-- to compile the Scala API. See Scala API build instructions in [Build](https://github.com/dmlc/mxnet/tree/master/scala-package).
+- to compile the Scala API. See Scala API build instructions in [Build](https://github.com/apache/mxnet/tree/v1.x/scala-package).
 
 ## Define the Network
 
@@ -56,7 +56,7 @@ val mlp = Symbol.api.SoftmaxOutput(Some(fc3), name = "sm")
 
 Then, load the training and validation data using DataIterators.
 
-You can download the MNIST data using the [get_mnist_data script](https://github.com/dmlc/mxnet/blob/master/scala-package/core/scripts/get_mnist_data.sh). We've already written a DataIterator for the MNIST dataset:
+You can download the MNIST data using the [get_mnist_data script](https://github.com/apache/mxnet/blob/v1.x/scala-package/core/scripts/get_mnist_data.sh). We've already written a DataIterator for the MNIST dataset:
 
 ```scala
 // load MNIST dataset
@@ -137,5 +137,5 @@ Check out more MXNet Scala examples below.
 
 ## Next Steps
 * [Scala API]({{'/api/scala'|relative_url}})
-* [More Scala Examples](https://github.com/dmlc/mxnet/tree/master/scala-package/examples/)
+* [More Scala Examples](https://github.com/apache/mxnet/tree/v1.x/scala-package/examples/)
 * [MXNet tutorials index]({{'/api'|relative_url}})
diff --git a/example/README.md b/example/README.md
index 656b36d..b68afd5 100644
--- a/example/README.md
+++ b/example/README.md
@@ -48,7 +48,7 @@ Example applications or scripts should be submitted in this `example` folder.
 
 ### Tutorials
 
-If you have a tutorial idea for the website, download the [Jupyter notebook tutorial template](https://github.com/dmlc/mxnet/tree/master/example/MXNetTutorialTemplate.ipynb).
+If you have a tutorial idea for the website, download the [Jupyter notebook tutorial template](https://github.com/apache/mxnet/tree/v1.x/example/MXNetTutorialTemplate.ipynb).
 
 #### Tutorial location
 
@@ -93,8 +93,8 @@ If your tutorial depends on specific packages, simply add them to this provision
 * [MXNet Julia API](https://mxnet.apache.org/api/julia/index.html)
 * [MXNet Perl API](https://mxnet.apache.org/api/perl/index.html)
 * [go-mxnet-predictor](https://github.com/songtianyi/go-mxnet-predictor) - Go binding for inference
-* [MXNet JNI](https://github.com/dmlc/mxnet/tree/master/amalgamation/jni) - JNI(Android) library
-* [MXNet Amalgamation](https://github.com/dmlc/mxnet/tree/master/amalgamation) - Amalgamation (entire library in a single file)
+* [MXNet JNI](https://github.com/apache/mxnet/tree/v1.x/amalgamation/jni) - JNI(Android) library
+* [MXNet Amalgamation](https://github.com/apache/mxnet/tree/v1.x/amalgamation) - Amalgamation (entire library in a single file)
 * [MXNet Javascript](https://github.com/dmlc/mxnet.js/) - MXNetJS: Javascript Package for Deep Learning in Browser (without server)
 
 ### <a name="deep-learning-examples-mxnet"></a>Deep Learning Examples in the MXNet Project Repository
@@ -160,7 +160,7 @@ If your tutorial depends on specific packages, simply add them to this provision
 * "Learn to sort by LSTM" by [xlvector](https://github.com/xlvector) [github link](https://github.com/xlvector/learning-dl/tree/master/mxnet/lstm_sort) [Blog in Chinese](http://blog.xlvector.net/2016-05/mxnet-lstm-example/)
 * [Neural Art using extremely lightweight (<500K) neural network](https://github.com/pavelgonchar/neural-art-mini) Lightweight version of mxnet neural art implementation by [Pavel Gonchar](https://github.com/pavelgonchar)
 * [Neural Art with generative networks](https://github.com/zhaw/neural_style) by [zhaw](https://github.com/zhaw)
-* [Faster R-CNN in MXNet with distributed implementation and data parallelization](https://github.com/dmlc/mxnet/tree/master/example/rcnn)
+* [Faster R-CNN in MXNet with distributed implementation and data parallelization](https://github.com/apache/mxnet/tree/v1.x/example/rcnn)
 * [Asynchronous Methods for Deep Reinforcement Learning in MXNet](https://github.com/zmonoid/Asyn-RL-MXNet/blob/master/mx_asyn.py) by [zmonoid](https://github.com/zmonoid)
 * [Deep Q-learning in MXNet](https://github.com/zmonoid/DQN-MXNet) by [zmonoid](https://github.com/zmonoid)
 * [Face Detection with End-to-End Integration of a ConvNet and a 3D Model (ECCV16)](https://github.com/tfwu/FaceDetection-ConvNet-3D) by [tfwu](https://github.com/tfwu), source code for paper Yunzhu Li, Benyuan Sun, Tianfu Wu and Yizhou Wang, "Face Detection with End-to-End Integration of a ConvNet and a 3D Model", ECCV 2016 <https://arxiv.org/abs/1606.00850>
diff --git a/example/caffe/README.md b/example/caffe/README.md
index a497176..833c58a 100644
--- a/example/caffe/README.md
+++ b/example/caffe/README.md
@@ -19,7 +19,7 @@
 
 [Caffe](http://caffe.berkeleyvision.org/) has been a well-known and widely-used deep learning framework. Now MXNet has supported calling most caffe operators(layers) and loss functions directly in its symbolic graph! Using one's own customized caffe layer is also effortless.
 
-Besides Caffe, MXNet has already embedded Torch modules and its tensor mathematical functions. ([link](https://github.com/dmlc/mxnet/blob/master/docs/faq/torch.md))
+Besides Caffe, MXNet has already embedded Torch modules and its tensor mathematical functions. ([link](https://github.com/apache/mxnet/blob/v1.x/docs/faq/torch.md))
 
 This blog demonstrates two steps to use Caffe op in MXNet:
 
@@ -38,7 +38,7 @@ This blog demonstrates two steps to use Caffe op in MXNet:
 
 ## Caffe Operator (Layer)
 Caffe's neural network operator and loss functions are supported by MXNet through `mxnet.symbol.CaffeOp` and `mxnet.symbol.CaffeLoss` respectively.
-For example, the following code shows multi-layer perception network for classifying MNIST digits ([full code](https://github.com/dmlc/mxnet/blob/master/example/caffe/caffe_net.py)):
+For example, the following code shows multi-layer perception network for classifying MNIST digits ([full code](https://github.com/apache/mxnet/blob/v1.x/example/caffe/caffe_net.py)):
 
 ### Python
 ```Python
diff --git a/example/image-classification/README.md b/example/image-classification/README.md
index 7542006..5767242 100644
--- a/example/image-classification/README.md
+++ b/example/image-classification/README.md
@@ -56,7 +56,7 @@ commonly used options are listed as following:
 
 | Argument                      | Comments                                 |
 | ----------------------------- | ---------------------------------------- |
-| `network`                     | The network to train, which is defined in [symbol/](https://github.com/dmlc/mxnet/tree/master/example/image-classification/symbols). Some networks may accept additional arguments, such as `--num-layers` is used to specify the number of layers in ResNet. |
+| `network`                     | The network to train, which is defined in [symbol/](https://github.com/apache/mxnet/tree/v1.x/example/image-classification/symbols). Some networks may accept additional arguments, such as `--num-layers` is used to specify the number of layers in ResNet. |
 | `data-train`, `data-val`      | The data for training and validation. It can be either a filename or a directory. For the latter, all files in the directory will be used. But if `--benchmark 1` is used, then there two arguments will be ignored. |
 | `gpus`                        | The list of GPUs to use, such as `0` or `0,3,4,7`. If an empty string `''` is given, then we will use CPU. |
 | `batch-size`                  | The batch size for SGD training. It specifies the number of examples used for each SGD iteration. If we use *k* GPUs, then each GPU will compute *batch_size/k* examples in each time. |
@@ -116,7 +116,7 @@ Hints:
 ## Pre-trained Models
 
 We provide multiple pre-trained models on various datasets. Use
-[common/modelzone.py](https://github.com/dmlc/mxnet/blob/master/example/image-classification/common/modelzoo.py)
+[common/modelzone.py](https://github.com/apache/mxnet/blob/v1.x/example/image-classification/common/modelzoo.py)
 to download these models. These models can be used in any front-end language
 MXNet supports. For example,
 [the tutorial](https://mxnet.io/tutorials/python/predict_image.html) shows how
@@ -128,9 +128,9 @@ It is first used by
 [ImageNet challenge 2012](http://www.image-net.org/challenges/LSVRC/2012/),
 which contains about 1.2M images with 1000 classes. To test these models, one
 can use
-[data/imagenet1k-val.sh](https://github.com/dmlc/mxnet/blob/master/example/image-classification/data/imagenet1k-val.sh)
+[data/imagenet1k-val.sh](https://github.com/apache/mxnet/blob/v1.x/example/image-classification/data/imagenet1k-val.sh)
 to prepare the validation dataset and
-[score.py](https://github.com/dmlc/mxnet/blob/master/example/image-classification/score.py)
+[score.py](https://github.com/apache/mxnet/blob/v1.x/example/image-classification/score.py)
 to calculate the accuracy.
 
 #### Single Center Crop Accuracy
@@ -206,10 +206,10 @@ around 20 million images.
 
 Fine-tune refers training with parameters partially intialized with pre-trained
 model. One can use
-[fine-tune.py](https://github.com/dmlc/mxnet/blob/master/example/image-classification/fine-tune.py)
+[fine-tune.py](https://github.com/apache/mxnet/blob/v1.x/example/image-classification/fine-tune.py)
 to train another dataset with pre-trained models listed above. For example,
 first run
-[data/caltech256.sh](https://github.com/dmlc/mxnet/blob/master/example/image-classification/data/caltech256.sh)
+[data/caltech256.sh](https://github.com/apache/mxnet/blob/v1.x/example/image-classification/data/caltech256.sh)
 to download and prepare the
 [Caltech-256](http://www.vision.caltech.edu/Image_Datasets/Caltech256/) dataset,
 then fine tune it with `imagenet11k-resnet-152` by using 8 GPUs:
@@ -257,7 +257,7 @@ distributed synchronized communication.
 For more usages:
 
 - One can use
-  [benchmark.py](https://github.com/dmlc/mxnet/blob/master/example/image-classification/benchmark.py)
+  [benchmark.py](https://github.com/apache/mxnet/blob/v1.x/example/image-classification/benchmark.py)
   to run distributed benchmarks (also for multiple GPUs with single machine)
 - A how-to [tutorial](https://mxnet.io/api/faq/distributed_training.html) with more
   explanation.
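For context, the pre-trained checkpoints mentioned earlier in this README can be loaded for inference through the Python Module API; a rough sketch (the `resnet-50` prefix, epoch 0, and the 224x224 input shape are assumptions):

```python
import mxnet as mx

# Loads resnet-50-symbol.json and resnet-50-0000.params from disk.
sym, arg_params, aux_params = mx.model.load_checkpoint('resnet-50', 0)

# Bind the network for forward-only execution on CPU.
mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)
mod.bind(for_training=False, data_shapes=[('data', (1, 3, 224, 224))])
mod.set_params(arg_params, aux_params, allow_missing=True)

# Score one random image-shaped batch; a real pipeline would feed a
# preprocessed image here instead.
batch = mx.io.DataBatch([mx.nd.random.uniform(shape=(1, 3, 224, 224))])
mod.forward(batch, is_train=False)
print(mod.get_outputs()[0].asnumpy().argmax())
```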
diff --git a/example/kaggle-ndsb1/training_curves.py b/example/kaggle-ndsb1/training_curves.py
index 67f25f0..4aae87d 100644
--- a/example/kaggle-ndsb1/training_curves.py
+++ b/example/kaggle-ndsb1/training_curves.py
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-## based on https://github.com/dmlc/mxnet/issues/1302
+## based on https://github.com/apache/mxnet/issues/1302
 ## Parses the model fit log file and generates a train/val vs epoch plot
 import matplotlib.pyplot as plt
 import numpy as np
diff --git a/example/rcnn/README.md b/example/rcnn/README.md
index a12ba2f..e453024 100644
--- a/example/rcnn/README.md
+++ b/example/rcnn/README.md
@@ -63,7 +63,7 @@ for example, `python3 test.py --dataset voc --network vgg16 --params model/vgg16
 * Jun 22, 2018: We simplified code. 
 
 ### Disclaimer
-This repository used code from [MXNet](https://github.com/dmlc/mxnet),
+This repository used code from [MXNet](https://github.com/apache/mxnet),
 [Fast R-CNN](https://github.com/rbgirshick/fast-rcnn),
 [Faster R-CNN](https://github.com/rbgirshick/py-faster-rcnn),
 [caffe](https://github.com/BVLC/caffe),
diff --git a/example/reinforcement-learning/dqn/operators.py b/example/reinforcement-learning/dqn/operators.py
index 0c9b588..a624e4c 100644
--- a/example/reinforcement-learning/dqn/operators.py
+++ b/example/reinforcement-learning/dqn/operators.py
@@ -30,7 +30,7 @@ class DQNOutput(mx.operator.CustomOp):
         self.assign(out_data[0], req[0], in_data[0])
 
     def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
-        # TODO Backward using NDArray will cause some troubles see `https://github.com/dmlc/mxnet/issues/1720'
+        # TODO Backward using NDArray will cause some troubles see `https://github.com/apache/mxnet/issues/1720'
         x = out_data[0].asnumpy()
         action = in_data[1].asnumpy().astype(numpy.int)
         reward = in_data[2].asnumpy()
diff --git a/example/ssd/README.md b/example/ssd/README.md
index dcb15f4..c78c260 100644
--- a/example/ssd/README.md
+++ b/example/ssd/README.md
@@ -53,7 +53,7 @@ Due to the permission issue, this example is maintained in this [repository](htt
 * Monitor validation mAP during training.
 * More network symbols under development and test.
 * Extra operators are now in `mxnet/src/operator/contrib`.
-* Old models are incompatible, use [e06c55d](https://github.com/dmlc/mxnet/commits/e06c55d6466a0c98c7def8f118a48060fb868901) or [e4f73f1](https://github.com/dmlc/mxnet/commits/e4f73f1f4e76397992c4b0a33c139d52b4b7af0e) for backward compatibility. Or, you can modify the json file to update the symbols if you are familiar with it, because only names have changed while weights and bias should still be good.
+* Old models are incompatible, use [e06c55d](https://github.com/apache/mxnet/commits/e06c55d6466a0c98c7def8f118a48060fb868901) or [e4f73f1](https://github.com/apache/mxnet/commits/e4f73f1f4e76397992c4b0a33c139d52b4b7af0e) for backward compatibility. Or, you can modify the json file to update the symbols if you are familiar with it, because only names have changed while weights and bias should still be good.
 
 ### Demo results
 ![demo1](https://cloud.githubusercontent.com/assets/3307514/19171057/8e1a0cc4-8be0-11e6-9d8f-088c25353b40.png)
diff --git a/julia/docs/src/index.md b/julia/docs/src/index.md
index 4213265..b5dc964 100644
--- a/julia/docs/src/index.md
+++ b/julia/docs/src/index.md
@@ -19,7 +19,7 @@
 
 [MXNet.jl](https://github.com/dmlc/MXNet.jl) is the
 [Julia](http://julialang.org/) package of
-[dmlc/mxnet](https://github.com/dmlc/mxnet). MXNet.jl brings flexible and efficient GPU
+[MXNet](https://github.com/apache/mxnet). MXNet.jl brings flexible and efficient GPU
 computing and state-of-art deep learning to Julia. Some highlight of features
 include:
 
diff --git a/julia/docs/src/user-guide/install.md b/julia/docs/src/user-guide/install.md
index 129b6a1..bde4a37 100644
--- a/julia/docs/src/user-guide/install.md
+++ b/julia/docs/src/user-guide/install.md
@@ -34,7 +34,7 @@ following command instead
 Pkg.checkout("MXNet")
 ```
 
-MXNet.jl is built on top of [libmxnet](https://github.com/dmlc/mxnet).
+MXNet.jl is built on top of [libmxnet](https://github.com/apache/mxnet).
 Upon installation, Julia will try to automatically download and build
 libmxnet.
 
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet.pm b/perl-package/AI-MXNet/lib/AI/MXNet.pm
index 9c8f682..0778da8 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet.pm
@@ -241,7 +241,7 @@ AI::MXNet - Perl interface to MXNet machine learning library
 =head1 SEE ALSO
 
     L<https://mxnet.io/>
-    L<https://github.com/dmlc/mxnet/tree/master/perl-package>
+    L<https://github.com/apache/mxnet/tree/v1.x/perl-package>
     L<Function::Parameters|https://metacpan.org/pod/Function::Parameters>, L<Mouse|https://metacpan.org/pod/Mouse>
 
 =head1 AUTHOR
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/LRScheduler.pm b/perl-package/AI-MXNet/lib/AI/MXNet/LRScheduler.pm
index 5575e37..3537891 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/LRScheduler.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/LRScheduler.pm
@@ -54,7 +54,7 @@ has 'base_lr' => (is => 'rw', isa => 'Num', default => 0.01);
     The exact value is the upper bound of the number of updates applied to
     a weight/index
 
-    See more details in https://github.com/dmlc/mxnet/issues/625
+    See more details in https://github.com/apache/mxnet/issues/625
 
     Parameters
     ----------
diff --git a/plugin/caffe/README.md b/plugin/caffe/README.md
index a497176..64f0227 100644
--- a/plugin/caffe/README.md
+++ b/plugin/caffe/README.md
@@ -19,7 +19,7 @@
 
 [Caffe](http://caffe.berkeleyvision.org/) has been a well-known and widely-used deep learning framework. Now MXNet has supported calling most caffe operators(layers) and loss functions directly in its symbolic graph! Using one's own customized caffe layer is also effortless.
 
-Besides Caffe, MXNet has already embedded Torch modules and its tensor mathematical functions. ([link](https://github.com/dmlc/mxnet/blob/master/docs/faq/torch.md))
+Besides Caffe, MXNet has already embedded Torch modules and its tensor mathematical functions. ([link](https://github.com/apache/mxnet/blob/master/docs/faq/torch.md))
 
 This blog demonstrates two steps to use Caffe op in MXNet:
 
@@ -38,7 +38,7 @@ This blog demonstrates two steps to use Caffe op in MXNet:
 
 ## Caffe Operator (Layer)
 Caffe's neural network operator and loss functions are supported by MXNet through `mxnet.symbol.CaffeOp` and `mxnet.symbol.CaffeLoss` respectively.
-For example, the following code shows multi-layer perception network for classifying MNIST digits ([full code](https://github.com/dmlc/mxnet/blob/master/example/caffe/caffe_net.py)):
+For example, the following code shows multi-layer perception network for classifying MNIST digits ([full code](https://github.com/apache/mxnet/blob/v1.x/example/caffe/caffe_net.py)):
 
 ### Python
 ```Python
diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala
index 750fd98..4eaa787 100644
--- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala
+++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TestCharRnn.scala
@@ -27,7 +27,7 @@ import scala.collection.JavaConverters._
 
 /**
  * Follows the demo, to test the char rnn:
- * https://github.com/dmlc/mxnet/blob/master/example/rnn/char-rnn.ipynb
+ * https://github.com/apache/mxnet/blob/v1.x/example/rnn/char-rnn.ipynb
  */
 object TestCharRnn {
 
diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala
index 2704715..059e5a2 100644
--- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala
+++ b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/rnn/TrainCharRnn.scala
@@ -25,7 +25,7 @@ import org.apache.mxnet.optimizer.Adam
 
 /**
   * Follows the demo, to train the char rnn:
-  * https://github.com/dmlc/mxnet/blob/master/example/rnn/char-rnn.ipynb
+  * https://github.com/apache/mxnet/blob/v1.x/example/rnn/char-rnn.ipynb
   */
 object TrainCharRnn {
 
diff --git a/src/engine/naive_engine.cc b/src/engine/naive_engine.cc
index d98f77d..34860db 100644
--- a/src/engine/naive_engine.cc
+++ b/src/engine/naive_engine.cc
@@ -252,7 +252,7 @@ class NaiveEngine final : public Engine {
 #endif
 /*!
  * \brief Holding a shared_ptr to the object pool to prevent it from being destructed too early
- * See also #309 (https://github.com/dmlc/mxnet/issues/309) and similar fix in threaded_engine.h.
+ * See also #309 (https://github.com/apache/mxnet/issues/309) and similar fix in threaded_engine.h.
  * Without this, segfaults seen on CentOS7 in test_operator_gpu.py:test_convolution_multiple_streams
  */
   std::shared_ptr<common::ObjectPool<NaiveOpr> > objpool_opr_ref_;
diff --git a/src/engine/threaded_engine.h b/src/engine/threaded_engine.h
index aa0e5a2..725593b 100644
--- a/src/engine/threaded_engine.h
+++ b/src/engine/threaded_engine.h
@@ -575,7 +575,7 @@ class ThreadedEngine : public Engine {
 
   /*!
    * \brief Holding a shared_ptr to the object pool to prevent it from being destructed too early
-   * See also #309 (https://github.com/dmlc/mxnet/issues/309)
+   * See also #309 (https://github.com/apache/mxnet/issues/309)
    */
   std::shared_ptr<common::ObjectPool<ThreadedOpr> >       objpool_opr_ref_;
   std::shared_ptr<common::ObjectPool<OprBlock> >          objpool_blk_ref_;
diff --git a/src/operator/svm_output.cc b/src/operator/svm_output.cc
index a52aa47..45591d6 100644
--- a/src/operator/svm_output.cc
+++ b/src/operator/svm_output.cc
@@ -90,7 +90,7 @@ MXNET_REGISTER_OP_PROPERTY(SVMOutput, SVMOutputProp)
 .describe(R"code(Computes support vector machine based transformation of the input.
 
 This tutorial demonstrates using SVM as output layer for classification instead of softmax:
-https://github.com/dmlc/mxnet/tree/master/example/svm_mnist.
+https://github.com/apache/mxnet/tree/v1.x/example/svm_mnist.
 
 )code")
 .add_argument("data", "NDArray-or-Symbol", "Input data for SVM transformation.")
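For context, using this operator from the Python symbol API is a one-liner; a small sketch (the margin and regularization values are illustrative):

```python
import mxnet as mx

data = mx.sym.Variable('data')
fc   = mx.sym.FullyConnected(data=data, num_hidden=10, name='fc')
# Hinge-loss output layer used in place of SoftmaxOutput; use_linear
# selects the linear (L1) hinge loss instead of the squared one.
svm  = mx.sym.SVMOutput(data=fc, name='svm', margin=1.0,
                        regularization_coefficient=1.0, use_linear=False)
```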
diff --git a/tools/coreml/converter/_layers.py b/tools/coreml/converter/_layers.py
index 6590b13..3dcd531 100644
--- a/tools/coreml/converter/_layers.py
+++ b/tools/coreml/converter/_layers.py
@@ -103,11 +103,11 @@ def convert_reshape(net, node, module, builder):
 
     if any(item <= 0 for item in target_shape):
         raise NotImplementedError('Special dimensional values less than or equal to 0 are not supported yet.'
-                                  'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.')
+                                  'Feel free to file an issue here: https://github.com/apache/mxnet/issues.')
 
     if 'reverse' in node and node['reverse'] == 'True':
         raise NotImplementedError('"reverse" parameter is not supported by yet.'
-                                  'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.')
+                                  'Feel free to file an issue here: https://github.com/apache/mxnet/issues.')
 
     mode = 0 # CHANNEL_FIRST
     builder.add_reshape(name, input_name, output_name, target_shape, mode)