Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/03/29 19:06:58 UTC

[GitHub] piiswrong closed pull request #10151: WIP Do not merge: Updated to Jetpack 3.2, fix minor wheel issue

URL: https://github.com/apache/incubator-mxnet/pull/10151

This is a PR merged from a forked repository. As GitHub hides the original
diff on merge, it is displayed below for the sake of provenance:

diff --git a/ci/docker/Dockerfile.build.jetson b/ci/docker/Dockerfile.build.jetson
index e49b48e7e1c..8b1a275752f 100755
--- a/ci/docker/Dockerfile.build.jetson
+++ b/ci/docker/Dockerfile.build.jetson
@@ -20,7 +20,7 @@
 # This script assumes /work/mxnet exists and contains the mxnet code you wish to compile and
 # that /work/build exists and is the target for your output.
 
-FROM nvidia/cuda:8.0-cudnn6-devel as cudabuilder
+FROM nvidia/cuda:9.0-cudnn7-devel as cudabuilder
 
 FROM dockcross/linux-arm64
 
@@ -46,25 +46,25 @@ ENV PATH $PATH:/usr/local/cuda/bin
 ENV TARGET_ARCH aarch64
 ENV TARGET_OS linux
 
-# Install ARM depedencies based on Jetpack 3.1
-RUN JETPACK_DOWNLOAD_PREFIX=http://developer.download.nvidia.com/devzone/devcenter/mobile/jetpack_l4t/013/linux-x64 && \
-    ARM_CUDA_INSTALLER_PACKAGE=cuda-repo-l4t-8-0-local_8.0.84-1_arm64.deb && \
-    ARM_CUDNN_INSTALLER_PACKAGE=libcudnn6_6.0.21-1+cuda8.0_arm64.deb && \
-    ARM_CUDNN_DEV_INSTALLER_PACKAGE=libcudnn6-dev_6.0.21-1+cuda8.0_arm64.deb && \
+# Install ARM dependencies based on JetPack 3.2
+RUN JETPACK_DOWNLOAD_PREFIX=http://developer.download.nvidia.com/devzone/devcenter/mobile/jetpack_l4t/3.2GA/m892ki/JetPackL4T_32_b196/ && \
+    ARM_CUDA_INSTALLER_PACKAGE=cuda-repo-l4t-9-0-local_9.0.252-1_arm64.deb && \
+    ARM_CUDNN_INSTALLER_PACKAGE=libcudnn7_7.0.5.13-1+cuda9.0_arm64.deb && \
+    ARM_CUDNN_DEV_INSTALLER_PACKAGE=libcudnn7-dev_7.0.5.13-1+cuda9.0_arm64.deb && \
     wget -nv $JETPACK_DOWNLOAD_PREFIX/$ARM_CUDA_INSTALLER_PACKAGE && \
     wget -nv $JETPACK_DOWNLOAD_PREFIX/$ARM_CUDNN_INSTALLER_PACKAGE && \
     wget -nv $JETPACK_DOWNLOAD_PREFIX/$ARM_CUDNN_DEV_INSTALLER_PACKAGE && \
     dpkg -i $ARM_CUDA_INSTALLER_PACKAGE && \
+    apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub && \
     dpkg -i $ARM_CUDNN_INSTALLER_PACKAGE && \
     dpkg -i $ARM_CUDNN_DEV_INSTALLER_PACKAGE && \
     apt update -y  && \
-    apt install -y unzip cuda-cudart-cross-aarch64-8-0 cuda-cublas-cross-aarch64-8-0 \
-    cuda-nvml-cross-aarch64-8-0 cuda-nvrtc-cross-aarch64-8-0 cuda-cufft-cross-aarch64-8-0 \
-    cuda-curand-cross-aarch64-8-0 cuda-cusolver-cross-aarch64-8-0 cuda-cusparse-cross-aarch64-8-0 \
-    cuda-misc-headers-cross-aarch64-8-0 cuda-npp-cross-aarch64-8-0 libcudnn6  && \
-    cp /usr/local/cuda-8.0/targets/aarch64-linux/lib/*.so /usr/local/cuda/lib64/ && \
-    cp /usr/local/cuda-8.0/targets/aarch64-linux/lib/stubs/*.so /usr/local/cuda/lib64/stubs/ && \
-    cp -r /usr/local/cuda-8.0/targets/aarch64-linux/include/ /usr/local/cuda/include/ && \
+    apt install -y unzip cuda-cudart-dev-9-0 cuda-cublas-9-0 cuda-nvml-dev-9-0 \
+    cuda-nvrtc-dev-9-0 cuda-cufft-dev-9-0 cuda-curand-dev-9-0 cuda-cusolver-9-0 \
+    cuda-cusparse-dev-9-0 cuda-misc-headers-9-0 cuda-npp-dev-9-0 libcudnn7 && \
+    cp /usr/local/cuda-9.0/targets/aarch64-linux/lib/*.so /usr/local/cuda/lib64/ && \
+    cp /usr/local/cuda-9.0/targets/aarch64-linux/lib/stubs/*.so /usr/local/cuda/lib64/stubs/ && \
+    cp -r /usr/local/cuda-9.0/targets/aarch64-linux/include/ /usr/local/cuda/include/ && \
     rm $ARM_CUDA_INSTALLER_PACKAGE $ARM_CUDNN_INSTALLER_PACKAGE $ARM_CUDNN_DEV_INSTALLER_PACKAGE
 
 WORKDIR /work/mxnet
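
One quick sanity check that the JetPack 3.2 packages above actually supplied
aarch64 libraries (and not host x86_64 ones) is to inspect the copied files
inside the built image; a minimal sketch, library name illustrative:

    # Should report "ELF 64-bit ... ARM aarch64", not "x86-64"
    file /usr/local/cuda/lib64/libcudart.so
    # nvcc comes from the cudabuilder stage and should report release 9.0
    nvcc --version
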
diff --git a/ci/docker/config/arm.crosscompile.mk b/ci/docker/config/arm.crosscompile.mk
new file mode 100644
index 00000000000..2bca9e396d7
--- /dev/null
+++ b/ci/docker/config/arm.crosscompile.mk
@@ -0,0 +1,161 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+#-------------------------------------------------------------------------------
+#  Template configuration for compiling mxnet
+#
+#  If you want to change the configuration, please use the following
+#  steps. Assume you are in the root directory of mxnet. First copy this
+#  file so that any local changes will be ignored by git
+#
+#  $ cp make/config.mk .
+#
+#  Next modify the according entries, and then compile by
+#
+#  $ make
+#
+#  or build in parallel with 8 threads
+#
+#  $ make -j8
+#-------------------------------------------------------------------------------
+
+#---------------------
+# We do not assign compilers here.  Often, when cross-compiling, these will
+# already be set correctly.
+#--------------------
+
+export NVCC = nvcc
+
+# whether to compile with options for MXNet developers
+DEV = 0
+
+# whether to compile with debugging enabled
+DEBUG = 0
+
+# whether to compile with the profiler
+USE_PROFILER =
+
+# the additional link flags you want to add
+# TODO: Move flags here
+ADD_LDFLAGS=-static-libstdc++
+
+# the additional compile flags you want to add
+ADD_CFLAGS =
+
+#---------------------------------------------
+# matrix computation libraries for CPU/GPU
+#---------------------------------------------
+
+# whether to use CUDA during compilation
+USE_CUDA = 0
+
+# add the path to the CUDA library to the link and compile flags;
+# if you have already added it to your environment variables, leave this as NONE
+# USE_CUDA_PATH = /usr/local/cuda
+USE_CUDA_PATH = NONE
+
+# whether to use the cuDNN library
+USE_CUDNN = 0
+
+# whether to use CUDA runtime compilation for writing kernels in a native language (e.g. Python)
+USE_NVRTC = 0
+
+# whether to use OpenCV during compilation
+# you can disable it, but then you will not be able to use the
+# imbin iterator
+USE_OPENCV = 0
+
+# use openmp for parallelization
+USE_OPENMP = 1
+
+# whether to use the NNPACK library
+USE_NNPACK = 0
+
+# For arm builds we're using openblas
+USE_BLAS = openblas
+
+# whether to use LAPACK during compilation
+# only effective when compiled with BLAS versions openblas/apple/atlas/mkl
+USE_LAPACK = 1
+
+# path to lapack library in case of a non-standard installation
+USE_LAPACK_PATH =
+
+# add the path to the Intel library; you may need it for MKL if you did not
+# add the path to your environment variables
+USE_INTEL_PATH = NONE
+
+# If using MKL only for BLAS, choose static linking automatically to allow the Python wrapper
+USE_STATIC_MKL = NONE
+ifeq ($(USE_BLAS), mkl)
+USE_STATIC_MKL = 1
+endif
+
+#----------------------------
+# distributed computing
+#----------------------------
+
+# whether or not to enable multi-machine support
+USE_DIST_KVSTORE = 0
+
+# whether or not to allow reading and writing HDFS directly. If yes, then
+# Hadoop is required
+USE_HDFS = 0
+
+# path to libjvm.so. required if USE_HDFS=1
+LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server
+
+# whether or not to allow reading and writing AWS S3 directly. If yes, then
+# libcurl4-openssl-dev is required; it can be installed on Ubuntu via
+# sudo apt-get install -y libcurl4-openssl-dev
+USE_S3 = 0
+
+#----------------------------
+# additional operators
+#----------------------------
+
+# path to folders containing project-specific operators that you don't want to put in src/operators
+EXTRA_OPERATORS =
+
+#----------------------------
+# other features
+#----------------------------
+
+# Create C++ interface package
+USE_CPP_PACKAGE = 0
+
+#----------------------------
+# plugins
+#----------------------------
+
+# whether to use caffe integration. This requires installing caffe.
+# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH
+# CAFFE_PATH = $(HOME)/caffe
+# MXNET_PLUGINS += plugin/caffe/caffe.mk
+
+# whether to use torch integration. This requires installing torch.
+# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH
+# TORCH_PATH = $(HOME)/torch
+# MXNET_PLUGINS += plugin/torch/torch.mk
+
+# WARPCTC_PATH = $(HOME)/warp-ctc
+# MXNET_PLUGINS += plugin/warpctc/warpctc.mk
+
+# whether to use SFrame integration. This requires building SFrame:
+# git@github.com:dato-code/SFrame.git
+# SFRAME_PATH = $(HOME)/SFrame
+# MXNET_PLUGINS += plugin/sframe/plugin.mk
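
Note that this config leaves USE_CUDA = 0, while build_jetson (in the
runtime_functions.sh hunk below) passes USE_CUDA=1 and friends on the make
command line. In GNU make, command-line variable assignments override
assignments made inside the makefile, so the CI flags win. A minimal sketch
of that precedence (file name hypothetical; the recipe line must start with
a tab):

    # precedence-demo.mk
    USE_CUDA = 0
    all:
    	@echo USE_CUDA=$(USE_CUDA)

    # $ make -f precedence-demo.mk               ->  USE_CUDA=0
    # $ make -f precedence-demo.mk USE_CUDA=1    ->  USE_CUDA=1
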
diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index 39809f28127..95f3a33d859 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -37,37 +37,31 @@ clean_repo() {
 
 build_jetson() {
     set -ex
-    pushd .
+    cp ci/docker/config/arm.crosscompile.mk make/config.mk
+    make \
+        USE_OPENCV=0                   \
+        USE_SSE=0                      \
+        USE_BLAS=openblas              \
+        USE_CUDA=1                     \
+        USE_CUDNN=1                    \
+        ENABLE_CUDA_RTC=0              \
+        USE_NCCL=0                     \
+        USE_CUDA_PATH=/usr/local/cuda/ \
+        -j$(nproc)
 
-    #cd /work/mxnet
-    #make -j$(nproc) USE_OPENCV=0 USE_BLAS=openblas USE_SSE=0 USE_CUDA=1 USE_CUDNN=1 ENABLE_CUDA_RTC=0 USE_NCCL=0 USE_CUDA_PATH=/usr/local/cuda/
-    cd /work/build
-    cmake\
-        -DUSE_CUDA=OFF\
-        -DUSE_OPENCV=OFF\
-        -DUSE_OPENMP=ON\
-        -DUSE_SIGNAL_HANDLER=ON\
-        -DUSE_MKL_IF_AVAILABLE=OFF\
-        -DUSE_LAPACK=OFF\
-        -DCMAKE_BUILD_TYPE=RelWithDebInfo\
-        -G Ninja /work/mxnet
-    ninja
     export MXNET_LIBRARY_PATH=`pwd`/libmxnet.so
     cd /work/mxnet/python
     python setup.py bdist_wheel --universal
 
-
-    # Fix pathing issues in the wheel.  We need to move libmxnet.so from the data folder to the root
-    # of the wheel, then repackage the wheel.
-    # Create a temp dir to do the work.
-    # TODO: move apt call to install
+    # Fix pathing issues in the wheel.  We need to move libmxnet.so from the data folder to the
+    # mxnet folder, then repackage the wheel.
     WHEEL=`readlink -f dist/*.whl`
     TMPDIR=`mktemp -d`
     unzip -d $TMPDIR $WHEEL
     rm $WHEEL
     cd $TMPDIR
     mv *.data/data/mxnet/libmxnet.so mxnet
-    zip -r $WHEEL $TMPDIR
+    zip -r $WHEEL .
     cp $WHEEL /work/build
     rm -rf $TMPDIR
     popd
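
Two things are going on in the repackaging step above: libmxnet.so is moved
out of the wheel's *.data/data tree into the mxnet package folder, and the
old "zip -r $WHEEL $TMPDIR" is replaced by "zip -r $WHEEL ." because zipping
the temp directory by its path re-added every file under a tmp/... prefix and
broke the wheel's internal layout. A standalone sketch of the fixed step,
assuming a single freshly built wheel in dist/:

    # Repackage the wheel with libmxnet.so in the mxnet package directory.
    WHEEL=$(readlink -f dist/*.whl)
    TMPDIR=$(mktemp -d)
    unzip -q -d "$TMPDIR" "$WHEEL"
    rm "$WHEEL"
    cd "$TMPDIR"
    mv ./*.data/data/mxnet/libmxnet.so mxnet/
    zip -qr "$WHEEL" .          # "." keeps paths relative to the wheel root
    cd - && rm -rf "$TMPDIR"
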
diff --git a/docker_multiarch/Dockerfile.build.jetson b/docker_multiarch/Dockerfile.build.jetson
new file mode 100644
index 00000000000..0a7f8b95caa
--- /dev/null
+++ b/docker_multiarch/Dockerfile.build.jetson
@@ -0,0 +1,93 @@
+# -*- mode: dockerfile -*-
+# dockerfile to build libmxnet.so and a Python wheel for the Jetson TX1/TX2
+
+FROM nvidia/cuda:9.0-cudnn7-devel as cudabuilder
+
+FROM dockcross/linux-arm64
+
+ENV ARCH aarch64
+ENV NVCCFLAGS "-m64"
+ENV CUDA_ARCH "-gencode arch=compute_53,code=sm_53 -gencode arch=compute_62,code=sm_62"
+ENV BUILD_OPTS "USE_OPENCV=0 USE_BLAS=openblas USE_SSE=0 USE_CUDA=1 USE_CUDNN=1 ENABLE_CUDA_RTC=0 USE_NCCL=0 USE_CUDA_PATH=/usr/local/cuda/"
+ENV CC /usr/bin/aarch64-linux-gnu-gcc
+ENV CXX /usr/bin/aarch64-linux-gnu-g++
+ENV FC /usr/bin/aarch64-linux-gnu-gfortran-4.9
+ENV HOSTCC gcc
+
+WORKDIR /work
+
+# Build OpenBLAS
+ADD https://api.github.com/repos/xianyi/OpenBLAS/git/refs/heads/master /tmp/openblas_version.json
+RUN git clone https://github.com/xianyi/OpenBLAS.git && \
+    cd OpenBLAS && \
+    make -j$(nproc) TARGET=ARMV8 && \
+    PREFIX=/usr make install
+
+# Setup CUDA build env (including configuring and copying nvcc)
+COPY --from=cudabuilder /usr/local/cuda /usr/local/cuda
+ENV PATH $PATH:/usr/local/cuda/bin
+ENV TARGET_ARCH aarch64
+ENV TARGET_OS linux
+
+# Install ARM dependencies based on JetPack 3.2
+RUN JETPACK_DOWNLOAD_PREFIX=http://developer.download.nvidia.com/devzone/devcenter/mobile/jetpack_l4t/3.2GA/m892ki/JetPackL4T_32_b196/ && \
+    ARM_CUDA_INSTALLER_PACKAGE=cuda-repo-l4t-9-0-local_9.0.252-1_arm64.deb && \
+    ARM_CUDNN_INSTALLER_PACKAGE=libcudnn7_7.0.5.13-1+cuda9.0_arm64.deb && \
+    ARM_CUDNN_DEV_INSTALLER_PACKAGE=libcudnn7-dev_7.0.5.13-1+cuda9.0_arm64.deb && \
+    wget -nv $JETPACK_DOWNLOAD_PREFIX/$ARM_CUDA_INSTALLER_PACKAGE && \
+    wget -nv $JETPACK_DOWNLOAD_PREFIX/$ARM_CUDNN_INSTALLER_PACKAGE && \
+    wget -nv $JETPACK_DOWNLOAD_PREFIX/$ARM_CUDNN_DEV_INSTALLER_PACKAGE && \
+    dpkg -i $ARM_CUDA_INSTALLER_PACKAGE && \
+    apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub && \
+    dpkg -i $ARM_CUDNN_INSTALLER_PACKAGE && \
+    dpkg -i $ARM_CUDNN_DEV_INSTALLER_PACKAGE && \
+    apt update -y  && \
+    apt install -y unzip cuda-cudart-dev-9-0 cuda-cublas-9-0 cuda-nvml-dev-9-0 \
+    cuda-nvrtc-dev-9-0 cuda-cufft-dev-9-0 cuda-curand-dev-9-0 cuda-cusolver-9-0 \
+    cuda-cusparse-dev-9-0 cuda-misc-headers-9-0 cuda-npp-dev-9-0 libcudnn7 && \
+    cp /usr/local/cuda-9.0/targets/aarch64-linux/lib/* /usr/local/cuda/lib64/ && \
+    cp /usr/local/cuda-9.0/lib64/* /usr/local/cuda/lib64/ && \
+    cp /usr/local/cuda-9.0/targets/aarch64-linux/lib/stubs/*.so /usr/local/cuda/lib64/stubs/ && \
+    cp -r /usr/local/cuda-9.0/targets/aarch64-linux/include/ /usr/local/cuda/include/ && \
+    rm $ARM_CUDA_INSTALLER_PACKAGE $ARM_CUDNN_INSTALLER_PACKAGE $ARM_CUDNN_DEV_INSTALLER_PACKAGE
+
+# Build MXNet
+RUN git clone --recurse https://github.com/apache/incubator-mxnet.git mxnet
+
+WORKDIR /work/mxnet
+
+# Add ARM specific settings
+ADD arm.crosscompile.mk make/config.mk
+
+# Build and link
+RUN make -j$(nproc) $BUILD_OPTS
+
+# Create a binary wheel for easy installation.
+# When using tool.py, output will be in the jetson folder.
+# scp the .whl file to your target device and install it via
+# pip install
+WORKDIR /work/mxnet/python
+RUN python setup.py bdist_wheel --universal
+
+# Copy build artifacts to output folder for tool.py script
+RUN mkdir -p /work/build && cp dist/*.whl /work/build && cp ../lib/* /work/build
+
+# Fix pathing issues in the wheel.  We need to move libmxnet.so from the data folder to the
+# mxnet folder, then repackage the wheel.
+# Create a temp dir to do the work.
+WORKDIR /work/build
+RUN apt-get install -y unzip && \
+    mkdir temp && \
+    cp *.whl temp
+
+# Extract the wheel, move the libmxnet.so file, repackage the wheel.
+WORKDIR /work/build/temp
+RUN unzip *.whl &&  \
+    rm *.whl && \
+    mv *.data/data/mxnet/libmxnet.so mxnet && \
+    zip -r ../temp.zip *
+
+# Replace the existing wheel with our fixed version.
+WORKDIR /work/build
+RUN rm -rf temp && \
+    for f in *.whl; do rm "$f" && mv temp.zip "$f"; done
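
For anyone trying this Dockerfile standalone, a hedged usage sketch (image
tag and output directory are illustrative, not part of the PR; the repo's
tool.py drives the same flow):

    # Build from inside docker_multiarch/ so ADD can find arm.crosscompile.mk
    cd docker_multiarch
    docker build -f Dockerfile.build.jetson -t mxnet-jetson .
    # Copy the wheel and libmxnet.so out of the image to the host
    mkdir -p out
    docker run --rm -v "$(pwd)/out:/out" mxnet-jetson sh -c 'cp /work/build/* /out'
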
diff --git a/docker_multiarch/arm.crosscompile.mk b/docker_multiarch/arm.crosscompile.mk
new file mode 100644
index 00000000000..2bca9e396d7
--- /dev/null
+++ b/docker_multiarch/arm.crosscompile.mk
@@ -0,0 +1,161 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+#-------------------------------------------------------------------------------
+#  Template configuration for compiling mxnet
+#
+#  If you want to change the configuration, please use the following
+#  steps. Assume you are in the root directory of mxnet. First copy this
+#  file so that any local changes will be ignored by git
+#
+#  $ cp make/config.mk .
+#
+#  Next modify the according entries, and then compile by
+#
+#  $ make
+#
+#  or build in parallel with 8 threads
+#
+#  $ make -j8
+#-------------------------------------------------------------------------------
+
+#---------------------
+# We do not assign compilers here.  Often, when cross-compiling, these will
+# already be set correctly.
+#--------------------
+
+export NVCC = nvcc
+
+# whether to compile with options for MXNet developers
+DEV = 0
+
+# whether to compile with debugging enabled
+DEBUG = 0
+
+# whether to compile with the profiler
+USE_PROFILER =
+
+# the additional link flags you want to add
+# TODO: Move flags here
+ADD_LDFLAGS=-static-libstdc++
+
+# the additional compile flags you want to add
+ADD_CFLAGS =
+
+#---------------------------------------------
+# matrix computation libraries for CPU/GPU
+#---------------------------------------------
+
+# whether to use CUDA during compilation
+USE_CUDA = 0
+
+# add the path to the CUDA library to the link and compile flags;
+# if you have already added it to your environment variables, leave this as NONE
+# USE_CUDA_PATH = /usr/local/cuda
+USE_CUDA_PATH = NONE
+
+# whether to use the cuDNN library
+USE_CUDNN = 0
+
+# whether to use CUDA runtime compilation for writing kernels in a native language (e.g. Python)
+USE_NVRTC = 0
+
+# whether to use OpenCV during compilation
+# you can disable it, but then you will not be able to use the
+# imbin iterator
+USE_OPENCV = 0
+
+# use openmp for parallelization
+USE_OPENMP = 1
+
+# whether to use the NNPACK library
+USE_NNPACK = 0
+
+# For arm builds we're using openblas
+USE_BLAS = openblas
+
+# whether to use LAPACK during compilation
+# only effective when compiled with BLAS versions openblas/apple/atlas/mkl
+USE_LAPACK = 1
+
+# path to lapack library in case of a non-standard installation
+USE_LAPACK_PATH =
+
+# add the path to the Intel library; you may need it for MKL if you did not
+# add the path to your environment variables
+USE_INTEL_PATH = NONE
+
+# If using MKL only for BLAS, choose static linking automatically to allow the Python wrapper
+USE_STATIC_MKL = NONE
+ifeq ($(USE_BLAS), mkl)
+USE_STATIC_MKL = 1
+endif
+
+#----------------------------
+# distributed computing
+#----------------------------
+
+# whether or not to enable multi-machine support
+USE_DIST_KVSTORE = 0
+
+# whether or not to allow reading and writing HDFS directly. If yes, then
+# Hadoop is required
+USE_HDFS = 0
+
+# path to libjvm.so. required if USE_HDFS=1
+LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server
+
+# whether or not to allow reading and writing AWS S3 directly. If yes, then
+# libcurl4-openssl-dev is required; it can be installed on Ubuntu via
+# sudo apt-get install -y libcurl4-openssl-dev
+USE_S3 = 0
+
+#----------------------------
+# additional operators
+#----------------------------
+
+# path to folders containing project-specific operators that you don't want to put in src/operators
+EXTRA_OPERATORS =
+
+#----------------------------
+# other features
+#----------------------------
+
+# Create C++ interface package
+USE_CPP_PACKAGE = 0
+
+#----------------------------
+# plugins
+#----------------------------
+
+# whether to use caffe integration. This requires installing caffe.
+# You also need to add CAFFE_PATH/build/lib to your LD_LIBRARY_PATH
+# CAFFE_PATH = $(HOME)/caffe
+# MXNET_PLUGINS += plugin/caffe/caffe.mk
+
+# whether to use torch integration. This requires installing torch.
+# You also need to add TORCH_PATH/install/lib to your LD_LIBRARY_PATH
+# TORCH_PATH = $(HOME)/torch
+# MXNET_PLUGINS += plugin/torch/torch.mk
+
+# WARPCTC_PATH = $(HOME)/warp-ctc
+# MXNET_PLUGINS += plugin/warpctc/warpctc.mk
+
+# whether to use SFrame integration. This requires building SFrame:
+# git@github.com:dato-code/SFrame.git
+# SFRAME_PATH = $(HOME)/SFrame
+# MXNET_PLUGINS += plugin/sframe/plugin.mk
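
End to end, the artifact of both build paths is a single universal wheel that
installs on the device as usual; a sketch, with the filename and host
illustrative:

    # On the build host
    scp mxnet-*.whl nvidia@jetson:
    # On the Jetson (JetPack already provides the CUDA/cuDNN runtime)
    pip install mxnet-*.whl
    python -c "import mxnet as mx; print(mx.nd.ones((2, 2), ctx=mx.gpu(0)))"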


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services