You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by la...@apache.org on 2021/02/04 15:09:30 UTC
[incubator-mxnet] branch v1.x updated: Simplify TRT build by adding
onnx_tensorrt targets in CMake (#19742)
This is an automated email from the ASF dual-hosted git repository.
lausen pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/v1.x by this push:
new 4bd6d48 Simplify TRT build by adding onnx_tensorrt targets in CMake (#19742)
4bd6d48 is described below
commit 4bd6d484bda46382a86e22fedd875e38af7dea47
Author: Serge Panev <sp...@nvidia.com>
AuthorDate: Fri Feb 5 00:04:59 2021 +0900
Simplify TRT build by adding onnx_tensorrt targets in CMake (#19742)
Signed-off-by: Serge Panev <sp...@nvidia.com>
---
CMakeLists.txt | 24 +++++++++---------------
ci/docker/runtime_functions.sh | 35 +++++------------------------------
2 files changed, 14 insertions(+), 45 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c4b37bb..09ccf44 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -239,29 +239,23 @@ if(USE_TENSORRT)
message(STATUS "Using TensorRT")
set(ONNX_PATH 3rdparty/onnx-tensorrt/third_party/onnx/build/)
set(ONNX_TRT_PATH 3rdparty/onnx-tensorrt/build/)
+ add_definitions(-DMXNET_USE_TENSORRT=1)
+ add_definitions(-DONNX_NAMESPACE=onnx)
+ add_definitions(-DONNX_ML=1)
+ set(BUILD_SHARED_LIBS_SAVED "${BUILD_SHARED_LIBS}")
+ set(BUILD_SHARED_LIBS ON)
+ add_subdirectory(3rdparty/onnx-tensorrt/ EXCLUDE_FROM_ALL)
+ set(BUILD_SHARED_LIBS "${BUILD_SHARED_LIBS_SAVED}")
include_directories(${ONNX_PATH})
include_directories(3rdparty/onnx-tensorrt/)
include_directories(3rdparty/)
include_directories(3rdparty/onnx-tensorrt/third_party/onnx/)
- add_definitions(-DMXNET_USE_TENSORRT=1)
- add_definitions(-DONNX_NAMESPACE=onnx)
- add_definitions(-DONNX_ML=1)
find_package(Protobuf REQUIRED)
- find_library(ONNX_LIBRARY NAMES libonnx.so REQUIRED
- PATHS ${ONNX_PATH}
- DOC "Path to onnx library.")
- find_library(ONNX_PROTO_LIBRARY NAMES libonnx_proto.so REQUIRED
- PATHS ${ONNX_PATH}
- DOC "Path to onnx_proto library.")
- find_library(ONNX_TRT_PARSER_LIBRARY NAMES libnvonnxparser.so REQUIRED
- PATHS ${ONNX_TRT_PATH}
- DOC "Path to onnx_proto parser library.")
-
- list(APPEND mxnet_LINKER_LIBS libnvinfer.so ${ONNX_TRT_PARSER_LIBRARY}
- ${ONNX_PROTO_LIBRARY} ${ONNX_LIBRARY} ${PROTOBUF_LIBRARY})
+ list(APPEND mxnet_LINKER_LIBS libnvinfer.so nvonnxparser
+ onnx_proto onnx ${PROTOBUF_LIBRARY})
endif()
# please note that when you enable this, you might run into a linker not being able to work properly due to large code injection.
diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index 0bbc6d7..e47b384 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -709,40 +709,11 @@ build_ubuntu_gpu_tensorrt() {
build_ccache_wrappers
- export ONNX_NAMESPACE=onnx
-
- # Build ONNX
- pushd .
- echo "Installing ONNX."
- cd 3rdparty/onnx-tensorrt/third_party/onnx
- rm -rf build
- mkdir -p build
- cd build
- cmake -DCMAKE_CXX_FLAGS=-I/usr/include/python${PYVER} -DBUILD_SHARED_LIBS=ON ..
- make -j$(nproc)
- export LIBRARY_PATH=`pwd`:`pwd`/onnx/:$LIBRARY_PATH
- export CPLUS_INCLUDE_PATH=`pwd`:$CPLUS_INCLUDE_PATH
- export CXXFLAGS=-I`pwd`
- popd
-
- # Build ONNX-TensorRT
- pushd .
- cd 3rdparty/onnx-tensorrt/
- mkdir -p build
- cd build
- cmake -DONNX_NAMESPACE=$ONNX_NAMESPACE ..
- make -j$(nproc)
- export LIBRARY_PATH=`pwd`:$LIBRARY_PATH
- popd
-
- mkdir -p /work/mxnet/lib/
- cp 3rdparty/onnx-tensorrt/third_party/onnx/build/*.so /work/mxnet/lib/
- cp -L 3rdparty/onnx-tensorrt/build/libnvonnxparser.so* /work/mxnet/lib/
-
cd /work/build
cmake -DUSE_CUDA=1 \
-DUSE_CUDNN=1 \
-DUSE_OPENCV=1 \
+ -DONNX_NAMESPACE=onnx \
-DUSE_TENSORRT=1 \
-DUSE_OPENMP=0 \
-DUSE_MKLDNN=0 \
@@ -752,6 +723,10 @@ build_ubuntu_gpu_tensorrt() {
/work/mxnet
ninja
+
+ mkdir -p /work/mxnet/lib/
+ cp 3rdparty/onnx-tensorrt/third_party/onnx/*.so /work/mxnet/lib/
+ cp -L 3rdparty/onnx-tensorrt/libnvonnxparser.so* /work/mxnet/lib/
}
build_ubuntu_gpu_mkldnn() {