[TIPC] add tipc c++ infer for msvsr (PaddlePaddle#676)
* add tipc c++ infer for msvsr
Showing 11 changed files with 762 additions and 10 deletions.
@@ -0,0 +1,223 @@
project(vsr CXX C)
cmake_minimum_required(VERSION 3.14)

option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
option(WITH_TENSORRT "Compile demo with TensorRT." OFF)

SET(PADDLE_LIB "" CACHE PATH "Location of libraries")
SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
SET(CUDA_LIB "" CACHE PATH "Location of libraries")
SET(CUDNN_LIB "" CACHE PATH "Location of libraries")
SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT")

set(DEMO_NAME "vsr")

macro(safe_set_static_flag)
    foreach(flag_var
        CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
        CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
        if(${flag_var} MATCHES "/MD")
            string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
        endif(${flag_var} MATCHES "/MD")
    endforeach(flag_var)
endmacro()

if (WITH_MKL)
    ADD_DEFINITIONS(-DUSE_MKL)
endif()

if(NOT DEFINED PADDLE_LIB)
    message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
endif()

if(NOT DEFINED OPENCV_DIR)
    message(FATAL_ERROR "please set OPENCV_DIR with -DOPENCV_DIR=/path/opencv")
endif()

if (WIN32)
    include_directories("${PADDLE_LIB}/paddle/include")
    link_directories("${PADDLE_LIB}/paddle/lib")
    find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH)
else ()
    find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH)
    include_directories("${PADDLE_LIB}/paddle/include")
    link_directories("${PADDLE_LIB}/paddle/lib")
endif ()
include_directories(${OpenCV_INCLUDE_DIRS})

if (WIN32)
    add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
    if(WITH_MKL)
        set(FLAG_OPENMP "/openmp")
    endif()
    set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
    set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
    set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
    set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
    if (WITH_STATIC_LIB)
        safe_set_static_flag()
        add_definitions(-DSTATIC_LIB)
    endif()
    message("cmake c debug flags " ${CMAKE_C_FLAGS_DEBUG})
    message("cmake c release flags " ${CMAKE_C_FLAGS_RELEASE})
    message("cmake cxx debug flags " ${CMAKE_CXX_FLAGS_DEBUG})
    message("cmake cxx release flags " ${CMAKE_CXX_FLAGS_RELEASE})
else()
    if(WITH_MKL)
        set(FLAG_OPENMP "-fopenmp")
    endif()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O3 ${FLAG_OPENMP} -std=c++11")
    set(CMAKE_STATIC_LIBRARY_PREFIX "")
    message("cmake cxx flags " ${CMAKE_CXX_FLAGS})
endif()

if (WITH_GPU)
    if (NOT DEFINED CUDA_LIB OR ${CUDA_LIB} STREQUAL "")
        message(FATAL_ERROR "please set CUDA_LIB with -DCUDA_LIB=/path/cuda-8.0/lib64")
    endif()
    if (NOT WIN32)
        if (NOT DEFINED CUDNN_LIB)
            message(FATAL_ERROR "please set CUDNN_LIB with -DCUDNN_LIB=/path/cudnn_v7.4/cuda/lib64")
        endif()
    endif(NOT WIN32)
endif()

include_directories("${PADDLE_LIB}/third_party/install/protobuf/include")
include_directories("${PADDLE_LIB}/third_party/install/glog/include")
include_directories("${PADDLE_LIB}/third_party/install/gflags/include")
include_directories("${PADDLE_LIB}/third_party/install/xxhash/include")
include_directories("${PADDLE_LIB}/third_party/install/zlib/include")
include_directories("${PADDLE_LIB}/third_party/boost")
include_directories("${PADDLE_LIB}/third_party/eigen3")

include_directories("${CMAKE_SOURCE_DIR}/")

if (NOT WIN32)
    if (WITH_TENSORRT AND WITH_GPU)
        include_directories("${TENSORRT_DIR}/include")
        link_directories("${TENSORRT_DIR}/lib")
    endif()
endif(NOT WIN32)

link_directories("${PADDLE_LIB}/third_party/install/zlib/lib")

link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
link_directories("${PADDLE_LIB}/third_party/install/xxhash/lib")
link_directories("${PADDLE_LIB}/paddle/lib")

if(WITH_MKL)
    include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
    if (WIN32)
        set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.lib
            ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.lib)
    else ()
        set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
            ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
        execute_process(COMMAND cp -r ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} /usr/lib)
    endif ()
    set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn")
    if(EXISTS ${MKLDNN_PATH})
        include_directories("${MKLDNN_PATH}/include")
        if (WIN32)
            set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
        else ()
            set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
        endif ()
    endif()
else()
    if (WIN32)
        set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
    else ()
        set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
    endif ()
endif()

# Note: libpaddle_inference_api.so/a must be put before libpaddle_inference.so/a
if(WITH_STATIC_LIB)
    if(WIN32)
        set(DEPS
            ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
    else()
        set(DEPS
            ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
    endif()
else()
    if(WIN32)
        set(DEPS
            ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
    else()
        set(DEPS
            ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
    endif()
endif(WITH_STATIC_LIB)

if (NOT WIN32)
    set(DEPS ${DEPS}
        ${MATH_LIB} ${MKLDNN_LIB}
        glog gflags protobuf z xxhash
        )
    if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib")
        set(DEPS ${DEPS} snappystream)
    endif()
    if (EXISTS "${PADDLE_LIB}/third_party/install/snappy/lib")
        set(DEPS ${DEPS} snappy)
    endif()
else()
    set(DEPS ${DEPS}
        ${MATH_LIB} ${MKLDNN_LIB}
        glog gflags_static libprotobuf xxhash)
    set(DEPS ${DEPS} libcmt shlwapi)
    if (EXISTS "${PADDLE_LIB}/third_party/install/snappy/lib")
        set(DEPS ${DEPS} snappy)
    endif()
    if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib")
        set(DEPS ${DEPS} snappystream)
    endif()
endif(NOT WIN32)

if(WITH_GPU)
    if(NOT WIN32)
        if (WITH_TENSORRT)
            set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
            set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
        endif()
        set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
        set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
    else()
        set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX})
        set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX})
        set(DEPS ${DEPS} ${CUDNN_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX})
    endif()
endif()

if (NOT WIN32)
    set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
    set(DEPS ${DEPS} ${EXTERNAL_LIB})
endif()

set(DEPS ${DEPS} ${OpenCV_LIBS})

AUX_SOURCE_DIRECTORY(./src SRCS)
add_executable(${DEMO_NAME} ${SRCS})
target_link_libraries(${DEMO_NAME} ${DEPS})

if (WIN32 AND WITH_MKL)
    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./mklml.dll
        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll
        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
        )
endif()
@@ -0,0 +1,27 @@
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <chrono>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <vector>

#include <cstring>
#include <fstream>
#include <numeric>

using namespace std;

// Normalize an image in place using per-channel mean and scale values.
class Normalize {
public:
    virtual void Run(cv::Mat *im, const std::vector<float> &mean,
                     const std::vector<float> &scale, const bool is_scale = true);
};

// RGB -> CHW
class Permute {
public:
    virtual void Run(const cv::Mat *im, float *data);
};
@@ -0,0 +1,57 @@
#include <string>
#include <vector>
#include <memory>
#include <utility>
#include <ctime>
#include <numeric>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#include "include/process_op.h"
#include "paddle_inference_api.h"

namespace PaddleGAN {

class VSR {
public:
    explicit VSR(const std::string& model_path,
                 const std::string& param_path,
                 const std::string& device,
                 const int& gpu_id,
                 const bool& use_mkldnn,
                 const int& cpu_threads) {

        this->device_ = device;
        this->gpu_id_ = gpu_id;
        this->use_mkldnn_ = use_mkldnn;
        this->cpu_threads_ = cpu_threads;

        LoadModel(model_path, param_path);
    }

    // Load paddle inference model
    void LoadModel(const std::string& model_path, const std::string& param_path);

    // Run predictor
    void Run(const std::vector<cv::Mat>& imgs, std::vector<cv::Mat>* result = nullptr);

private:
    std::shared_ptr<paddle_infer::Predictor> predictor_;

    std::string device_ = "GPU";
    int gpu_id_ = 0;
    bool use_mkldnn_ = false;
    int cpu_threads_ = 1;

    std::vector<float> mean_ = {0., 0., 0.};
    std::vector<float> scale_ = {1., 1., 1.};

    // pre/post-process
    Permute permute_op_;
    Normalize normalize_op_;
    std::vector<float> Preprocess(cv::Mat& img);
};

}  // namespace PaddleGAN
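The source file that implements and drives this class is not shown above. For orientation, a hypothetical caller could look like the sketch below; the model/parameter paths, frame names, and frame count are placeholders, and the exact output layout depends on the post-processing inside VSR::Run, which is not part of this excerpt.

// main.cc (sketch) -- illustrative usage only, not part of this commit.
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>

#include "include/vsr.h"

int main() {
    // Placeholder paths to an exported MSVSR inference model (.pdmodel/.pdiparams).
    PaddleGAN::VSR vsr("./model/msvsr.pdmodel",
                       "./model/msvsr.pdiparams",
                       /*device=*/"GPU",
                       /*gpu_id=*/0,
                       /*use_mkldnn=*/false,
                       /*cpu_threads=*/1);

    // MSVSR is a multi-frame model, so feed it a small batch of consecutive
    // low-resolution frames (file names are placeholders).
    std::vector<cv::Mat> frames;
    for (int i = 0; i < 2; ++i) {
        frames.push_back(cv::imread("frame_" + std::to_string(i) + ".png"));
    }

    std::vector<cv::Mat> outputs;
    vsr.Run(frames, &outputs);
    // `outputs` now holds the super-resolved frames; how they are scaled and
    // typed depends on VSR::Run's post-processing, which is not shown here.
    return 0;
}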