diff --git a/build-for-1.9.0.sh b/build-for-1.9.0.sh
new file mode 100755
index 0000000000..2ae0273c24
--- /dev/null
+++ b/build-for-1.9.0.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+
+set -ex
+
+if [[ ! -f ./setup.py || ! -f ./sherpa-onnx/c-api/c-api.h || ! -d ./android/SherpaOnnx ]]; then
+  echo "Please run this script inside the sherpa-onnx directory"
+  exit 1
+fi
+
+if [ ! -d /Users/fangjun/t/onnxruntime-osx-x64-1.9.0/lib ]; then
+  mkdir -p /Users/fangjun/t
+  pushd /Users/fangjun/t
+  wget https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-osx-x64-1.9.0.tgz
+  tar xvf onnxruntime-osx-x64-1.9.0.tgz
+  rm onnxruntime-osx-x64-1.9.0.tgz
+  popd
+fi
+
+export SHERPA_ONNXRUNTIME_LIB_DIR=/Users/fangjun/t/onnxruntime-osx-x64-1.9.0/lib
+export SHERPA_ONNXRUNTIME_INCLUDE_DIR=/Users/fangjun/t/onnxruntime-osx-x64-1.9.0/include
+
+mkdir -p ./build-1.9.0
+cd ./build-1.9.0
+cmake -DBUILD_SHARED_LIBS=ON ..
+make
diff --git a/sherpa-onnx/csrc/provider-config.h b/sherpa-onnx/csrc/provider-config.h
index 4c9c0db015..79d9cb2b94 100644
--- a/sherpa-onnx/csrc/provider-config.h
+++ b/sherpa-onnx/csrc/provider-config.h
@@ -14,7 +14,11 @@ namespace sherpa_onnx {
 
 struct CudaConfig {
+#if ORT_API_VERSION >= 10
   int32_t cudnn_conv_algo_search = OrtCudnnConvAlgoSearchHeuristic;
+#else
+  int32_t cudnn_conv_algo_search = 1;
+#endif
 
   CudaConfig() = default;
   explicit CudaConfig(int32_t cudnn_conv_algo_search)
diff --git a/sherpa-onnx/csrc/session.cc b/sherpa-onnx/csrc/session.cc
index a33594f0b5..9adbb9db93 100644
--- a/sherpa-onnx/csrc/session.cc
+++ b/sherpa-onnx/csrc/session.cc
@@ -11,7 +11,7 @@
 
 #include "sherpa-onnx/csrc/macros.h"
 #include "sherpa-onnx/csrc/provider.h"
 
-#if defined(__APPLE__)
+#if defined(__APPLE__) && (ORT_API_VERSION >= 10)
 #include "coreml_provider_factory.h"  // NOLINT
 #endif
@@ -76,9 +76,10 @@ Ort::SessionOptions GetSessionOptionsImpl(
       break;
     }
     case Provider::kTRT: {
+#if ORT_API_VERSION >= 10
       if (provider_config == nullptr) {
         SHERPA_ONNX_LOGE(
-            "Tensorrt support for Online models ony,"
+            "Tensorrt support for Online models only, "
             "Must be extended for offline and others");
         exit(1);
       }
@@ -151,6 +152,12 @@ Ort::SessionOptions GetSessionOptionsImpl(
       }
       // break; is omitted here intentionally so that
      // if TRT not available, CUDA will be used
+#else
+      SHERPA_ONNX_LOGE(
+          "TensorRT is not supported: onnxruntime API version %d is too old. "
+          "Falling back to the CUDA provider",
+          static_cast<int32_t>(ORT_API_VERSION));
+#endif
     }
     case Provider::kCUDA: {
       if (std::find(available_providers.begin(), available_providers.end(),
@@ -165,7 +172,11 @@ Ort::SessionOptions GetSessionOptionsImpl(
       } else {
         options.device_id = 0;
         // Default OrtCudnnConvAlgoSearchExhaustive is extremely slow
+#if ORT_API_VERSION >= 10
         options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearchHeuristic;
+#else
+        options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearch(1);
+#endif
         // set more options on need
       }
       sess_opts.AppendExecutionProvider_CUDA(options);
@@ -196,7 +207,7 @@ Ort::SessionOptions GetSessionOptionsImpl(
       break;
     }
     case Provider::kCoreML: {
-#if defined(__APPLE__)
+#if defined(__APPLE__) && (ORT_API_VERSION >= 10)
       uint32_t coreml_flags = 0;
       (void)OrtSessionOptionsAppendExecutionProvider_CoreML(sess_opts,
                                                             coreml_flags);
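
Why the gates compare against 10: ORT_API_VERSION is 9 in the onnxruntime 1.9.0 headers targeted by build-for-1.9.0.sh, and the prefixed enumerators such as OrtCudnnConvAlgoSearchHeuristic only appear in onnxruntime 1.10; older headers spell the enum values without the prefix, which is why the old branches fall back to the raw value 1. Below is a minimal standalone sketch of the same compile-time gating, assuming onnxruntime's C/C++ API headers are on the include path; ConfigureCuda is a hypothetical helper written for illustration, not a sherpa-onnx function.

    // Sketch: pick the cuDNN conv algorithm search mode across onnxruntime
    // versions, mirroring the #if/#else pattern used in the patch above.
    #include <onnxruntime_cxx_api.h>  // defines ORT_API_VERSION

    // Hypothetical helper for illustration only.
    static void ConfigureCuda(OrtCUDAProviderOptions &options) {
    #if ORT_API_VERSION >= 10
      // onnxruntime >= 1.10: the prefixed enumerator exists in the header.
      options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearchHeuristic;
    #else
      // onnxruntime 1.9.x: the prefixed name is absent, so fall back to its
      // numeric value (heuristic search == 1 in the OrtCudnnConvAlgoSearch enum).
      options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearch(1);
    #endif
    }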