From f803707b4723a7baa8f589da6b36dd463ee484bc Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Tue, 26 Mar 2024 17:53:48 +0800
Subject: [PATCH] fix GetValue BLADEDISC
---
runtime/onnxruntime/src/paraformer-torch.cpp | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/runtime/onnxruntime/src/paraformer-torch.cpp b/runtime/onnxruntime/src/paraformer-torch.cpp
index a08f1bd..7cac291 100644
--- a/runtime/onnxruntime/src/paraformer-torch.cpp
+++ b/runtime/onnxruntime/src/paraformer-torch.cpp
@@ -38,15 +38,22 @@
LOG(ERROR) << "CUDA is not available! Please check your GPU settings";
exit(-1);
} else {
- LOG(INFO) << "CUDA available! Running on GPU";
+ LOG(INFO) << "CUDA is available, running on GPU";
device = at::kCUDA;
}
#endif
#ifdef USE_IPEX
torch::jit::setTensorExprFuserEnabled(false);
#endif
- torch::jit::script::Module model = torch::jit::load(am_model, device);
- model_ = std::make_shared<TorchModule>(std::move(model));
+
+ try {
+ torch::jit::script::Module model = torch::jit::load(am_model, device);
+ model_ = std::make_shared<TorchModule>(std::move(model));
+ LOG(INFO) << "Successfully load model from " << am_model;
+ } catch (std::exception const &e) {
+ LOG(ERROR) << "Error when load am model: " << e.what();
+ exit(-1);
+ }
}
void ParaformerTorch::InitLm(const std::string &lm_file,
@@ -359,6 +366,7 @@
}
std::vector<std::vector<float>> ParaformerTorch::CompileHotwordEmbedding(std::string &hotwords) {
+ // TODO
std::vector<std::vector<float>> result(1, std::vector<float>(10, 0.0f));
return result;
}
--
Gitblit v1.9.1