From 113b7c74f1bff3e3f801a193da8488d19d89dcfe Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Wed, 27 Mar 2024 11:12:14 +0800
Subject: [PATCH] paraformer-torch: wrap torch::jit::load in try/catch and log load result

---
 runtime/onnxruntime/src/paraformer-torch.cpp |   14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/runtime/onnxruntime/src/paraformer-torch.cpp b/runtime/onnxruntime/src/paraformer-torch.cpp
index a08f1bd..06c88f6 100644
--- a/runtime/onnxruntime/src/paraformer-torch.cpp
+++ b/runtime/onnxruntime/src/paraformer-torch.cpp
@@ -38,15 +38,22 @@
         LOG(ERROR) << "CUDA is not available! Please check your GPU settings";
         exit(-1);
     } else {
-        LOG(INFO) << "CUDA available! Running on GPU";
+        LOG(INFO) << "CUDA is available, running on GPU";
         device = at::kCUDA;
     }
     #endif
     #ifdef USE_IPEX
     torch::jit::setTensorExprFuserEnabled(false);
     #endif
-    torch::jit::script::Module model = torch::jit::load(am_model, device);
-    model_ = std::make_shared<TorchModule>(std::move(model));    
+
+    try {
+        torch::jit::script::Module model = torch::jit::load(am_model, device);
+        model_ = std::make_shared<TorchModule>(std::move(model)); 
+        LOG(INFO) << "Successfully load model from " << am_model;
+    } catch (std::exception const &e) {
+        LOG(ERROR) << "Error when load am model: " << am_model << e.what();
+        exit(-1);
+    }
 }
 
 void ParaformerTorch::InitLm(const std::string &lm_file, 
@@ -359,6 +366,7 @@
 }
 
 std::vector<std::vector<float>> ParaformerTorch::CompileHotwordEmbedding(std::string &hotwords) {
+    // TODO
     std::vector<std::vector<float>> result(1, std::vector<float>(10, 0.0f));
     return result;
 }

--
Gitblit v1.9.1