From 2b747626c898fb6b2ee78038ad052b761a77269d Mon Sep 17 00:00:00 2001
From: yijinsheng <1183186048@qq.com>
Date: Mon, 11 Nov 2024 23:52:48 +0800
Subject: [PATCH] paraformer_large_offline triton运行bug 修复
---
runtime/triton_gpu/model_repo_paraformer_large_offline/scoring/1/model.py | 6 +++---
runtime/triton_gpu/model_repo_paraformer_large_offline/scoring/config.pbtxt | 2 +-
runtime/triton_gpu/Dockerfile/Dockerfile.server | 15 +++++++++++----
runtime/triton_gpu/README_paraformer_offline.md | 2 ++
4 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/runtime/triton_gpu/Dockerfile/Dockerfile.server b/runtime/triton_gpu/Dockerfile/Dockerfile.server
index d03610c..b9f9c40 100644
--- a/runtime/triton_gpu/Dockerfile/Dockerfile.server
+++ b/runtime/triton_gpu/Dockerfile/Dockerfile.server
@@ -8,11 +8,18 @@
RUN apt-get update && apt-get -y install \
python3-dev \
cmake \
- libsndfile1
-
+ libsndfile1 \
+ language-pack-zh-hans
+ENV LANG="zh_CN.UTF-8"
+ENV LANGUAGE="zh_CN:zh:en_US:en"
# -i https://pypi.tuna.tsinghua.edu.cn/simple
-RUN pip3 install torch torchaudio
-RUN pip3 install kaldifeat pyyaml
+RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip3 install --no-cache-dir torch==2.4.1 torchaudio==2.4.1
+RUN pip3 install --no-cache-dir pyyaml
+
+# Refer to https://csukuangfj.github.io/kaldifeat/installation/from_wheels.html for installation.
+RUN pip3 install --no-cache-dir https://hf-mirror.com/csukuangfj/kaldifeat/resolve/main/ubuntu-cuda/kaldifeat-1.25.5.dev20240914+cuda12.1.torch2.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
+
# Dependency for client
RUN pip3 install soundfile grpcio-tools tritonclient
diff --git a/runtime/triton_gpu/README_paraformer_offline.md b/runtime/triton_gpu/README_paraformer_offline.md
index 48e889c..ceaa571 100644
--- a/runtime/triton_gpu/README_paraformer_offline.md
+++ b/runtime/triton_gpu/README_paraformer_offline.md
@@ -10,6 +10,7 @@
cp $pretrained_model_dir/am.mvn ./model_repo_paraformer_large_offline/feature_extractor/
cp $pretrained_model_dir/config.yaml ./model_repo_paraformer_large_offline/feature_extractor/
+cp $pretrained_model_dir/tokens.json ./model_repo_paraformer_large_offline/scoring/1/
# Refer here to get model.onnx (https://github.com/alibaba-damo-academy/FunASR/blob/main/funasr/export/README.md)
cp <exported_onnx_dir>/model.onnx ./model_repo_paraformer_large_offline/encoder/1/
@@ -33,6 +34,7 @@
`-- scoring
|-- 1
| `-- model.py
+ | `-- tokens.json
`-- config.pbtxt
8 directories, 9 files
diff --git a/runtime/triton_gpu/model_repo_paraformer_large_offline/scoring/1/model.py b/runtime/triton_gpu/model_repo_paraformer_large_offline/scoring/1/model.py
index d582e50..6f7e720 100644
--- a/runtime/triton_gpu/model_repo_paraformer_large_offline/scoring/1/model.py
+++ b/runtime/triton_gpu/model_repo_paraformer_large_offline/scoring/1/model.py
@@ -74,8 +74,8 @@
load lang_char.txt
"""
with open(str(vocab_file), "rb") as f:
- config = yaml.load(f, Loader=yaml.Loader)
- return config["token_list"]
+ vocab_list = json.load(f)
+ return vocab_list
def execute(self, requests):
"""`execute` must be implemented in every Python model. `execute`
@@ -142,7 +142,7 @@
]
responses = []
for i in range(total_seq):
- sents = np.array(hyps[i : i + 1])
+ sents = np.array(hyps[i: i + 1])
out0 = pb_utils.Tensor("OUTPUT0", sents.astype(self.out0_dtype))
inference_response = pb_utils.InferenceResponse(output_tensors=[out0])
responses.append(inference_response)
diff --git a/runtime/triton_gpu/model_repo_paraformer_large_offline/scoring/config.pbtxt b/runtime/triton_gpu/model_repo_paraformer_large_offline/scoring/config.pbtxt
index 85082f4..578fb30 100644
--- a/runtime/triton_gpu/model_repo_paraformer_large_offline/scoring/config.pbtxt
+++ b/runtime/triton_gpu/model_repo_paraformer_large_offline/scoring/config.pbtxt
@@ -23,7 +23,7 @@
},
{
key: "vocabulary",
- value: { string_value: "./model_repo_paraformer_large_offline/feature_extractor/config.yaml"}
+ value: { string_value: "./model_repo_paraformer_large_offline/scoring/1/tokens.json"}
},
{
key: "lm_path"
--
Gitblit v1.9.1