From 30c40c643c19f6e2ac8679fa76d09d0f9ceccc65 Mon Sep 17 00:00:00 2001
From: chenmengzheAAA <123789350+chenmengzheAAA@users.noreply.github.com>
Date: Thu, 14 Sep 2023 18:00:43 +0800
Subject: [PATCH] Update modelscope_models.md
---
funasr/runtime/python/onnxruntime/funasr_onnx/punc_bin.py | 78 ++++++++++++++++++++++++++++++++------
1 file changed, 65 insertions(+), 13 deletions(-)
diff --git a/funasr/runtime/python/onnxruntime/funasr_onnx/punc_bin.py b/funasr/runtime/python/onnxruntime/funasr_onnx/punc_bin.py
index 0eb764f..6e289f6 100644
--- a/funasr/runtime/python/onnxruntime/funasr_onnx/punc_bin.py
+++ b/funasr/runtime/python/onnxruntime/funasr_onnx/punc_bin.py
@@ -1,4 +1,6 @@
# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
import os.path
from pathlib import Path
@@ -8,24 +10,58 @@
from .utils.utils import (ONNXRuntimeError,
OrtInferSession, get_logger,
read_yaml)
-from .utils.utils import (TokenIDConverter, split_to_mini_sentence,code_mix_split_words)
+from .utils.utils import (TokenIDConverter, split_to_mini_sentence, code_mix_split_words, code_mix_split_words_jieba)
logging = get_logger()
class CT_Transformer():
+ """
+ Author: Speech Lab of DAMO Academy, Alibaba Group
+ CT-Transformer: Controllable time-delay transformer for real-time punctuation prediction and disfluency detection
+ https://arxiv.org/pdf/2003.01309.pdf
+ """
def __init__(self, model_dir: Union[str, Path] = None,
batch_size: int = 1,
device_id: Union[str, int] = "-1",
quantize: bool = False,
- intra_op_num_threads: int = 4
+ intra_op_num_threads: int = 4,
+ cache_dir: str = None,
):
-
+
if not Path(model_dir).exists():
- raise FileNotFoundError(f'{model_dir} does not exist.')
-
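+ # model_dir may be a modelscope model name rather than a local path; if the
+ # path does not exist, try to download the model from modelscope into cache_dir.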
+ try:
+ from modelscope.hub.snapshot_download import snapshot_download
+ except ImportError:
+ raise ImportError(
+ "You are downloading a model from modelscope; please install modelscope and try again. To install modelscope, run:\n"
+ "\npip3 install -U modelscope\n"
+ "For users in China, you can install with:\n"
+ "\npip3 install -U modelscope -i https://mirror.sjtu.edu.cn/pypi/web/simple")
+ try:
+ model_dir = snapshot_download(model_dir, cache_dir=cache_dir)
+ except Exception:
+ raise ValueError(
+ "model_dir must be a model name from modelscope or a local path downloaded from modelscope, but is {}".format(model_dir))
+
model_file = os.path.join(model_dir, 'model.onnx')
if quantize:
model_file = os.path.join(model_dir, 'model_quant.onnx')
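+ # If no ONNX file is present yet, export one from the downloaded checkpoint.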
+ if not os.path.exists(model_file):
+ print("ONNX model file does not exist, exporting it now")
+ try:
+ from funasr.export.export_model import ModelExport
+ except ImportError:
+ raise ImportError(
+ "You are exporting an ONNX model; please install funasr and try again. To install funasr, run:\n"
+ "\npip3 install -U funasr\n"
+ "For users in China, you can install with:\n"
+ "\npip3 install -U funasr -i https://mirror.sjtu.edu.cn/pypi/web/simple")
+ export_model = ModelExport(
+ cache_dir=cache_dir,
+ onnx=True,
+ device="cpu",
+ quant=quantize,
+ )
+ export_model.export(model_dir)
+
config_file = os.path.join(model_dir, 'punc.yaml')
config = read_yaml(config_file)
@@ -41,9 +77,18 @@
self.punc_list[i] = "锛�"
elif self.punc_list[i] == "銆�":
self.period = i
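+ # Optional jieba word segmentation: enabled when the model config sets
+ # "seg_jieba"; the user dictionary ships with the model as jieba_usr_dict.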
+ if "seg_jieba" in config:
+ self.seg_jieba = True
+ self.jieba_usr_dict_path = os.path.join(model_dir, 'jieba_usr_dict')
+ self.code_mix_split_words_jieba = code_mix_split_words_jieba(self.jieba_usr_dict_path)
+ else:
+ self.seg_jieba = False
def __call__(self, text: Union[list, str], split_size=20):
- split_text = code_mix_split_words(text)
+ if self.seg_jieba:
+ split_text = self.code_mix_split_words_jieba(text)
+ else:
+ split_text = code_mix_split_words(text)
split_text_id = self.converter.tokens2ids(split_text)
mini_sentences = split_to_mini_sentence(split_text, split_size)
mini_sentences_id = split_to_mini_sentence(split_text_id, split_size)
@@ -57,7 +102,7 @@
mini_sentence = mini_sentences[mini_sentence_i]
mini_sentence_id = mini_sentences_id[mini_sentence_i]
mini_sentence = cache_sent + mini_sentence
- mini_sentence_id = np.array(cache_sent_id + mini_sentence_id, dtype='int64')
+ mini_sentence_id = np.array(cache_sent_id + mini_sentence_id, dtype='int32')
data = {
"text": mini_sentence_id[None,:],
"text_lengths": np.array([len(mini_sentence_id)], dtype='int32'),
@@ -119,13 +164,19 @@
class CT_Transformer_VadRealtime(CT_Transformer):
+ """
+ Author: Speech Lab of DAMO Academy, Alibaba Group
+ CT-Transformer: Controllable time-delay transformer for real-time punctuation prediction and disfluency detection
+ https://arxiv.org/pdf/2003.01309.pdf
+ """
def __init__(self, model_dir: Union[str, Path] = None,
batch_size: int = 1,
device_id: Union[str, int] = "-1",
quantize: bool = False,
- intra_op_num_threads: int = 4
+ intra_op_num_threads: int = 4,
+ cache_dir: str = None
):
- super(CT_Transformer_VadRealtime, self).__init__(model_dir, batch_size, device_id, quantize, intra_op_num_threads)
+ super(CT_Transformer_VadRealtime, self).__init__(model_dir, batch_size, device_id, quantize, intra_op_num_threads, cache_dir=cache_dir)
def __call__(self, text: str, param_dict: map, split_size=20):
cache_key = "cache"
@@ -136,7 +187,7 @@
else:
precache = ""
cache = []
- full_text = precache + text
+ full_text = precache + " " + text
split_text = code_mix_split_words(full_text)
split_text_id = self.converter.tokens2ids(split_text)
mini_sentences = split_to_mini_sentence(split_text, split_size)
@@ -154,13 +205,14 @@
mini_sentence = mini_sentences[mini_sentence_i]
mini_sentence_id = mini_sentences_id[mini_sentence_i]
mini_sentence = cache_sent + mini_sentence
- mini_sentence_id = np.concatenate((cache_sent_id, mini_sentence_id), axis=0)
+ mini_sentence_id = np.concatenate((cache_sent_id, mini_sentence_id), axis=0, dtype='int32')
text_length = len(mini_sentence_id)
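+ # Build the VAD mask once; it is fed as both vad_mask and sub_masks below.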
+ vad_mask = self.vad_mask(text_length, len(cache))[None, None, :, :].astype(np.float32)
data = {
"input": mini_sentence_id[None,:],
"text_lengths": np.array([text_length], dtype='int32'),
- "vad_mask": self.vad_mask(text_length, len(cache))[None, None, :, :].astype(np.float32),
- "sub_masks": np.tril(np.ones((text_length, text_length), dtype=np.float32))[None, None, :, :].astype(np.float32)
+ "vad_mask": vad_mask,
+ "sub_masks": vad_mask
}
try:
outputs = self.infer(data['input'], data['text_lengths'], data['vad_mask'], data["sub_masks"])
--
Gitblit v1.9.1
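
For context, a minimal usage sketch of the constructors as extended by this patch, assuming funasr-onnx and modelscope are installed. The model id below is illustrative (any modelscope punctuation model name or a local model directory should work), and the sample sentences are placeholders; the first element of each return value is the punctuated text, per this module's convention.

    from funasr_onnx import CT_Transformer, CT_Transformer_VadRealtime

    # Offline: with this patch, a modelscope model name is downloaded into
    # cache_dir, and the ONNX file is exported on first use if it is missing.
    model = CT_Transformer("damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
                           cache_dir="./models")
    result = model("跨境河流是养育沿岸人民的生命之源")
    print(result[0])  # punctuated text

    # Streaming: reuse one param_dict so its "cache" entry carries the
    # unpunctuated tail of the previous chunk into the next call.
    vad_model = CT_Transformer_VadRealtime("damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
                                           cache_dir="./models")
    param_dict = {"cache": []}
    for chunk in ["跨境河流是养育", "沿岸人民的生命之源"]:
        out = vad_model(chunk, param_dict)
        print(out[0])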