From c4ac64fd5d24bb3fc8ccc441d36a07c83c8b9015 Mon Sep 17 00:00:00 2001
From: Yu Cao <monstercy@hotmail.com>
Date: Wed, 01 Oct 2025 14:46:21 +0800
Subject: [PATCH] fix "cannot find model" issue when running libtorch runtime (#2504)
---
funasr/models/ct_transformer/utils.py | 55 +++++++++++++++++++++----------------------------------
 1 file changed, 21 insertions(+), 34 deletions(-)
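
(Note, not part of the commit: the hunks below mostly reformat funasr/models/ct_transformer/utils.py without changing behavior. A minimal usage sketch of split_to_mini_sentence as it reads after this patch; the sample tokens and the word_limit value are illustrative only.)

    # Sketch: split_to_mini_sentence chunks a token list into slices of at
    # most `word_limit` tokens; any trailing remainder becomes the last chunk.
    from funasr.models.ct_transformer.utils import split_to_mini_sentence

    tokens = ["this", "is", "a", "short", "example"]  # illustrative input
    print(split_to_mini_sentence(tokens, word_limit=2))
    # -> [['this', 'is'], ['a', 'short'], ['example']]
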
diff --git a/funasr/models/ct_transformer/utils.py b/funasr/models/ct_transformer/utils.py
index 917f2e0..b6e11e7 100644
--- a/funasr/models/ct_transformer/utils.py
+++ b/funasr/models/ct_transformer/utils.py
@@ -1,4 +1,10 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
+
import re
+
def split_to_mini_sentence(words: list, word_limit: int = 20):
assert word_limit > 1
@@ -8,32 +14,11 @@
length = len(words)
sentence_len = length // word_limit
for i in range(sentence_len):
- sentences.append(words[i * word_limit:(i + 1) * word_limit])
+ sentences.append(words[i * word_limit : (i + 1) * word_limit])
if length % word_limit > 0:
- sentences.append(words[sentence_len * word_limit:])
+ sentences.append(words[sentence_len * word_limit :])
return sentences
-
-# def split_words(text: str, **kwargs):
-# words = []
-# segs = text.split()
-# for seg in segs:
-# # There is no space in seg.
-# current_word = ""
-# for c in seg:
-# if len(c.encode()) == 1:
-# # This is an ASCII char.
-# current_word += c
-# else:
-# # This is a Chinese char.
-# if len(current_word) > 0:
-# words.append(current_word)
-# current_word = ""
-# words.append(c)
-# if len(current_word) > 0:
-# words.append(current_word)
-#
-# return words
def split_words(text: str, jieba_usr_dict=None, **kwargs):
if jieba_usr_dict:
@@ -43,21 +28,21 @@
token_list_tmp = []
language_flag = None
for token in input_list:
- if isEnglish(token) and language_flag == 'Chinese':
+ if isEnglish(token) and language_flag == "Chinese":
token_list_all.append(token_list_tmp)
- langauge_list.append('Chinese')
+ langauge_list.append("Chinese")
token_list_tmp = []
- elif not isEnglish(token) and language_flag == 'English':
+ elif not isEnglish(token) and language_flag == "English":
token_list_all.append(token_list_tmp)
- langauge_list.append('English')
+ langauge_list.append("English")
token_list_tmp = []
token_list_tmp.append(token)
if isEnglish(token):
- language_flag = 'English'
+ language_flag = "English"
else:
- language_flag = 'Chinese'
+ language_flag = "Chinese"
if token_list_tmp:
token_list_all.append(token_list_tmp)
@@ -65,7 +50,7 @@
result_list = []
for token_list_tmp, language_flag in zip(token_list_all, langauge_list):
- if language_flag == 'English':
+ if language_flag == "English":
result_list.extend(token_list_tmp)
else:
seg_list = jieba_usr_dict.cut(join_chinese_and_english(token_list_tmp), HMM=False)
@@ -93,17 +78,19 @@
words.append(current_word)
return words
-def isEnglish(text:str):
- if re.search('^[a-zA-Z\']+$', text):
+
+def isEnglish(text: str):
+ if re.search("^[a-zA-Z']+$", text):
return True
else:
return False
+
def join_chinese_and_english(input_list):
- line = ''
+ line = ""
for token in input_list:
if isEnglish(token):
- line = line + ' ' + token
+ line = line + " " + token
else:
line = line + token
--
Gitblit v1.9.1