From 810046e3df4910c8f5c1a90e4b53aca45b3397e8 Mon Sep 17 00:00:00 2001
From: wuhongsheng <664116298@qq.com>
Date: Mon, 01 Jul 2024 10:42:58 +0800
Subject: [PATCH] 优化merge segments 参数,解决新闻联播男女主持人“晚上好”合并一个speakid问题 (#1861)
---
funasr/tokenizer/hf_tokenizer.py | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/funasr/tokenizer/hf_tokenizer.py b/funasr/tokenizer/hf_tokenizer.py
index b6043e9..1ac7ae2 100644
--- a/funasr/tokenizer/hf_tokenizer.py
+++ b/funasr/tokenizer/hf_tokenizer.py
@@ -1,16 +1,13 @@
-
-
-
from funasr.register import tables
+
@tables.register("tokenizer_classes", "HuggingfaceTokenizer")
def HuggingfaceTokenizer(init_param_path, **kwargs):
- try:
- from transformers import AutoTokenizer
- except:
- # print("If you want to use hugging, please `pip install -U transformers`")
- pass
- tokenizer = AutoTokenizer.from_pretrained(init_param_path)
-
- return tokenizer
+ try:
+ from transformers import AutoTokenizer
+ except:
+ # print("If you want to use hugging, please `pip install -U transformers`")
+ pass
+ tokenizer = AutoTokenizer.from_pretrained(init_param_path)
+ return tokenizer
--
Gitblit v1.9.1