From 70645e48072bf193fbf069949f1d2b10fddac8a3 Mon Sep 17 00:00:00 2001
From: pointerhacker <145901472+pointerhacker@users.noreply.github.com>
Date: Tue, 15 Oct 2024 17:50:51 +0800
Subject: [PATCH] Fix model training errors possibly caused by data parallelism (#2139)
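
Judging from the diff, the kws-only branch pulls per-model token lists and seg dicts
from kwargs when several models are built for parallel training, and at that point
tokenizer_conf behaves as a plain dict, so attribute-style assignment fails during
setup. Switching to item assignment avoids the error. A minimal sketch of the
failure mode, assuming tokenizer_conf is an ordinary dict (the file names below are
illustrative, not taken from the patch):

    tokenizer_conf = {"unk_symbol": "<unk>"}            # hypothetical config dict
    token_list_files = ["tokens_rank0.txt", "tokens_rank1.txt"]

    # Attribute assignment on a plain dict raises:
    #   tokenizer_conf.token_list = token_list_files[0]
    #   AttributeError: 'dict' object has no attribute 'token_list'

    # Item assignment, as used after this patch, works:
    tokenizer_conf["token_list"] = token_list_files[0]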
---
funasr/auto/auto_model.py | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 9f5f4fb..71f44b4 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -199,6 +199,7 @@
tokenizers_build = []
vocab_sizes = []
token_lists = []
+
### === only for kws ===
token_list_files = kwargs.get("token_lists", [])
seg_dicts = kwargs.get("seg_dicts", [])
@@ -213,9 +214,9 @@
### === only for kws ===
if len(token_list_files) > 1:
- tokenizer_conf.token_list = token_list_files[i]
+ tokenizer_conf["token_list"] = token_list_files[i]
if len(seg_dicts) > 1:
- tokenizer_conf.seg_dict = seg_dicts[i]
+ tokenizer_conf["seg_dict"] = seg_dicts[i]
### === only for kws ===
tokenizer = tokenizer_class(**tokenizer_conf)
@@ -228,8 +229,8 @@
if token_list is not None:
vocab_size = len(token_list)
- if vocab_size == -1 and hasattr(tokenizer, "get_vocab_size"):
- vocab_size = tokenizer.get_vocab_size()
+ if vocab_size == -1 and hasattr(tokenizer, "get_vocab_size"):
+ vocab_size = tokenizer.get_vocab_size()
token_lists.append(token_list)
vocab_sizes.append(vocab_size)
--
Gitblit v1.9.1