funasr/auto/auto_model.py
@@ -199,6 +199,7 @@
     tokenizers_build = []
     vocab_sizes = []
     token_lists = []
+    ### === only for kws ===
     token_list_files = kwargs.get("token_lists", [])
     seg_dicts = kwargs.get("seg_dicts", [])
@@ -213,9 +214,9 @@
         ### === only for kws ===
         if len(token_list_files) > 1:
-            tokenizer_conf.token_list = token_list_files[i]
+            tokenizer_conf["token_list"] = token_list_files[i]
         if len(seg_dicts) > 1:
-            tokenizer_conf.seg_dict = seg_dicts[i]
+            tokenizer_conf["seg_dict"] = seg_dicts[i]
         ### === only for kws ===
         tokenizer = tokenizer_class(**tokenizer_conf)