From 3919d7454c070702e94b149e4032e9db08d28fa3 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Mon, 22 Jan 2024 15:42:45 +0800
Subject: [PATCH] Funasr1.0 (#1279)
---
funasr/models/paraformer/model.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/funasr/models/paraformer/model.py b/funasr/models/paraformer/model.py
index f92441d..6e422ad 100644
--- a/funasr/models/paraformer/model.py
+++ b/funasr/models/paraformer/model.py
@@ -33,7 +33,6 @@
def __init__(
self,
- # token_list: Union[Tuple[str, ...], List[str]],
specaug: Optional[str] = None,
specaug_conf: Optional[Dict] = None,
normalize: str = None,
@@ -164,6 +163,7 @@
self.use_1st_decoder_loss = use_1st_decoder_loss
self.length_normalized_loss = length_normalized_loss
self.beam_search = None
+ self.error_calculator = None
def forward(
self,
@@ -451,7 +451,7 @@
self.nbest = kwargs.get("nbest", 1)
meta_data = {}
- if isinstance(data_in, torch.Tensor): # fbank
+ if isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank": # fbank
speech, speech_lengths = data_in, data_lengths
if len(speech.shape) < 3:
speech = speech[None, :, :]
@@ -528,9 +528,9 @@
if tokenizer is not None:
# Change integer-ids to tokens
token = tokenizer.ids2tokens(token_int)
- text = tokenizer.tokens2text(token)
-
- text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
+ text_postprocessed = tokenizer.tokens2text(token)
+ if not hasattr(tokenizer, "bpemodel"):
+ text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
result_i = {"key": key[i], "text": text_postprocessed}
--
Gitblit v1.9.1