From 911d450a596a711d6faea37c2abfba13d3a511fd Mon Sep 17 00:00:00 2001
From: haoneng.lhn <haoneng.lhn@alibaba-inc.com>
Date: Thu, 27 Apr 2023 14:15:11 +0800
Subject: [PATCH] Merge branch 'dev_lhn' into dev_websocket
---
funasr/modules/e2e_asr_common.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/funasr/modules/e2e_asr_common.py b/funasr/modules/e2e_asr_common.py
index f430fcb..98006f9 100644
--- a/funasr/modules/e2e_asr_common.py
+++ b/funasr/modules/e2e_asr_common.py
@@ -296,12 +296,13 @@
self.report_wer = report_wer
def __call__(
- self, encoder_out: torch.Tensor, target: torch.Tensor
+ self, encoder_out: torch.Tensor, target: torch.Tensor, encoder_out_lens: torch.Tensor,
) -> Tuple[Optional[float], Optional[float]]:
"""Calculate sentence-level WER or/and CER score for Transducer model.
Args:
encoder_out: Encoder output sequences. (B, T, D_enc)
target: Target label ID sequences. (B, L)
+ encoder_out_lens: Encoder output sequences length. (B,)
Returns:
: Sentence-level CER score.
: Sentence-level WER score.
@@ -312,7 +313,10 @@
encoder_out = encoder_out.to(next(self.decoder.parameters()).device)
- batch_nbest = [self.beam_search(encoder_out[b]) for b in range(batchsize)]
+ batch_nbest = [
+ self.beam_search(encoder_out[b][: encoder_out_lens[b]])
+ for b in range(batchsize)
+ ]
pred = [nbest_hyp[0].yseq[1:] for nbest_hyp in batch_nbest]
char_pred, char_target = self.convert_to_char(pred, target)
--
Gitblit v1.9.1