From b2be308de0a4d75c3645e55c26d33a58446a16ff Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 07 Jun 2024 02:51:18 +0800
Subject: [PATCH] auto frontend
---
funasr/models/llm_asr/model.py | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 0d9bf7f..e2880c8 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -468,7 +468,7 @@
if len(speech_lengths.size()) > 1:
speech_lengths = speech_lengths[:, 0]
- batch_size = speech.shape[0]
+ batch_size, frames, _ = speech.shape
# audio encoder
encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
@@ -499,6 +499,13 @@
stats["acc"] = acc_att
stats["loss"] = torch.clone(loss.detach())
+ stats["batch_size"] = batch_size
+ stats["batch_size_x_frames"] = frames * batch_size
+ stats["batch_size_real_frames"] = speech_lengths.sum().item()
+ stats["padding_frames"] = stats["batch_size_x_frames"] - stats["batch_size_real_frames"]
+ stats["batch_size_x_tokens"] = token_num * batch_size
+ stats["batch_size_real_tokens"] = attention_mask.sum().item()
+ stats["padding_tokens"] = stats["batch_size_x_tokens"] - stats["batch_size_real_tokens"]
# force_gatherable: to-device and to-tensor if scalar for DataParallel
if self.length_normalized_loss:
--
Gitblit v1.9.1