From d20c030e5b75306dd67e8fe9924d5d94eac1bf30 Mon Sep 17 00:00:00 2001
From: wusong <63332221+wusong1128@users.noreply.github.com>
Date: Wed, 25 Sep 2024 15:11:50 +0800
Subject: [PATCH] 解决python ws服务针对尾部非人声录音无结束标识返回的问题 (#2102)
---
funasr/models/sense_voice/model.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/funasr/models/sense_voice/model.py b/funasr/models/sense_voice/model.py
index cf4f7fb..1311987 100644
--- a/funasr/models/sense_voice/model.py
+++ b/funasr/models/sense_voice/model.py
@@ -644,6 +644,7 @@
self.embed = torch.nn.Embedding(
7 + len(self.lid_dict) + len(self.textnorm_dict), input_size
)
+ self.emo_dict = {"unk": 25009, "happy": 25001, "sad": 25002, "angry": 25003, "neutral": 25004}
self.criterion_att = LabelSmoothingLoss(
size=self.vocab_size,
@@ -697,10 +698,11 @@
loss_rich, acc_rich = self._calc_rich_ce_loss(encoder_out[:, :4, :], text[:, :4])
- loss = loss_ctc
+ loss = loss_ctc + loss_rich
# Collect total loss stats
- stats["loss"] = torch.clone(loss.detach()) if loss_ctc is not None else None
+ stats["loss_ctc"] = torch.clone(loss_ctc.detach()) if loss_ctc is not None else None
stats["loss_rich"] = torch.clone(loss_rich.detach()) if loss_rich is not None else None
+ stats["loss"] = torch.clone(loss.detach()) if loss is not None else None
stats["acc_rich"] = acc_rich
# force_gatherable: to-device and to-tensor if scalar for DataParallel
@@ -870,7 +872,9 @@
# c. Passed the encoder result and the beam search
ctc_logits = self.ctc.log_softmax(encoder_out)
-
+ if kwargs.get("ban_emo_unk", False):
+ ctc_logits[:, :, self.emo_dict["unk"]] = -float("inf")
+
results = []
b, n, d = encoder_out.size()
if isinstance(key[0], (list, tuple)):
--
Gitblit v1.9.1