From d2dc3af1a69ee4075bcfc0c83dc0fb8e3fc1db4e Mon Sep 17 00:00:00 2001
From: yhliang <68215459+yhliang-aslp@users.noreply.github.com>
Date: Thu, 11 May 2023 16:31:40 +0800
Subject: [PATCH] Merge pull request #492 from alibaba-damo-academy/dev_smohan
---
funasr/modules/eend_ola/utils/losses.py | 12 +-----------
1 file changed, 1 insertion(+), 11 deletions(-)
diff --git a/funasr/modules/eend_ola/utils/losses.py b/funasr/modules/eend_ola/utils/losses.py
index 97443bc..af0181d 100644
--- a/funasr/modules/eend_ola/utils/losses.py
+++ b/funasr/modules/eend_ola/utils/losses.py
@@ -8,19 +8,9 @@
def standard_loss(ys, ts, label_delay=0):
losses = [F.binary_cross_entropy(torch.sigmoid(y), t) * len(y) for y, t in zip(ys, ts)]
loss = torch.sum(torch.stack(losses))
- n_frames = torch.from_numpy(np.array(np.sum([t.shape[0] for t in ts]))).to(torch.float32).to(ys[0].device) # 璁$畻鎬荤殑甯ф暟
+ n_frames = torch.from_numpy(np.array(np.sum([t.shape[0] for t in ts]))).to(torch.float32).to(ys[0].device)
loss = loss / n_frames
return loss
-
-
-def batch_pit_loss(ys, ts, label_delay=0):
- loss_w_labels = [pit_loss(y, t)
- for (y, t) in zip(ys, ts)]
- losses, labels = zip(*loss_w_labels)
- loss = torch.sum(torch.stack(losses))
- n_frames = torch.sum(torch.stack([t.shape[0] for t in ts]))
- loss = loss / n_frames
- return loss, labels
def batch_pit_n_speaker_loss(ys, ts, n_speakers_list):
--
Gitblit v1.9.1