From 54931dd4e1a099d7d6f144c4e12e5453deb3aa26 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Wed, 28 Jun 2023 10:41:57 +0800
Subject: [PATCH] Merge branch 'main' of https://github.com/alibaba-damo-academy/FunASR into main
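
Align DiarSondModel with the FunASR base model and make SOND/TOLD
training more robust:

* Derive DiarSondModel from FunASRModel instead of AbsESPnetModel and
  extend the class docstring with the SOND and TOLD paper references.
* Hold pse_embedding, power_weight and int_token_arr as plain tensor
  attributes rather than registered buffers, moving them to the input
  device on the fly in forward().
* Add an inputs_type option (default "raw") so the frontend/encoder
  path is only applied to raw inputs.
* Relax the batch-size assertion and, when prediction and label
  lengths differ by at most length_diff_tolerance frames, trim pred,
  cd_score, ci_score and pse_labels to a common minimum length instead
  of truncating only the labels.
* Reshape the CI scorer output to (batch, time, max_spk_num) when the
  scorer is an AbsEncoder.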
---
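Notes for reviewers (not included in the commit message):

Turning the registered buffers into plain attributes keeps these
constant tensors out of state_dict, but Module.to() no longer moves
them, hence the device check at the top of forward(). A minimal,
self-contained sketch of that pattern (LazyDeviceModule and its
lookup tensor are illustrative names, not FunASR code):

    import torch

    class LazyDeviceModule(torch.nn.Module):
        """Toy module holding a constant tensor as a plain attribute."""

        def __init__(self, num_tokens: int = 4):
            super().__init__()
            # Plain attribute: excluded from state_dict and not moved
            # by Module.to()/.cuda(), unlike a registered buffer.
            self.lookup = torch.eye(num_tokens)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # Same guard as DiarSondModel.forward(): re-home the
            # constant once whenever the input device changes.
            if self.lookup.device != x.device:
                self.lookup = self.lookup.to(x.device)
            return self.lookup[x]

    mod = LazyDeviceModule()
    print(mod(torch.tensor([0, 2, 1])).shape)  # torch.Size([3, 4])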
funasr/models/e2e_diar_sond.py | 49 ++++++++++++++++++++++++++++++-------------------
1 file changed, 30 insertions(+), 19 deletions(-)
diff --git a/funasr/models/e2e_diar_sond.py b/funasr/models/e2e_diar_sond.py
index e68d16b..9c3fb92 100644
--- a/funasr/models/e2e_diar_sond.py
+++ b/funasr/models/e2e_diar_sond.py
@@ -22,7 +22,7 @@
from funasr.models.specaug.abs_specaug import AbsSpecAug
from funasr.layers.abs_normalize import AbsNormalize
from funasr.torch_utils.device_funcs import force_gatherable
-from funasr.train.abs_espnet_model import AbsESPnetModel
+from funasr.models.base_model import FunASRModel
from funasr.losses.label_smoothing_loss import LabelSmoothingLoss, SequenceBinaryCrossEntropy
from funasr.utils.misc import int2vec
@@ -35,9 +35,13 @@
yield
-class DiarSondModel(AbsESPnetModel):
- """Speaker overlap-aware neural diarization model
- reference: https://arxiv.org/abs/2211.10243
+class DiarSondModel(FunASRModel):
+ """
+ Author: Speech Lab, Alibaba Group, China
+ SOND: Speaker Overlap-aware Neural Diarization for Multi-party Meeting Analysis
+ https://arxiv.org/abs/2211.10243
+ TOLD: A Novel Two-Stage Overlap-Aware Framework for Speaker Diarization
+ https://arxiv.org/abs/2303.05397
"""
def __init__(
@@ -59,7 +63,8 @@
normalize_speech_speaker: bool = False,
ignore_id: int = -1,
speaker_discrimination_loss_weight: float = 1.0,
- inter_score_loss_weight: float = 0.0
+ inter_score_loss_weight: float = 0.0,
+ inputs_type: str = "raw",
):
assert check_argument_types()
@@ -85,15 +90,13 @@
normalize_length=length_normalized_loss,
)
self.criterion_bce = SequenceBinaryCrossEntropy(normalize_length=length_normalized_loss)
- pse_embedding = self.generate_pse_embedding()
- self.register_buffer("pse_embedding", pse_embedding)
- power_weight = torch.from_numpy(2 ** np.arange(max_spk_num)[np.newaxis, np.newaxis, :]).float()
- self.register_buffer("power_weight", power_weight)
- int_token_arr = torch.from_numpy(np.array(self.token_list).astype(int)[np.newaxis, np.newaxis, :]).int()
- self.register_buffer("int_token_arr", int_token_arr)
+ self.pse_embedding = self.generate_pse_embedding()
+ self.power_weight = torch.from_numpy(2 ** np.arange(max_spk_num)[np.newaxis, np.newaxis, :]).float()
+ self.int_token_arr = torch.from_numpy(np.array(self.token_list).astype(int)[np.newaxis, np.newaxis, :]).int()
self.speaker_discrimination_loss_weight = speaker_discrimination_loss_weight
self.inter_score_loss_weight = inter_score_loss_weight
self.forward_steps = 0
+ self.inputs_type = inputs_type
def generate_pse_embedding(self):
embedding = np.zeros((len(self.token_list), self.max_spk_num), dtype=float)
@@ -112,7 +115,6 @@
binary_labels_lengths: torch.Tensor = None,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
"""Frontend + Encoder + Speaker Encoder + CI Scorer + CD Scorer + Decoder + Calc loss
-
Args:
speech: (Batch, samples) or (Batch, frames, input_size)
speech_lengths: (Batch,) default None for chunk iterator,
@@ -125,9 +127,14 @@
binary_labels: (Batch, frames, max_spk_num)
binary_labels_lengths: (Batch,)
"""
- assert speech.shape[0] == binary_labels.shape[0], (speech.shape, binary_labels.shape)
+ assert speech.shape[0] <= binary_labels.shape[0], (speech.shape, binary_labels.shape)
batch_size = speech.shape[0]
self.forward_steps = self.forward_steps + 1
+ if self.pse_embedding.device != speech.device:
+ self.pse_embedding = self.pse_embedding.to(speech.device)
+ self.power_weight = self.power_weight.to(speech.device)
+ self.int_token_arr = self.int_token_arr.to(speech.device)
+
# 1. Network forward
pred, inter_outputs = self.prediction_forward(
speech, speech_lengths,
@@ -149,9 +156,13 @@
# the sequence length of 'pred' might be slightly less than the
# length of 'spk_labels'. Here we force them to be equal.
length_diff_tolerance = 2
- length_diff = pse_labels.shape[1] - pred.shape[1]
- if 0 < length_diff <= length_diff_tolerance:
- pse_labels = pse_labels[:, 0: pred.shape[1]]
+ length_diff = abs(pse_labels.shape[1] - pred.shape[1])
+ if length_diff <= length_diff_tolerance:
+ min_len = min(pred.shape[1], pse_labels.shape[1])
+ pse_labels = pse_labels[:, :min_len]
+ pred = pred[:, :min_len]
+ cd_score = cd_score[:, :min_len]
+ ci_score = ci_score[:, :min_len]
loss_diar = self.classification_loss(pred, pse_labels, binary_labels_lengths)
loss_spk_dis = self.speaker_discrimination_loss(profile, profile_lengths)
@@ -299,7 +310,7 @@
speech: torch.Tensor,
speech_lengths: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
- if self.encoder is not None:
+ if self.encoder is not None and self.inputs_type == "raw":
speech, speech_lengths = self.encode(speech, speech_lengths)
speech_mask = ~make_pad_mask(speech_lengths, maxlen=speech.shape[1])
speech_mask = speech_mask.to(speech.device).unsqueeze(-1).float()
@@ -342,6 +353,7 @@
if isinstance(self.ci_scorer, AbsEncoder):
ci_simi = self.ci_scorer(ge_in, ge_len)[0]
+ ci_simi = torch.reshape(ci_simi, [bb, self.max_spk_num, tt]).permute([0, 2, 1])
else:
ci_simi = self.ci_scorer(speech_encoder_outputs, speaker_encoder_outputs)
@@ -378,7 +390,6 @@
self, speech: torch.Tensor, speech_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Frontend + Encoder
-
Args:
speech: (Batch, Length, ...)
speech_lengths: (Batch,)
@@ -478,4 +489,4 @@
speaker_miss,
speaker_falarm,
speaker_error,
- )
+ )
\ No newline at end of file
--
Gitblit v1.9.1