From 81c991f1d18df89704fd968179456be93f30668a Mon Sep 17 00:00:00 2001
From: speech_asr <wangjiaming.wjm@alibaba-inc.com>
Date: Wed, 08 Mar 2023 16:38:46 +0800
Subject: [PATCH] update eend_ola: add encoder, encoder-decoder attractor and PIT loss modules
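
Add the EEND-OLA building blocks: a Transformer encoder, the
encoder-decoder attractor (EDA), and permutation-invariant training
(PIT) loss utilities, plus empty __init__.py and utils/power.py
placeholders.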

---
 funasr/modules/eend_ola/__init__.py                  |    0 
 funasr/modules/eend_ola/encoder.py                   |  127 +++++++++++++++++++++++++
 funasr/modules/eend_ola/encoder_decoder_attractor.py |   50 ++++++++++
 funasr/modules/eend_ola/utils/losses.py              |   77 +++++++++++++++
 funasr/modules/eend_ola/utils/power.py               |    0 
 5 files changed, 254 insertions(+), 0 deletions(-)

diff --git a/funasr/modules/eend_ola/__init__.py b/funasr/modules/eend_ola/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/funasr/modules/eend_ola/__init__.py
diff --git a/funasr/modules/eend_ola/encoder.py b/funasr/modules/eend_ola/encoder.py
new file mode 100644
index 0000000..17d11ac
--- /dev/null
+++ b/funasr/modules/eend_ola/encoder.py
@@ -0,0 +1,127 @@
+import math
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+
+class MultiHeadSelfAttention(nn.Module):
+    def __init__(self, n_units, h=8, dropout_rate=0.1):
+        super(MultiHeadSelfAttention, self).__init__()
+        self.linearQ = nn.Linear(n_units, n_units)
+        self.linearK = nn.Linear(n_units, n_units)
+        self.linearV = nn.Linear(n_units, n_units)
+        self.linearO = nn.Linear(n_units, n_units)
+        self.d_k = n_units // h
+        self.h = h
+        self.dropout = nn.Dropout(dropout_rate)
+
+    def __call__(self, x, batch_size, x_mask):  # x: (batch * time, n_units); unflattened internally using batch_size
+        q = self.linearQ(x).view(batch_size, -1, self.h, self.d_k)
+        k = self.linearK(x).view(batch_size, -1, self.h, self.d_k)
+        v = self.linearV(x).view(batch_size, -1, self.h, self.d_k)
+        scores = torch.matmul(
+            q.permute(0, 2, 1, 3), k.permute(0, 2, 3, 1)) / math.sqrt(self.d_k)
+        if x_mask is not None:
+            x_mask = x_mask.unsqueeze(1)
+            scores = scores.masked_fill(x_mask == 0, -1e9)
+        self.att = F.softmax(scores, dim=3)
+        p_att = self.dropout(self.att)
+        x = torch.matmul(p_att, v.permute(0, 2, 1, 3))
+        x = x.permute(0, 2, 1, 3).contiguous().view(-1, self.h * self.d_k)
+        return self.linearO(x)
+
+
+class PositionwiseFeedForward(nn.Module):
+    def __init__(self, n_units, d_units, dropout_rate):
+        super(PositionwiseFeedForward, self).__init__()
+        self.linear1 = nn.Linear(n_units, d_units)
+        self.linear2 = nn.Linear(d_units, n_units)
+        self.dropout = nn.Dropout(dropout_rate)
+
+    def __call__(self, x):
+        return self.linear2(self.dropout(F.relu(self.linear1(x))))
+
+
+class PositionalEncoding(torch.nn.Module):
+    def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
+        super(PositionalEncoding, self).__init__()
+        self.d_model = d_model
+        self.reverse = reverse
+        self.xscale = math.sqrt(self.d_model)
+        self.dropout = torch.nn.Dropout(p=dropout_rate)
+        self.pe = None
+        self.extend_pe(torch.tensor(0.0).expand(1, max_len))
+
+    def extend_pe(self, x):
+        if self.pe is not None:
+            if self.pe.size(1) >= x.size(1):
+                if self.pe.dtype != x.dtype or self.pe.device != x.device:
+                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
+                return
+        pe = torch.zeros(x.size(1), self.d_model)
+        if self.reverse:
+            position = torch.arange(
+                x.size(1) - 1, -1, -1.0, dtype=torch.float32
+            ).unsqueeze(1)
+        else:
+            position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
+        div_term = torch.exp(
+            torch.arange(0, self.d_model, 2, dtype=torch.float32)
+            * -(math.log(10000.0) / self.d_model)
+        )
+        pe[:, 0::2] = torch.sin(position * div_term)
+        pe[:, 1::2] = torch.cos(position * div_term)
+        pe = pe.unsqueeze(0)
+        self.pe = pe.to(device=x.device, dtype=x.dtype)
+
+    def forward(self, x: torch.Tensor):
+        self.extend_pe(x)
+        x = x * self.xscale + self.pe[:, : x.size(1)]
+        return self.dropout(x)
+
+
+class TransformerEncoder(nn.Module):
+    def __init__(self, idim, n_layers, n_units,
+                 e_units=2048, h=8, dropout_rate=0.1, use_pos_emb=False):
+        super(TransformerEncoder, self).__init__()
+        self.lnorm_in = nn.LayerNorm(n_units)
+        self.n_layers = n_layers
+        self.dropout = nn.Dropout(dropout_rate)
+        for i in range(n_layers):
+            setattr(self, '{}{:d}'.format("lnorm1_", i),
+                    nn.LayerNorm(n_units))
+            setattr(self, '{}{:d}'.format("self_att_", i),
+                    MultiHeadSelfAttention(n_units, h))
+            setattr(self, '{}{:d}'.format("lnorm2_", i),
+                    nn.LayerNorm(n_units))
+            setattr(self, '{}{:d}'.format("ff_", i),
+                    PositionwiseFeedForward(n_units, e_units, dropout_rate))
+        self.lnorm_out = nn.LayerNorm(n_units)
+        if use_pos_emb:
+            self.pos_enc = torch.nn.Sequential(
+                torch.nn.Linear(idim, n_units),
+                torch.nn.LayerNorm(n_units),
+                torch.nn.Dropout(dropout_rate),
+                torch.nn.ReLU(),
+                PositionalEncoding(n_units, dropout_rate),
+            )
+        else:
+            self.linear_in = nn.Linear(idim, n_units)
+            self.pos_enc = None
+
+    def __call__(self, x, x_mask=None):  # x: (batch, time, idim)
+        BT_size = x.shape[0] * x.shape[1]
+        if self.pos_enc is not None:
+            e = self.pos_enc(x)
+            e = e.view(BT_size, -1)
+        else:
+            e = self.linear_in(x.reshape(BT_size, -1))
+        for i in range(self.n_layers):
+            e = getattr(self, '{}{:d}'.format("lnorm1_", i))(e)
+            s = getattr(self, '{}{:d}'.format("self_att_", i))(e, x.shape[0], x_mask)
+            e = e + self.dropout(s)
+            e = getattr(self, '{}{:d}'.format("lnorm2_", i))(e)
+            s = getattr(self, '{}{:d}'.format("ff_", i))(e)
+            e = e + self.dropout(s)
+        return self.lnorm_out(e)
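
Usage sketch for the encoder above (not part of the diff; the import path
follows this patch and the shapes are illustrative). Note that the encoder
flattens the batch and time axes, so its output is (batch * frames, n_units):

    import torch
    from funasr.modules.eend_ola.encoder import TransformerEncoder

    enc = TransformerEncoder(idim=345, n_layers=4, n_units=256)
    x = torch.randn(2, 500, 345)   # (batch, frames, feature_dim)
    e = enc(x)                     # flattened to (batch * frames, n_units)
    assert e.shape == (2 * 500, 256)
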
diff --git a/funasr/modules/eend_ola/encoder_decoder_attractor.py b/funasr/modules/eend_ola/encoder_decoder_attractor.py
new file mode 100644
index 0000000..db01b00
--- /dev/null
+++ b/funasr/modules/eend_ola/encoder_decoder_attractor.py
@@ -0,0 +1,50 @@
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+
+class EncoderDecoderAttractor(nn.Module):
+
+    def __init__(self, n_units, encoder_dropout=0.1, decoder_dropout=0.1):
+        super(EncoderDecoderAttractor, self).__init__()
+        self.enc0_dropout = nn.Dropout(encoder_dropout)
+        self.encoder = nn.LSTM(n_units, n_units, 1, batch_first=True, dropout=encoder_dropout)  # dropout is a no-op for num_layers=1
+        self.dec0_dropout = nn.Dropout(decoder_dropout)
+        self.decoder = nn.LSTM(n_units, n_units, 1, batch_first=True, dropout=decoder_dropout)  # dropout is a no-op for num_layers=1
+        self.counter = nn.Linear(n_units, 1)
+        self.n_units = n_units
+
+    def forward_core(self, xs, zeros):
+        ilens = torch.tensor([x.shape[0] for x in xs], dtype=torch.int64)  # pack_padded_sequence needs CPU int64 lengths
+        xs = [self.enc0_dropout(x) for x in xs]
+        xs = nn.utils.rnn.pad_sequence(xs, batch_first=True, padding_value=-1)
+        xs = nn.utils.rnn.pack_padded_sequence(xs, ilens, batch_first=True, enforce_sorted=False)
+        _, (hx, cx) = self.encoder(xs)
+        zlens = torch.tensor([z.shape[0] for z in zeros], dtype=torch.int64)  # CPU int64 lengths for packing
+        max_zlen = torch.max(zlens).to(torch.int).item()
+        zeros = [self.enc0_dropout(z) for z in zeros]
+        zeros = nn.utils.rnn.pad_sequence(zeros, batch_first=True, padding_value=-1)
+        zeros = nn.utils.rnn.pack_padded_sequence(zeros, zlens, batch_first=True, enforce_sorted=False)
+        attractors, (_, _) = self.decoder(zeros, (hx, cx))
+        attractors = nn.utils.rnn.pad_packed_sequence(attractors, batch_first=True, padding_value=-1,
+                                                      total_length=max_zlen)[0]
+        attractors = [att[:zlens[i].to(torch.int).item()] for i, att in enumerate(attractors)]
+        return attractors
+
+    def forward(self, xs, n_speakers):
+        zeros = [torch.zeros(n_spk + 1, self.n_units).to(torch.float32).to(xs[0].device) for n_spk in n_speakers]
+        attractors = self.forward_core(xs, zeros)
+        labels = torch.cat([torch.from_numpy(np.array([[1] * n_spk + [0]], np.float32)) for n_spk in n_speakers], dim=1)
+        labels = labels.to(xs[0].device)
+        logit = torch.cat([self.counter(att).view(-1, n_spk + 1) for att, n_spk in zip(attractors, n_speakers)], dim=1)
+        loss = F.binary_cross_entropy(torch.sigmoid(logit), labels)
+
+        attractors = [att[:-1] for att in attractors]  # drop the last attractor (the stop/non-existence slot)
+        return loss, attractors
+
+    def estimate(self, xs, max_n_speakers=15):
+        zeros = [torch.zeros(max_n_speakers, self.n_units).to(torch.float32).to(xs[0].device) for _ in xs]
+        attractors = self.forward_core(xs, zeros)
+        probs = [torch.sigmoid(torch.flatten(self.counter(att))) for att in attractors]
+        return attractors, probs
\ No newline at end of file
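
Inference sketch for the attractor module above (import path assumed from
this patch; the 0.5 existence threshold follows the usual EEND-EDA
convention and is not fixed by this code):

    import torch
    from funasr.modules.eend_ola.encoder_decoder_attractor import EncoderDecoderAttractor

    eda = EncoderDecoderAttractor(n_units=256).eval()
    xs = [torch.randn(500, 256)]   # one utterance of frame embeddings
    with torch.no_grad():
        attractors, probs = eda.estimate(xs, max_n_speakers=15)

    # Keep attractors until the existence probability first drops below 0.5.
    n_spk = 0
    for p in probs[0].tolist():
        if p < 0.5:
            break
        n_spk += 1
    speaker_attractors = attractors[0][:n_spk]
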
diff --git a/funasr/modules/eend_ola/utils/losses.py b/funasr/modules/eend_ola/utils/losses.py
new file mode 100644
index 0000000..97443bc
--- /dev/null
+++ b/funasr/modules/eend_ola/utils/losses.py
@@ -0,0 +1,77 @@
+import numpy as np
+import torch
+import torch.nn.functional as F
+from itertools import permutations
+from torch import nn
+
+
+def standard_loss(ys, ts, label_delay=0):
+    losses = [F.binary_cross_entropy(torch.sigmoid(y), t) * len(y) for y, t in zip(ys, ts)]
+    loss = torch.sum(torch.stack(losses))
+    n_frames = torch.from_numpy(np.array(np.sum([t.shape[0] for t in ts]))).to(torch.float32).to(ys[0].device)  # total number of frames in the batch
+    loss = loss / n_frames
+    return loss
+
+
+def batch_pit_loss(ys, ts, label_delay=0):
+    loss_w_labels = [pit_loss(y, t)
+                     for (y, t) in zip(ys, ts)]
+    losses, labels = zip(*loss_w_labels)
+    loss = torch.sum(torch.stack(losses))
+    n_frames = torch.tensor(sum(t.shape[0] for t in ts), dtype=torch.float32, device=loss.device)  # torch.stack fails on plain ints
+    loss = loss / n_frames
+    return loss, labels
+
+
+def batch_pit_n_speaker_loss(ys, ts, n_speakers_list):
+    max_n_speakers = ts[0].shape[1]
+    olens = [y.shape[0] for y in ys]
+    ys = nn.utils.rnn.pad_sequence(ys, batch_first=True, padding_value=-1)
+    ys_mask = [torch.ones(olen).to(ys.device) for olen in olens]
+    ys_mask = torch.nn.utils.rnn.pad_sequence(ys_mask, batch_first=True, padding_value=0).unsqueeze(-1)
+
+    losses = []
+    for shift in range(max_n_speakers):
+        ts_roll = [torch.roll(t, -shift, dims=1) for t in ts]
+        ts_roll = nn.utils.rnn.pad_sequence(ts_roll, batch_first=True, padding_value=-1)
+        loss = F.binary_cross_entropy(torch.sigmoid(ys), ts_roll, reduction='none')
+        if ys_mask is not None:
+            loss = loss * ys_mask
+        loss = torch.sum(loss, dim=1)
+        losses.append(loss)
+    losses = torch.stack(losses, dim=2)
+
+    perms = np.array(list(permutations(range(max_n_speakers)))).astype(np.float32)
+    perms = torch.from_numpy(perms).to(losses.device)
+    y_ind = torch.arange(max_n_speakers, dtype=torch.float32, device=losses.device)
+    t_inds = torch.fmod(perms - y_ind, max_n_speakers).to(torch.long)
+
+    losses_perm = []
+    for t_ind in t_inds:
+        losses_perm.append(
+            torch.mean(losses[:, y_ind.to(torch.long), t_ind], dim=1))
+    losses_perm = torch.stack(losses_perm, dim=1)
+
+    def select_perm_indices(num, max_num):
+        perms = list(permutations(range(max_num)))
+        sub_perms = list(permutations(range(num)))
+        return [
+            [x[:num] for x in perms].index(perm)
+            for perm in sub_perms]
+
+    masks = torch.full_like(losses_perm, device=losses.device, fill_value=float('inf'))
+    for i, t in enumerate(ts):
+        n_speakers = n_speakers_list[i]
+        indices = select_perm_indices(n_speakers, max_n_speakers)
+        masks[i, indices] = 0
+    losses_perm += masks
+
+    min_loss = torch.sum(torch.min(losses_perm, dim=1)[0])
+    n_frames = torch.from_numpy(np.array(np.sum([t.shape[0] for t in ts]))).to(losses.device)
+    min_loss = min_loss / n_frames
+
+    min_indices = torch.argmin(losses_perm, dim=1)
+    labels_perm = [t[:, perms[idx].to(torch.long)] for t, idx in zip(ts, min_indices)]
+    labels_perm = [t[:, :n_speakers] for t, n_speakers in zip(labels_perm, n_speakers_list)]
+
+    return min_loss, labels_perm
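
Note that batch_pit_loss above calls pit_loss, which this patch neither
defines nor imports; it is assumed to live elsewhere in the tree. Below is
a sketch of the per-utterance PIT loss it expects (minimum BCE over all
speaker-column permutations, in the standard EEND formulation), followed
by a toy call of batch_pit_n_speaker_loss:

    import torch
    import torch.nn.functional as F
    from itertools import permutations
    from funasr.modules.eend_ola.utils.losses import batch_pit_n_speaker_loss

    def pit_loss(pred, label, label_delay=0):
        # Score every speaker-column permutation of the labels and keep the
        # best; return the loss rescaled to a per-utterance frame sum, plus
        # the best-matching label permutation.
        n_frames = len(label) - label_delay
        label_perms = [label[..., list(p)] for p in permutations(range(label.shape[-1]))]
        losses = torch.stack([
            F.binary_cross_entropy(torch.sigmoid(pred[label_delay:]), l[:n_frames])
            for l in label_perms])
        best = int(losses.argmin())
        return losses[best] * n_frames, label_perms[best]

    # Toy shapes: two utterances padded to 3 speakers, with 2 and 3 active.
    ys = [torch.randn(100, 3), torch.randn(80, 3)]   # per-frame logits
    ts = [torch.randint(0, 2, (100, 3)).float(),     # 0/1 speaker activity labels
          torch.randint(0, 2, (80, 3)).float()]
    loss, labels = batch_pit_n_speaker_loss(ys, ts, n_speakers_list=[2, 3])
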
diff --git a/funasr/modules/eend_ola/utils/power.py b/funasr/modules/eend_ola/utils/power.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/funasr/modules/eend_ola/utils/power.py

--
Gitblit v1.9.1