From dee1354d0d984df21d16a2eba5bacec31bfb0b4b Mon Sep 17 00:00:00 2001
From: 维石 <shixian.shi@alibaba-inc.com>
Date: Fri, 19 Apr 2024 14:57:31 +0800
Subject: [PATCH] empty result bug fix

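Add an exportable CifPredictorV3Export module together with
TorchScript-scripted cif_export/cif_wo_hidden_export helpers. In
cif_export the output is padded to the largest fire count across the
whole batch, so a batch whose first sample fires few (or no) tokens no
longer yields an empty or truncated result.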
---
 funasr/models/bicif_paraformer/cif_predictor.py |  220 ++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 200 insertions(+), 20 deletions(-)

diff --git a/funasr/models/bicif_paraformer/cif_predictor.py b/funasr/models/bicif_paraformer/cif_predictor.py
index 5a1488e..2cdbc16 100644
--- a/funasr/models/bicif_paraformer/cif_predictor.py
+++ b/funasr/models/bicif_paraformer/cif_predictor.py
@@ -1,17 +1,15 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
 import torch
-from torch import nn
-from torch import Tensor
-import logging
-import numpy as np
-from funasr.train_utils.device_funcs import to_device
-from funasr.models.transformer.utils.nets_utils import make_pad_mask
-from funasr.models.scama.utils import sequence_mask
-from typing import Optional, Tuple
 
 from funasr.register import tables
+from funasr.models.transformer.utils.nets_utils import make_pad_mask
 
 
-class mae_loss(nn.Module):
+class mae_loss(torch.nn.Module):
 
     def __init__(self, normalize_length=False):
         super(mae_loss, self).__init__()
@@ -95,7 +93,7 @@
     return fires
 
 @tables.register("predictor_classes", "CifPredictorV3")
-class CifPredictorV3(nn.Module):
+class CifPredictorV3(torch.nn.Module):
     def __init__(self,
                  idim,
                  l_order,
@@ -116,9 +114,9 @@
                  ):
         super(CifPredictorV3, self).__init__()
 
-        self.pad = nn.ConstantPad1d((l_order, r_order), 0)
-        self.cif_conv1d = nn.Conv1d(idim, idim, l_order + r_order + 1)
-        self.cif_output = nn.Linear(idim, 1)
+        self.pad = torch.nn.ConstantPad1d((l_order, r_order), 0)
+        self.cif_conv1d = torch.nn.Conv1d(idim, idim, l_order + r_order + 1)
+        self.cif_output = torch.nn.Linear(idim, 1)
         self.dropout = torch.nn.Dropout(p=dropout)
         self.threshold = threshold
         self.smooth_factor = smooth_factor
@@ -131,14 +129,14 @@
         self.upsample_type = upsample_type
         self.use_cif1_cnn = use_cif1_cnn
         if self.upsample_type == 'cnn':
-            self.upsample_cnn = nn.ConvTranspose1d(idim, idim, self.upsample_times, self.upsample_times)
-            self.cif_output2 = nn.Linear(idim, 1)
+            self.upsample_cnn = torch.nn.ConvTranspose1d(idim, idim, self.upsample_times, self.upsample_times)
+            self.cif_output2 = torch.nn.Linear(idim, 1)
         elif self.upsample_type == 'cnn_blstm':
-            self.upsample_cnn = nn.ConvTranspose1d(idim, idim, self.upsample_times, self.upsample_times)
-            self.blstm = nn.LSTM(idim, idim, 1, bias=True, batch_first=True, dropout=0.0, bidirectional=True)
-            self.cif_output2 = nn.Linear(idim*2, 1)
+            self.upsample_cnn = torch.nn.ConvTranspose1d(idim, idim, self.upsample_times, self.upsample_times)
+            self.blstm = torch.nn.LSTM(idim, idim, 1, bias=True, batch_first=True, dropout=0.0, bidirectional=True)
+            self.cif_output2 = torch.nn.Linear(idim*2, 1)
         elif self.upsample_type == 'cnn_attn':
-            self.upsample_cnn = nn.ConvTranspose1d(idim, idim, self.upsample_times, self.upsample_times)
+            self.upsample_cnn = torch.nn.ConvTranspose1d(idim, idim, self.upsample_times, self.upsample_times)
             from funasr.models.transformer.encoder import EncoderLayer as TransformerEncoderLayer
             from funasr.models.transformer.attention import MultiHeadedAttention
             from funasr.models.transformer.positionwise_feed_forward import PositionwiseFeedForward
@@ -157,7 +155,7 @@
                 True, #normalize_before,
                 False, #concat_after,
             )
-            self.cif_output2 = nn.Linear(idim, 1)
+            self.cif_output2 = torch.nn.Linear(idim, 1)
         self.smooth_factor2 = smooth_factor2
         self.noise_threshold2 = noise_threshold2
 
@@ -338,3 +336,185 @@
         predictor_alignments = index_div_bool_zeros_count_tile_out
         predictor_alignments_length = predictor_alignments.sum(-1).type(encoder_sequence_length.dtype)
         return predictor_alignments.detach(), predictor_alignments_length.detach()
+
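+# Export-friendly wrapper around CifPredictorV3: it reuses the trained
+# submodules but keeps the forward pass to script/trace-friendly ops so the
+# predictor can be exported (e.g. TorchScript/ONNX).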
+@tables.register("predictor_classes", "CifPredictorV3Export")
+class CifPredictorV3Export(torch.nn.Module):
+    def __init__(self, model, **kwargs):
+        super().__init__()
+        
+        self.pad = model.pad
+        self.cif_conv1d = model.cif_conv1d
+        self.cif_output = model.cif_output
+        self.threshold = model.threshold
+        self.smooth_factor = model.smooth_factor
+        self.noise_threshold = model.noise_threshold
+        self.tail_threshold = model.tail_threshold
+        
+        self.upsample_times = model.upsample_times
+        self.upsample_cnn = model.upsample_cnn
+        self.blstm = model.blstm
+        self.cif_output2 = model.cif_output2
+        self.smooth_factor2 = model.smooth_factor2
+        self.noise_threshold2 = model.noise_threshold2
+    
+    def forward(self, hidden: torch.Tensor,
+                mask: torch.Tensor,
+                ):
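+        # predict per-frame CIF weights (alphas) from the encoder output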
+        h = hidden
+        context = h.transpose(1, 2)
+        queries = self.pad(context)
+        output = torch.relu(self.cif_conv1d(queries))
+        output = output.transpose(1, 2)
+        
+        output = self.cif_output(output)
+        alphas = torch.sigmoid(output)
+        alphas = torch.nn.functional.relu(alphas * self.smooth_factor - self.noise_threshold)
+        mask = mask.transpose(-1, -2).float()
+        alphas = alphas * mask
+        alphas = alphas.squeeze(-1)
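+        # the total fired weight approximates the number of output tokens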
+        token_num = alphas.sum(-1)
+        
+        mask = mask.squeeze(-1)
+        hidden, alphas, token_num = self.tail_process_fn(hidden, alphas, mask=mask)
+        acoustic_embeds, cif_peak = cif_export(hidden, alphas, self.threshold)
+        
+        return acoustic_embeds, token_num, alphas, cif_peak
+    
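+    # upsampled alphas and fire peaks used for timestamp prediction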
+    def get_upsample_timestmap(self, hidden, mask=None, token_num=None):
+        h = hidden
+        b = hidden.shape[0]
+        context = h.transpose(1, 2)
+        
+        # generate alphas2
+        _output = context
+        output2 = self.upsample_cnn(_output)
+        output2 = output2.transpose(1, 2)
+        output2, (_, _) = self.blstm(output2)
+        alphas2 = torch.sigmoid(self.cif_output2(output2))
+        alphas2 = torch.nn.functional.relu(alphas2 * self.smooth_factor2 - self.noise_threshold2)
+        
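+        # expand the frame-level mask to the upsampled time resolution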
+        mask = mask.repeat(1, self.upsample_times, 1).transpose(-1, -2).reshape(alphas2.shape[0], -1)
+        mask = mask.unsqueeze(-1)
+        alphas2 = alphas2 * mask
+        alphas2 = alphas2.squeeze(-1)
+        _token_num = alphas2.sum(-1)
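+        # rescale so the upsampled weights sum to the given token_num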
+        alphas2 *= (token_num / _token_num)[:, None].repeat(1, alphas2.size(1))
+        # upsampled alphas and cif_peak
+        us_alphas = alphas2
+        us_cif_peak = cif_wo_hidden_export(us_alphas, self.threshold - 1e-4)
+        return us_alphas, us_cif_peak
+    
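+    # Append a tail frame weighted by tail_threshold so that residual alpha
+    # mass at the end of the utterance still fires a final token.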
+    def tail_process_fn(self, hidden, alphas, token_num=None, mask=None):
+        b, t, d = hidden.size()
+        tail_threshold = self.tail_threshold
+        
+        zeros_t = torch.zeros((b, 1), dtype=torch.float32, device=alphas.device)
+        ones_t = torch.ones_like(zeros_t)
+        
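+        # mask_2 - mask_1 is 1 exactly at the first padded slot after the
+        # last valid frame; the tail threshold is added at that position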
+        mask_1 = torch.cat([mask, zeros_t], dim=1)
+        mask_2 = torch.cat([ones_t, mask], dim=1)
+        mask = mask_2 - mask_1
+        tail_threshold = mask * tail_threshold
+        alphas = torch.cat([alphas, zeros_t], dim=1)
+        alphas = torch.add(alphas, tail_threshold)
+        
+        zeros = torch.zeros((b, 1, d), dtype=hidden.dtype, device=hidden.device)
+        hidden = torch.cat([hidden, zeros], dim=1)
+        token_num = alphas.sum(dim=-1)
+        token_num_floor = torch.floor(token_num)
+        
+        return hidden, alphas, token_num_floor
+
+
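+# Scripted continuous integrate-and-fire (CIF): accumulate alphas over time
+# and emit (fire) the integrated acoustic embedding each time the running
+# integral crosses the threshold.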
+@torch.jit.script
+def cif_export(hidden, alphas, threshold: float):
+    batch_size, len_time, hidden_size = hidden.size()
+    threshold_t = torch.tensor([threshold], dtype=alphas.dtype, device=alphas.device)  # TorchScript can't rebind the float arg to a Tensor
+    
+    # loop vars
+    integrate = torch.zeros([batch_size], dtype=alphas.dtype, device=hidden.device)
+    frame = torch.zeros([batch_size, hidden_size], dtype=hidden.dtype, device=hidden.device)
+    # intermediate vars along time
+    list_fires = []
+    list_frames = []
+    
+    for t in range(len_time):
+        alpha = alphas[:, t]
+        distribution_completion = torch.ones([batch_size], dtype=alphas.dtype, device=hidden.device) - integrate
+        
+        integrate += alpha
+        list_fires.append(integrate)
+        
+        fire_place = integrate >= threshold_t
+        integrate = torch.where(fire_place,
+                                integrate - torch.ones([batch_size], dtype=alphas.dtype, device=hidden.device),
+                                integrate)
+        cur = torch.where(fire_place,
+                          distribution_completion,
+                          alpha)
+        remainder = alpha - cur
+        
+        frame += cur[:, None] * hidden[:, t, :]
+        list_frames.append(frame)
+        frame = torch.where(fire_place[:, None].repeat(1, hidden_size),
+                            remainder[:, None] * hidden[:, t, :],
+                            frame)
+    
+    fires = torch.stack(list_fires, 1)
+    frames = torch.stack(list_frames, 1)
+    
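+    # pad every sample to the largest fire count in the batch, so a short
+    # (or empty) first sample cannot truncate the other samples' outputs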
+    fire_idxs = fires >= threshold_t
+    frame_fires = torch.zeros_like(hidden)
+    max_label_len = frames[0, fire_idxs[0]].size(0)
+    for b in range(batch_size):
+        frame_fire = frames[b, fire_idxs[b]]
+        frame_len = frame_fire.size(0)
+        frame_fires[b, :frame_len, :] = frame_fire
+        
+        if frame_len >= max_label_len:
+            max_label_len = frame_len
+    frame_fires = frame_fires[:, :max_label_len, :]
+    return frame_fires, fires
+
+
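+# Same integrate-and-fire recurrence, but only the fire trace is needed
+# (for timestamps), so no hidden states are integrated.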
+@torch.jit.script
+def cif_wo_hidden_export(alphas, threshold: float):
+    batch_size, len_time = alphas.size()
+    
+    # loop vars
+    integrate = torch.zeros([batch_size], dtype=alphas.dtype, device=alphas.device)
+    # intermediate vars along time
+    list_fires = []
+    
+    for t in range(len_time):
+        alpha = alphas[:, t]
+        
+        integrate += alpha
+        list_fires.append(integrate)
+        
+        fire_place = integrate >= threshold
+        integrate = torch.where(fire_place,
+                                integrate - torch.ones([batch_size], dtype=alphas.dtype, device=alphas.device) * threshold,
+                                integrate)
+    
+    fires = torch.stack(list_fires, 1)
+    return fires
\ No newline at end of file

--
Gitblit v1.9.1