From ae49b2a8e1bc676e6014d8a12ebeec947b655e3e Mon Sep 17 00:00:00 2001
From: 莫拉古 <61447879+yechaoying@users.noreply.github.com>
Date: Fri, 29 Nov 2024 09:55:43 +0800
Subject: [PATCH] Fix misspelled variable name (#2249)

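Besides the variable-name fix, the diff reflows cif_predictor.py to
black-style formatting and introduces vectorized CIF implementations
(cif_v1, cif_v1_export, cif_wo_hidden_v1) alongside the loop-based
versions.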
---
 funasr/models/paraformer/cif_predictor.py |  383 ++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 284 insertions(+), 99 deletions(-)

diff --git a/funasr/models/paraformer/cif_predictor.py b/funasr/models/paraformer/cif_predictor.py
index d538e21..24145cd 100644
--- a/funasr/models/paraformer/cif_predictor.py
+++ b/funasr/models/paraformer/cif_predictor.py
@@ -12,9 +12,20 @@
 from funasr.models.transformer.utils.nets_utils import make_pad_mask
 from torch.cuda.amp import autocast
 
+
 @tables.register("predictor_classes", "CifPredictor")
 class CifPredictor(torch.nn.Module):
-    def __init__(self, idim, l_order, r_order, threshold=1.0, dropout=0.1, smooth_factor=1.0, noise_threshold=0, tail_threshold=0.45):
+    def __init__(
+        self,
+        idim,
+        l_order,
+        r_order,
+        threshold=1.0,
+        dropout=0.1,
+        smooth_factor=1.0,
+        noise_threshold=0,
+        tail_threshold=0.45,
+    ):
         super().__init__()
 
         self.pad = torch.nn.ConstantPad1d((l_order, r_order), 0)
@@ -26,9 +37,16 @@
         self.noise_threshold = noise_threshold
         self.tail_threshold = tail_threshold
 
-    def forward(self, hidden, target_label=None, mask=None, ignore_id=-1, mask_chunk_predictor=None,
-                target_label_length=None):
-    
+    def forward(
+        self,
+        hidden,
+        target_label=None,
+        mask=None,
+        ignore_id=-1,
+        mask_chunk_predictor=None,
+        target_label_length=None,
+    ):
+
         with autocast(False):
             h = hidden
             context = h.transpose(1, 2)
@@ -58,14 +76,16 @@
             if target_length is not None:
                 alphas *= (target_length / token_num)[:, None].repeat(1, alphas.size(1))
             elif self.tail_threshold > 0.0:
-                hidden, alphas, token_num = self.tail_process_fn(hidden, alphas, token_num, mask=mask)
-                
+                hidden, alphas, token_num = self.tail_process_fn(
+                    hidden, alphas, token_num, mask=mask
+                )
+
             acoustic_embeds, cif_peak = cif(hidden, alphas, self.threshold)
-            
+
             if target_length is None and self.tail_threshold > 0.0:
                 token_num_int = torch.max(token_num).type(torch.int32).item()
                 acoustic_embeds = acoustic_embeds[:, :token_num_int, :]
-                
+
         return acoustic_embeds, token_num, alphas, cif_peak
 
     def tail_process_fn(self, hidden, alphas, token_num=None, mask=None):
@@ -91,10 +111,9 @@
 
         return hidden, alphas, token_num_floor
 
-
-    def gen_frame_alignments(self,
-                             alphas: torch.Tensor = None,
-                             encoder_sequence_length: torch.Tensor = None):
+    def gen_frame_alignments(
+        self, alphas: torch.Tensor = None, encoder_sequence_length: torch.Tensor = None
+    ):
         batch_size, maximum_length = alphas.size()
         int_type = torch.int32
 
@@ -117,11 +136,15 @@
         index_div = torch.floor(torch.true_divide(alphas_cumsum, index)).type(int_type)
         index_div_bool_zeros = index_div.eq(0)
         index_div_bool_zeros_count = torch.sum(index_div_bool_zeros, dim=-1) + 1
-        index_div_bool_zeros_count = torch.clamp(index_div_bool_zeros_count, 0, encoder_sequence_length.max())
+        index_div_bool_zeros_count = torch.clamp(
+            index_div_bool_zeros_count, 0, encoder_sequence_length.max()
+        )
         token_num_mask = (~make_pad_mask(token_num, maxlen=max_token_num)).to(token_num.device)
         index_div_bool_zeros_count *= token_num_mask
 
-        index_div_bool_zeros_count_tile = index_div_bool_zeros_count[:, :, None].repeat(1, 1, maximum_length)
+        index_div_bool_zeros_count_tile = index_div_bool_zeros_count[:, :, None].repeat(
+            1, 1, maximum_length
+        )
         ones = torch.ones_like(index_div_bool_zeros_count_tile)
         zeros = torch.zeros_like(index_div_bool_zeros_count_tile)
         ones = torch.cumsum(ones, dim=2)
@@ -132,29 +155,36 @@
         index_div_bool_zeros_count_tile = 1 - index_div_bool_zeros_count_tile_bool.type(int_type)
         index_div_bool_zeros_count_tile_out = torch.sum(index_div_bool_zeros_count_tile, dim=1)
         index_div_bool_zeros_count_tile_out = index_div_bool_zeros_count_tile_out.type(int_type)
-        predictor_mask = (~make_pad_mask(encoder_sequence_length, maxlen=encoder_sequence_length.max())).type(
-            int_type).to(encoder_sequence_length.device)
+        predictor_mask = (
+            (~make_pad_mask(encoder_sequence_length, maxlen=encoder_sequence_length.max()))
+            .type(int_type)
+            .to(encoder_sequence_length.device)
+        )
         index_div_bool_zeros_count_tile_out = index_div_bool_zeros_count_tile_out * predictor_mask
 
         predictor_alignments = index_div_bool_zeros_count_tile_out
-        predictor_alignments_length = predictor_alignments.sum(-1).type(encoder_sequence_length.dtype)
+        predictor_alignments_length = predictor_alignments.sum(-1).type(
+            encoder_sequence_length.dtype
+        )
         return predictor_alignments.detach(), predictor_alignments_length.detach()
+
 
 @tables.register("predictor_classes", "CifPredictorV2")
 class CifPredictorV2(torch.nn.Module):
-    def __init__(self,
-                 idim,
-                 l_order,
-                 r_order,
-                 threshold=1.0,
-                 dropout=0.1,
-                 smooth_factor=1.0,
-                 noise_threshold=0,
-                 tail_threshold=0.0,
-                 tf2torch_tensor_name_prefix_torch="predictor",
-                 tf2torch_tensor_name_prefix_tf="seq2seq/cif",
-                 tail_mask=True,
-                 ):
+    def __init__(
+        self,
+        idim,
+        l_order,
+        r_order,
+        threshold=1.0,
+        dropout=0.1,
+        smooth_factor=1.0,
+        noise_threshold=0,
+        tail_threshold=0.0,
+        tf2torch_tensor_name_prefix_torch="predictor",
+        tf2torch_tensor_name_prefix_tf="seq2seq/cif",
+        tail_mask=True,
+    ):
         super().__init__()
 
         self.pad = torch.nn.ConstantPad1d((l_order, r_order), 0)
@@ -169,16 +199,23 @@
         self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf
         self.tail_mask = tail_mask
 
-    def forward(self, hidden, target_label=None, mask=None, ignore_id=-1, mask_chunk_predictor=None,
-                target_label_length=None):
-        
+    def forward(
+        self,
+        hidden,
+        target_label=None,
+        mask=None,
+        ignore_id=-1,
+        mask_chunk_predictor=None,
+        target_label_length=None,
+    ):
+
         with autocast(False):
             h = hidden
             context = h.transpose(1, 2)
             queries = self.pad(context)
             output = torch.relu(self.cif_conv1d(queries))
             output = output.transpose(1, 2)
-    
+
             output = self.cif_output(output)
             alphas = torch.sigmoid(output)
             alphas = torch.nn.functional.relu(alphas * self.smooth_factor - self.noise_threshold)
@@ -200,10 +237,14 @@
                 alphas *= (target_length / token_num)[:, None].repeat(1, alphas.size(1))
             elif self.tail_threshold > 0.0:
                 if self.tail_mask:
-                    hidden, alphas, token_num = self.tail_process_fn(hidden, alphas, token_num, mask=mask)
+                    hidden, alphas, token_num = self.tail_process_fn(
+                        hidden, alphas, token_num, mask=mask
+                    )
                 else:
-                    hidden, alphas, token_num = self.tail_process_fn(hidden, alphas, token_num, mask=None)
-    
+                    hidden, alphas, token_num = self.tail_process_fn(
+                        hidden, alphas, token_num, mask=None
+                    )
+
             acoustic_embeds, cif_peak = cif(hidden, alphas, self.threshold)
             if target_length is None and self.tail_threshold > 0.0:
                 token_num_int = torch.max(token_num).type(torch.int32).item()
@@ -232,9 +273,9 @@
         cache_hiddens = []
 
         if cache is not None and "chunk_size" in cache:
-            alphas[:, :cache["chunk_size"][0]] = 0.0
+            alphas[:, : cache["chunk_size"][0]] = 0.0
             if not is_final:
-                alphas[:, sum(cache["chunk_size"][:2]):] = 0.0
+                alphas[:, sum(cache["chunk_size"][:2]) :] = 0.0
         if cache is not None and "cif_alphas" in cache and "cif_hidden" in cache:
             cache["cif_hidden"] = to_device(cache["cif_hidden"], device=hidden.device)
             cache["cif_alphas"] = to_device(cache["cif_alphas"], device=alphas.device)
@@ -284,10 +325,12 @@
 
         max_token_len = max(token_length)
         if max_token_len == 0:
-             return hidden, torch.stack(token_length, 0), None, None
+            return hidden, torch.stack(token_length, 0), None, None
         list_ls = []
         for b in range(batch_size):
-            pad_frames = torch.zeros((max_token_len - token_length[b], hidden_size), device=alphas.device)
+            pad_frames = torch.zeros(
+                (max_token_len - token_length[b], hidden_size), device=alphas.device
+            )
             if token_length[b] == 0:
                 list_ls.append(pad_frames)
             else:
@@ -299,7 +342,6 @@
         cache["cif_hidden"] = torch.stack(cache_hiddens, axis=0)
         cache["cif_hidden"] = torch.unsqueeze(cache["cif_hidden"], axis=0)
         return torch.stack(list_ls, 0), torch.stack(token_length, 0), None, None
-
 
     def tail_process_fn(self, hidden, alphas, token_num=None, mask=None):
         b, t, d = hidden.size()
@@ -327,9 +369,9 @@
 
         return hidden, alphas, token_num_floor
 
-    def gen_frame_alignments(self,
-                             alphas: torch.Tensor = None,
-                             encoder_sequence_length: torch.Tensor = None):
+    def gen_frame_alignments(
+        self, alphas: torch.Tensor = None, encoder_sequence_length: torch.Tensor = None
+    ):
         batch_size, maximum_length = alphas.size()
         int_type = torch.int32
 
@@ -352,11 +394,15 @@
         index_div = torch.floor(torch.true_divide(alphas_cumsum, index)).type(int_type)
         index_div_bool_zeros = index_div.eq(0)
         index_div_bool_zeros_count = torch.sum(index_div_bool_zeros, dim=-1) + 1
-        index_div_bool_zeros_count = torch.clamp(index_div_bool_zeros_count, 0, encoder_sequence_length.max())
+        index_div_bool_zeros_count = torch.clamp(
+            index_div_bool_zeros_count, 0, encoder_sequence_length.max()
+        )
         token_num_mask = (~make_pad_mask(token_num, maxlen=max_token_num)).to(token_num.device)
         index_div_bool_zeros_count *= token_num_mask
 
-        index_div_bool_zeros_count_tile = index_div_bool_zeros_count[:, :, None].repeat(1, 1, maximum_length)
+        index_div_bool_zeros_count_tile = index_div_bool_zeros_count[:, :, None].repeat(
+            1, 1, maximum_length
+        )
         ones = torch.ones_like(index_div_bool_zeros_count_tile)
         zeros = torch.zeros_like(index_div_bool_zeros_count_tile)
         ones = torch.cumsum(ones, dim=2)
@@ -367,19 +413,25 @@
         index_div_bool_zeros_count_tile = 1 - index_div_bool_zeros_count_tile_bool.type(int_type)
         index_div_bool_zeros_count_tile_out = torch.sum(index_div_bool_zeros_count_tile, dim=1)
         index_div_bool_zeros_count_tile_out = index_div_bool_zeros_count_tile_out.type(int_type)
-        predictor_mask = (~make_pad_mask(encoder_sequence_length, maxlen=encoder_sequence_length.max())).type(
-            int_type).to(encoder_sequence_length.device)
+        predictor_mask = (
+            (~make_pad_mask(encoder_sequence_length, maxlen=encoder_sequence_length.max()))
+            .type(int_type)
+            .to(encoder_sequence_length.device)
+        )
         index_div_bool_zeros_count_tile_out = index_div_bool_zeros_count_tile_out * predictor_mask
 
         predictor_alignments = index_div_bool_zeros_count_tile_out
-        predictor_alignments_length = predictor_alignments.sum(-1).type(encoder_sequence_length.dtype)
+        predictor_alignments_length = predictor_alignments.sum(-1).type(
+            encoder_sequence_length.dtype
+        )
         return predictor_alignments.detach(), predictor_alignments_length.detach()
+
 
 @tables.register("predictor_classes", "CifPredictorV2Export")
 class CifPredictorV2Export(torch.nn.Module):
     def __init__(self, model, **kwargs):
         super().__init__()
-        
+
         self.pad = model.pad
         self.cif_conv1d = model.cif_conv1d
         self.cif_output = model.cif_output
@@ -387,27 +439,31 @@
         self.smooth_factor = model.smooth_factor
         self.noise_threshold = model.noise_threshold
         self.tail_threshold = model.tail_threshold
-    
-    def forward(self, hidden: torch.Tensor,
-                mask: torch.Tensor,
-                ):
+
+    def forward(
+        self,
+        hidden: torch.Tensor,
+        mask: torch.Tensor,
+    ):
         alphas, token_num = self.forward_cnn(hidden, mask)
         mask = mask.transpose(-1, -2).float()
         mask = mask.squeeze(-1)
         hidden, alphas, token_num = self.tail_process_fn(hidden, alphas, mask=mask)
         acoustic_embeds, cif_peak = cif_export(hidden, alphas, self.threshold)
-        
+
         return acoustic_embeds, token_num, alphas, cif_peak
-    
-    def forward_cnn(self, hidden: torch.Tensor,
-                    mask: torch.Tensor,
-                    ):
+
+    def forward_cnn(
+        self,
+        hidden: torch.Tensor,
+        mask: torch.Tensor,
+    ):
         h = hidden
         context = h.transpose(1, 2)
         queries = self.pad(context)
         output = torch.relu(self.cif_conv1d(queries))
         output = output.transpose(1, 2)
-        
+
         output = self.cif_output(output)
         alphas = torch.sigmoid(output)
         alphas = torch.nn.functional.relu(alphas * self.smooth_factor - self.noise_threshold)
@@ -415,67 +471,127 @@
         alphas = alphas * mask
         alphas = alphas.squeeze(-1)
         token_num = alphas.sum(-1)
-        
+
         return alphas, token_num
-    
+
     def tail_process_fn(self, hidden, alphas, token_num=None, mask=None):
         b, t, d = hidden.size()
         tail_threshold = self.tail_threshold
-        
+
         zeros_t = torch.zeros((b, 1), dtype=torch.float32, device=alphas.device)
         ones_t = torch.ones_like(zeros_t)
-        
+
         mask_1 = torch.cat([mask, zeros_t], dim=1)
         mask_2 = torch.cat([ones_t, mask], dim=1)
         mask = mask_2 - mask_1
         tail_threshold = mask * tail_threshold
         alphas = torch.cat([alphas, zeros_t], dim=1)
         alphas = torch.add(alphas, tail_threshold)
-        
+
         zeros = torch.zeros((b, 1, d), dtype=hidden.dtype).to(hidden.device)
         hidden = torch.cat([hidden, zeros], dim=1)
         token_num = alphas.sum(dim=-1)
         token_num_floor = torch.floor(token_num)
-        
+
         return hidden, alphas, token_num_floor
+
+
+@torch.jit.script
+def cif_v1_export(hidden, alphas, threshold: float):
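+    # Vectorized continuous integrate-and-fire (CIF) for export: token
+    # boundaries ("fires") are detected where the running sum of alphas
+    # crosses an integer, and token embeddings are recovered from prefix
+    # sums of the alpha-weighted hidden states, avoiding a per-frame loop.
+    # Note: the integer-crossing logic implicitly assumes threshold == 1.0.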
+    device = hidden.device
+    dtype = hidden.dtype
+    batch_size, len_time, hidden_size = hidden.size()
+    threshold = torch.tensor([threshold], dtype=alphas.dtype).to(alphas.device)
+
+    frames = torch.zeros(batch_size, len_time, hidden_size, dtype=dtype, device=device)
+    fires = torch.zeros(batch_size, len_time, dtype=dtype, device=device)
+
+    # prefix_sum = torch.cumsum(alphas, dim=1)
+    prefix_sum = torch.cumsum(alphas, dim=1, dtype=torch.float64).to(
+        torch.float32
+    )  # cumsum precision degradation causes wrong results in extreme cases
+    prefix_sum_floor = torch.floor(prefix_sum)
+    dislocation_prefix_sum = torch.roll(prefix_sum, 1, dims=1)
+    dislocation_prefix_sum_floor = torch.floor(dislocation_prefix_sum)
+
+    dislocation_prefix_sum_floor[:, 0] = 0
+    dislocation_diff = prefix_sum_floor - dislocation_prefix_sum_floor
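+    # a positive difference means floor(prefix_sum) increased at this frame:
+    # the accumulated alpha mass crossed an integer and a token fires here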
+
+    fire_idxs = dislocation_diff > 0
+    fires[fire_idxs] = 1
+    fires = fires + prefix_sum - prefix_sum_floor
+
+    prefix_sum_hidden = torch.cumsum(alphas.unsqueeze(-1).tile((1, 1, hidden_size)) * hidden, dim=1)
+    frames = prefix_sum_hidden[fire_idxs]
+    shift_frames = torch.roll(frames, 1, dims=0)
+
+    batch_len = fire_idxs.sum(1)
+    batch_idxs = torch.cumsum(batch_len, dim=0)
+    shift_batch_idxs = torch.roll(batch_idxs, 1, dims=0)
+    shift_batch_idxs[0] = 0
+    shift_frames[shift_batch_idxs] = 0
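+    # shift_frames now holds, for each fire, the prefix sum at the previous
+    # fire of the same sequence (zero at each sequence's first fire)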
+
+    remains = fires - torch.floor(fires)
+    remain_frames = remains[fire_idxs].unsqueeze(-1).tile((1, hidden_size)) * hidden[fire_idxs]
+
+    shift_remain_frames = torch.roll(remain_frames, 1, dims=0)
+    shift_remain_frames[shift_batch_idxs] = 0
+
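+    # per-token embedding: the weighted sum between consecutive fires, with
+    # each boundary frame's fractional remainder moved to the following token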
+    frames = frames - shift_frames + shift_remain_frames - remain_frames
+
+    # max_label_len = batch_len.max()
+    max_label_len = alphas.sum(dim=-1)
+    max_label_len = torch.floor(max_label_len).max().to(dtype=torch.int64)
+
+    frame_fires = torch.zeros(batch_size, max_label_len, hidden_size, dtype=dtype, device=device)
+    indices = torch.arange(max_label_len, device=device).expand(batch_size, -1)
+    frame_fires_idxs = indices < batch_len.unsqueeze(1)
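+    # scatter the flattened per-token embeddings back into a padded
+    # (batch, max_label_len, hidden_size) tensor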
+    frame_fires[frame_fires_idxs] = frames
+    return frame_fires, fires
+
 
 @torch.jit.script
 def cif_export(hidden, alphas, threshold: float):
     batch_size, len_time, hidden_size = hidden.size()
     threshold = torch.tensor([threshold], dtype=alphas.dtype).to(alphas.device)
-    
+
     # loop vars
     integrate = torch.zeros([batch_size], dtype=alphas.dtype, device=hidden.device)
     frame = torch.zeros([batch_size, hidden_size], dtype=hidden.dtype, device=hidden.device)
     # intermediate vars along time
     list_fires = []
     list_frames = []
-    
+
     for t in range(len_time):
         alpha = alphas[:, t]
-        distribution_completion = torch.ones([batch_size], dtype=alphas.dtype, device=hidden.device) - integrate
-        
+        distribution_completion = (
+            torch.ones([batch_size], dtype=alphas.dtype, device=hidden.device) - integrate
+        )
+
         integrate += alpha
         list_fires.append(integrate)
-        
+
         fire_place = integrate >= threshold
-        integrate = torch.where(fire_place,
-                                integrate - torch.ones([batch_size], dtype=alphas.dtype, device=hidden.device),
-                                integrate)
-        cur = torch.where(fire_place,
-                          distribution_completion,
-                          alpha)
+        integrate = torch.where(
+            fire_place,
+            integrate - torch.ones([batch_size], dtype=alphas.dtype, device=hidden.device),
+            integrate,
+        )
+        cur = torch.where(fire_place, distribution_completion, alpha)
         remainds = alpha - cur
-        
+
         frame += cur[:, None] * hidden[:, t, :]
         list_frames.append(frame)
-        frame = torch.where(fire_place[:, None].repeat(1, hidden_size),
-                            remainds[:, None] * hidden[:, t, :],
-                            frame)
-    
+        frame = torch.where(
+            fire_place[:, None].repeat(1, hidden_size), remainds[:, None] * hidden[:, t, :], frame
+        )
+
     fires = torch.stack(list_fires, 1)
     frames = torch.stack(list_frames, 1)
-    
+
     fire_idxs = fires >= threshold
     frame_fires = torch.zeros_like(hidden)
     max_label_len = frames[0, fire_idxs[0]].size(0)
@@ -483,7 +599,7 @@
         frame_fire = frames[b, fire_idxs[b]]
         frame_len = frame_fire.size(0)
         frame_fires[b, :frame_len, :] = frame_fire
-        
+
         if frame_len >= max_label_len:
             max_label_len = frame_len
     frame_fires = frame_fires[:, :max_label_len, :]
@@ -495,7 +611,7 @@
     def __init__(self, normalize_length=False):
         super(mae_loss, self).__init__()
         self.normalize_length = normalize_length
-        self.criterion = torch.nn.L1Loss(reduction='sum')
+        self.criterion = torch.nn.L1Loss(reduction="sum")
 
     def forward(self, token_length, pre_token_length):
         loss_token_normalizer = token_length.size(0)
@@ -524,19 +640,17 @@
         list_fires.append(integrate)
 
         fire_place = integrate >= threshold
-        integrate = torch.where(fire_place,
-                                integrate - torch.ones([batch_size], device=hidden.device),
-                                integrate)
-        cur = torch.where(fire_place,
-                          distribution_completion,
-                          alpha)
+        integrate = torch.where(
+            fire_place, integrate - torch.ones([batch_size], device=hidden.device), integrate
+        )
+        cur = torch.where(fire_place, distribution_completion, alpha)
         remainds = alpha - cur
 
         frame += cur[:, None] * hidden[:, t, :]
         list_frames.append(frame)
-        frame = torch.where(fire_place[:, None].repeat(1, hidden_size),
-                            remainds[:, None] * hidden[:, t, :],
-                            frame)
+        frame = torch.where(
+            fire_place[:, None].repeat(1, hidden_size), remainds[:, None] * hidden[:, t, :], frame
+        )
 
     fires = torch.stack(list_fires, 1)
     frames = torch.stack(list_frames, 1)
@@ -549,6 +663,76 @@
         pad_l = torch.zeros([max_label_len - l.size(0), hidden_size], device=hidden.device)
         list_ls.append(torch.cat([l, pad_l], 0))
     return torch.stack(list_ls, 0), fires
+
+
+def cif_wo_hidden_v1(alphas, threshold, return_fire_idxs=False):
+    batch_size, len_time = alphas.size()
+    device = alphas.device
+    dtype = alphas.dtype
+
+    threshold = torch.tensor([threshold], dtype=alphas.dtype).to(alphas.device)
+
+    fires = torch.zeros(batch_size, len_time, dtype=dtype, device=device)
+
+    # prefix_sum = torch.cumsum(alphas, dim=1)
+    prefix_sum = torch.cumsum(alphas, dim=1, dtype=torch.float64).to(
+        torch.float32
+    )  # cumsum precision degradation causes wrong results in extreme cases
+    prefix_sum_floor = torch.floor(prefix_sum)
+    dislocation_prefix_sum = torch.roll(prefix_sum, 1, dims=1)
+    dislocation_prefix_sum_floor = torch.floor(dislocation_prefix_sum)
+
+    dislocation_prefix_sum_floor[:, 0] = 0
+    dislocation_diff = prefix_sum_floor - dislocation_prefix_sum_floor
+
+    fire_idxs = dislocation_diff > 0
+    fires[fire_idxs] = 1
+    fires = fires + prefix_sum - prefix_sum_floor
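+    # fires now holds the fractional alpha remainder at every frame, plus 1
+    # at fire frames, so fires >= 1 marks a token boundary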
+    if return_fire_idxs:
+        return fires, fire_idxs
+    return fires
+
+
+def cif_v1(hidden, alphas, threshold):
+    fires, fire_idxs = cif_wo_hidden_v1(alphas, threshold, return_fire_idxs=True)
+
+    device = hidden.device
+    dtype = hidden.dtype
+    batch_size, len_time, hidden_size = hidden.size()
+    frames = torch.zeros(batch_size, len_time, hidden_size, dtype=dtype, device=device)
+    prefix_sum_hidden = torch.cumsum(alphas.unsqueeze(-1).tile((1, 1, hidden_size)) * hidden, dim=1)
+
+    frames = prefix_sum_hidden[fire_idxs]
+    shift_frames = torch.roll(frames, 1, dims=0)
+
+    batch_len = fire_idxs.sum(1)
+    batch_idxs = torch.cumsum(batch_len, dim=0)
+    shift_batch_idxs = torch.roll(batch_idxs, 1, dims=0)
+    shift_batch_idxs[0] = 0
+    shift_frames[shift_batch_idxs] = 0
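+    # the rolled-in prefix sum is zeroed at each sequence's first fire so
+    # batch items do not leak into each other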
+
+    remains = fires - torch.floor(fires)
+    remain_frames = remains[fire_idxs].unsqueeze(-1).tile((1, hidden_size)) * hidden[fire_idxs]
+
+    shift_remain_frames = torch.roll(remain_frames, 1, dims=0)
+    shift_remain_frames[shift_batch_idxs] = 0
+
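+    # per-token embedding: difference of prefix sums between consecutive
+    # fires, carrying each boundary frame's fractional remainder forward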
+    frames = frames - shift_frames + shift_remain_frames - remain_frames
+
+    # max_label_len = batch_len.max()
+    max_label_len = (
+        torch.round(alphas.sum(-1)).int().max()
+    )  # round (not floor) the total alpha mass to get the max token count
+
+    frame_fires = torch.zeros(batch_size, max_label_len, hidden_size, dtype=dtype, device=device)
+    indices = torch.arange(max_label_len, device=device).expand(batch_size, -1)
+    frame_fires_idxs = indices < batch_len.unsqueeze(1)
+    frame_fires[frame_fires_idxs] = frames
+    return frame_fires, fires
 
 
 def cif_wo_hidden(alphas, threshold):
@@ -566,10 +750,11 @@
         list_fires.append(integrate)
 
         fire_place = integrate >= threshold
-        integrate = torch.where(fire_place,
-                                integrate - torch.ones([batch_size], device=alphas.device)*threshold,
-                                integrate)
+        integrate = torch.where(
+            fire_place,
+            integrate - torch.ones([batch_size], device=alphas.device) * threshold,
+            integrate,
+        )
 
     fires = torch.stack(list_fires, 1)
     return fires
-

--
Gitblit v1.9.1