#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

import logging
from typing import Optional, Tuple

import numpy as np
import torch
from torch import nn
from torch import Tensor
from torch.cuda.amp import autocast

from funasr.models.scama.utils import sequence_mask
from funasr.models.transformer.utils.nets_utils import make_pad_mask
from funasr.register import tables
from funasr.train_utils.device_funcs import to_device

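# Continuous Integrate-and-Fire (CIF, https://arxiv.org/abs/1905.11235):
# per-frame weights ("alphas") are accumulated along time, and an acoustic
# embedding is emitted ("fired") each time the running integral crosses the
# threshold, yielding one embedding per predicted token.
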
@tables.register("predictor_classes", "CifPredictor")
class CifPredictor(torch.nn.Module):
    def __init__(
        self,
        idim,
        l_order,
        r_order,
        threshold=1.0,
        dropout=0.1,
        smooth_factor=1.0,
        noise_threshold=0,
        tail_threshold=0.45,
    ):
        super().__init__()

        self.pad = torch.nn.ConstantPad1d((l_order, r_order), 0)
        self.cif_conv1d = torch.nn.Conv1d(idim, idim, l_order + r_order + 1, groups=idim)
        self.cif_output = torch.nn.Linear(idim, 1)
        self.dropout = torch.nn.Dropout(p=dropout)
        self.threshold = threshold
        self.smooth_factor = smooth_factor
        self.noise_threshold = noise_threshold
        self.tail_threshold = tail_threshold

    def forward(
        self,
        hidden,
        target_label=None,
        mask=None,
        ignore_id=-1,
        mask_chunk_predictor=None,
        target_label_length=None,
    ):

        with autocast(False):
            h = hidden
            context = h.transpose(1, 2)
            queries = self.pad(context)
            memory = self.cif_conv1d(queries)
            output = memory + context
            output = self.dropout(output)
            output = output.transpose(1, 2)
            output = torch.relu(output)
            output = self.cif_output(output)
            alphas = torch.sigmoid(output)
            alphas = torch.nn.functional.relu(alphas * self.smooth_factor - self.noise_threshold)
            if mask is not None:
                mask = mask.transpose(-1, -2).float()
                alphas = alphas * mask
            if mask_chunk_predictor is not None:
                alphas = alphas * mask_chunk_predictor
            alphas = alphas.squeeze(-1)
            mask = mask.squeeze(-1)
            if target_label_length is not None:
                target_length = target_label_length
            elif target_label is not None:
                target_length = (target_label != ignore_id).float().sum(-1)
            else:
                target_length = None
            token_num = alphas.sum(-1)
            if target_length is not None:
                alphas *= (target_length / token_num)[:, None].repeat(1, alphas.size(1))
            elif self.tail_threshold > 0.0:
                hidden, alphas, token_num = self.tail_process_fn(
                    hidden, alphas, token_num, mask=mask
                )

        acoustic_embeds, cif_peak = cif(hidden, alphas, self.threshold)

        if target_length is None and self.tail_threshold > 0.0:
            token_num_int = torch.max(token_num).type(torch.int32).item()
            acoustic_embeds = acoustic_embeds[:, :token_num_int, :]

        return acoustic_embeds, token_num, alphas, cif_peak

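    # `tail_process_fn` appends one zero frame and a trailing alpha of
    # `tail_threshold` so that residual integrated weight at the end of the
    # utterance still fires a final token.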
    def tail_process_fn(self, hidden, alphas, token_num=None, mask=None):
        b, t, d = hidden.size()
        tail_threshold = self.tail_threshold
        if mask is not None:
            zeros_t = torch.zeros((b, 1), dtype=torch.float32, device=alphas.device)
            ones_t = torch.ones_like(zeros_t)
            mask_1 = torch.cat([mask, zeros_t], dim=1)
            mask_2 = torch.cat([ones_t, mask], dim=1)
            mask = mask_2 - mask_1
            tail_threshold = mask * tail_threshold
            alphas = torch.cat([alphas, zeros_t], dim=1)
            alphas = torch.add(alphas, tail_threshold)
        else:
            tail_threshold = torch.tensor([tail_threshold], dtype=alphas.dtype).to(alphas.device)
            tail_threshold = torch.reshape(tail_threshold, (1, 1))
            alphas = torch.cat([alphas, tail_threshold], dim=1)
        zeros = torch.zeros((b, 1, d), dtype=hidden.dtype).to(hidden.device)
        hidden = torch.cat([hidden, zeros], dim=1)
        token_num = alphas.sum(dim=-1)
        token_num_floor = torch.floor(token_num)

        return hidden, alphas, token_num_floor

    def gen_frame_alignments(
        self, alphas: torch.Tensor = None, encoder_sequence_length: torch.Tensor = None
    ):
        batch_size, maximum_length = alphas.size()
        int_type = torch.int32

        if self.training:
            token_num = torch.round(torch.sum(alphas, dim=1)).type(int_type)
        else:
            token_num = torch.floor(torch.sum(alphas, dim=1)).type(int_type)
        max_token_num = torch.max(token_num).item()

        alphas_cumsum = torch.cumsum(alphas, dim=1)
        alphas_cumsum = alphas_cumsum[:, None, :].repeat(1, max_token_num, 1)
        index = torch.ones([batch_size, max_token_num], dtype=int_type, device=alphas.device)
        index = torch.cumsum(index, dim=1)
        index = index[:, :, None].repeat(1, 1, maximum_length).to(alphas.device)

        index_div = torch.floor(torch.true_divide(alphas_cumsum, index)).type(int_type)
        index_div_bool_zeros = index_div.eq(0)
        index_div_bool_zeros_count = torch.sum(index_div_bool_zeros, dim=-1) + 1
        index_div_bool_zeros_count = torch.clamp(
            index_div_bool_zeros_count, 0, encoder_sequence_length.max()
        )
        token_num_mask = (~make_pad_mask(token_num, maxlen=max_token_num)).to(token_num.device)
        index_div_bool_zeros_count *= token_num_mask

        index_div_bool_zeros_count_tile = index_div_bool_zeros_count[:, :, None].repeat(
            1, 1, maximum_length
        )
        ones = torch.ones_like(index_div_bool_zeros_count_tile)
        zeros = torch.zeros_like(index_div_bool_zeros_count_tile)
        ones = torch.cumsum(ones, dim=2)
        cond = index_div_bool_zeros_count_tile == ones
        index_div_bool_zeros_count_tile = torch.where(cond, zeros, ones)

        index_div_bool_zeros_count_tile_bool = index_div_bool_zeros_count_tile.type(torch.bool)
        index_div_bool_zeros_count_tile = 1 - index_div_bool_zeros_count_tile_bool.type(int_type)
        index_div_bool_zeros_count_tile_out = torch.sum(index_div_bool_zeros_count_tile, dim=1)
        index_div_bool_zeros_count_tile_out = index_div_bool_zeros_count_tile_out.type(int_type)
        predictor_mask = (
            (~make_pad_mask(encoder_sequence_length, maxlen=encoder_sequence_length.max()))
            .type(int_type)
            .to(encoder_sequence_length.device)
        )
        index_div_bool_zeros_count_tile_out = index_div_bool_zeros_count_tile_out * predictor_mask

        predictor_alignments = index_div_bool_zeros_count_tile_out
        predictor_alignments_length = predictor_alignments.sum(-1).type(
            encoder_sequence_length.dtype
        )
        return predictor_alignments.detach(), predictor_alignments_length.detach()


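# Usage sketch for CifPredictor (illustrative shapes, assuming a mask of ones):
#   predictor = CifPredictor(idim=256, l_order=1, r_order=1)
#   hidden = torch.randn(2, 100, 256)   # (batch, time, idim) encoder output
#   mask = torch.ones(2, 1, 100)        # (batch, 1, time) non-padding mask
#   acoustic_embeds, token_num, alphas, cif_peak = predictor(hidden, mask=mask)
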
@tables.register("predictor_classes", "CifPredictorV2")
class CifPredictorV2(torch.nn.Module):
    def __init__(
        self,
        idim,
        l_order,
        r_order,
        threshold=1.0,
        dropout=0.1,
        smooth_factor=1.0,
        noise_threshold=0,
        tail_threshold=0.0,
        tf2torch_tensor_name_prefix_torch="predictor",
        tf2torch_tensor_name_prefix_tf="seq2seq/cif",
        tail_mask=True,
    ):
        super().__init__()

        self.pad = torch.nn.ConstantPad1d((l_order, r_order), 0)
        self.cif_conv1d = torch.nn.Conv1d(idim, idim, l_order + r_order + 1)
        self.cif_output = torch.nn.Linear(idim, 1)
        self.dropout = torch.nn.Dropout(p=dropout)
        self.threshold = threshold
        self.smooth_factor = smooth_factor
        self.noise_threshold = noise_threshold
        self.tail_threshold = tail_threshold
        self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch
        self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf
        self.tail_mask = tail_mask

    def forward(
        self,
        hidden,
        target_label=None,
        mask=None,
        ignore_id=-1,
        mask_chunk_predictor=None,
        target_label_length=None,
    ):

        with autocast(False):
            h = hidden
            context = h.transpose(1, 2)
            queries = self.pad(context)
            output = torch.relu(self.cif_conv1d(queries))
            output = output.transpose(1, 2)

            output = self.cif_output(output)
            alphas = torch.sigmoid(output)
            alphas = torch.nn.functional.relu(alphas * self.smooth_factor - self.noise_threshold)
            if mask is not None:
                mask = mask.transpose(-1, -2).float()
                alphas = alphas * mask
            if mask_chunk_predictor is not None:
                alphas = alphas * mask_chunk_predictor
            alphas = alphas.squeeze(-1)
            mask = mask.squeeze(-1)
            if target_label_length is not None:
                target_length = target_label_length.squeeze(-1)
            elif target_label is not None:
                target_length = (target_label != ignore_id).float().sum(-1)
            else:
                target_length = None
            token_num = alphas.sum(-1)
            if target_length is not None:
                alphas *= (target_length / token_num)[:, None].repeat(1, alphas.size(1))
            elif self.tail_threshold > 0.0:
                if self.tail_mask:
                    hidden, alphas, token_num = self.tail_process_fn(
                        hidden, alphas, token_num, mask=mask
                    )
                else:
                    hidden, alphas, token_num = self.tail_process_fn(
                        hidden, alphas, token_num, mask=None
                    )

        acoustic_embeds, cif_peak = cif_v1(hidden, alphas, self.threshold)
        if target_length is None and self.tail_threshold > 0.0:
            token_num_int = torch.max(token_num).type(torch.int32).item()
            acoustic_embeds = acoustic_embeds[:, :token_num_int, :]

        return acoustic_embeds, token_num, alphas, cif_peak

    def forward_chunk(self, hidden, cache=None, **kwargs):
        is_final = kwargs.get("is_final", False)
        batch_size, len_time, hidden_size = hidden.shape
        h = hidden
        context = h.transpose(1, 2)
        queries = self.pad(context)
        output = torch.relu(self.cif_conv1d(queries))
        output = output.transpose(1, 2)
        output = self.cif_output(output)
        alphas = torch.sigmoid(output)
        alphas = torch.nn.functional.relu(alphas * self.smooth_factor - self.noise_threshold)
        alphas = alphas.squeeze(-1)

        token_length = []
        list_fires = []
        list_frames = []
        cache_alphas = []
        cache_hiddens = []

        if cache is not None and "chunk_size" in cache:
            alphas[:, : cache["chunk_size"][0]] = 0.0
            if not is_final:
                alphas[:, sum(cache["chunk_size"][:2]) :] = 0.0
        if cache is not None and "cif_alphas" in cache and "cif_hidden" in cache:
            cache["cif_hidden"] = to_device(cache["cif_hidden"], device=hidden.device)
            cache["cif_alphas"] = to_device(cache["cif_alphas"], device=alphas.device)
            hidden = torch.cat((cache["cif_hidden"], hidden), dim=1)
            alphas = torch.cat((cache["cif_alphas"], alphas), dim=1)
        if cache is not None and is_final:
            tail_hidden = torch.zeros((batch_size, 1, hidden_size), device=hidden.device)
            tail_alphas = torch.tensor([[self.tail_threshold]], device=alphas.device)
            tail_alphas = torch.tile(tail_alphas, (batch_size, 1))
            hidden = torch.cat((hidden, tail_hidden), dim=1)
            alphas = torch.cat((alphas, tail_alphas), dim=1)

        # frame-by-frame integrate-and-fire over the (possibly cache-extended) chunk
        len_time = alphas.shape[1]
        for b in range(batch_size):
            integrate = 0.0
            frames = torch.zeros((hidden_size), device=hidden.device)
            list_frame = []
            list_fire = []
            for t in range(len_time):
                alpha = alphas[b][t]
                if alpha + integrate < self.threshold:
                    integrate += alpha
                    list_fire.append(integrate)
                    frames += alpha * hidden[b][t]
                else:
                    frames += (self.threshold - integrate) * hidden[b][t]
                    list_frame.append(frames)
                    integrate += alpha
                    list_fire.append(integrate)
                    integrate -= self.threshold
                    frames = integrate * hidden[b][t]

            cache_alphas.append(integrate)
            if integrate > 0.0:
                cache_hiddens.append(frames / integrate)
            else:
                cache_hiddens.append(frames)

            token_length.append(torch.tensor(len(list_frame), device=alphas.device))
            list_fires.append(list_fire)
            list_frames.append(list_frame)

        # carry the un-fired remainder over to the next chunk
        cache["cif_alphas"] = torch.stack(cache_alphas, axis=0)
        cache["cif_alphas"] = torch.unsqueeze(cache["cif_alphas"], axis=0)
        cache["cif_hidden"] = torch.stack(cache_hiddens, axis=0)
        cache["cif_hidden"] = torch.unsqueeze(cache["cif_hidden"], axis=0)

        max_token_len = max(token_length)
        if max_token_len == 0:
            return hidden, torch.stack(token_length, 0), None, None
        list_ls = []
        for b in range(batch_size):
            pad_frames = torch.zeros(
                (max_token_len - token_length[b], hidden_size), device=alphas.device
            )
            if token_length[b] == 0:
                list_ls.append(pad_frames)
            else:
                list_frames[b] = torch.stack(list_frames[b])
                list_ls.append(torch.cat((list_frames[b], pad_frames), dim=0))

        return torch.stack(list_ls, 0), torch.stack(token_length, 0), None, None

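    # Streaming usage sketch for `forward_chunk` (illustrative; per the masking
    # above, frames before chunk_size[0] and, for non-final chunks, frames after
    # sum(chunk_size[:2]) are zeroed out):
    #   cache = {"chunk_size": (5, 10, 5)}
    #   embeds, token_num, _, _ = predictor.forward_chunk(chunk_hidden, cache, is_final=False)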
    def tail_process_fn(self, hidden, alphas, token_num=None, mask=None):
        b, t, d = hidden.size()
        tail_threshold = self.tail_threshold
        if mask is not None:
            zeros_t = torch.zeros((b, 1), dtype=torch.float32, device=alphas.device)
            ones_t = torch.ones_like(zeros_t)
            mask_1 = torch.cat([mask, zeros_t], dim=1)
            mask_2 = torch.cat([ones_t, mask], dim=1)
            mask = mask_2 - mask_1
            tail_threshold = mask * tail_threshold
            alphas = torch.cat([alphas, zeros_t], dim=1)
            alphas = torch.add(alphas, tail_threshold)
        else:
            tail_threshold = torch.tensor([tail_threshold], dtype=alphas.dtype).to(alphas.device)
            tail_threshold = torch.reshape(tail_threshold, (1, 1))
            alphas = torch.cat([alphas, tail_threshold], dim=1)
        zeros = torch.zeros((b, 1, d), dtype=hidden.dtype).to(hidden.device)
        hidden = torch.cat([hidden, zeros], dim=1)
        token_num = alphas.sum(dim=-1)
        token_num_floor = torch.floor(token_num)

        return hidden, alphas, token_num_floor

    def gen_frame_alignments(
        self, alphas: torch.Tensor = None, encoder_sequence_length: torch.Tensor = None
    ):
        batch_size, maximum_length = alphas.size()
        int_type = torch.int32

        if self.training:
            token_num = torch.round(torch.sum(alphas, dim=1)).type(int_type)
        else:
            token_num = torch.floor(torch.sum(alphas, dim=1)).type(int_type)
        max_token_num = torch.max(token_num).item()

        alphas_cumsum = torch.cumsum(alphas, dim=1)
        alphas_cumsum = alphas_cumsum[:, None, :].repeat(1, max_token_num, 1)
        index = torch.ones([batch_size, max_token_num], dtype=int_type, device=alphas.device)
        index = torch.cumsum(index, dim=1)
        index = index[:, :, None].repeat(1, 1, maximum_length).to(alphas.device)

        index_div = torch.floor(torch.true_divide(alphas_cumsum, index)).type(int_type)
        index_div_bool_zeros = index_div.eq(0)
        index_div_bool_zeros_count = torch.sum(index_div_bool_zeros, dim=-1) + 1
        index_div_bool_zeros_count = torch.clamp(
            index_div_bool_zeros_count, 0, encoder_sequence_length.max()
        )
        token_num_mask = (~make_pad_mask(token_num, maxlen=max_token_num)).to(token_num.device)
        index_div_bool_zeros_count *= token_num_mask

        index_div_bool_zeros_count_tile = index_div_bool_zeros_count[:, :, None].repeat(
            1, 1, maximum_length
        )
        ones = torch.ones_like(index_div_bool_zeros_count_tile)
        zeros = torch.zeros_like(index_div_bool_zeros_count_tile)
        ones = torch.cumsum(ones, dim=2)
        cond = index_div_bool_zeros_count_tile == ones
        index_div_bool_zeros_count_tile = torch.where(cond, zeros, ones)

        index_div_bool_zeros_count_tile_bool = index_div_bool_zeros_count_tile.type(torch.bool)
        index_div_bool_zeros_count_tile = 1 - index_div_bool_zeros_count_tile_bool.type(int_type)
        index_div_bool_zeros_count_tile_out = torch.sum(index_div_bool_zeros_count_tile, dim=1)
        index_div_bool_zeros_count_tile_out = index_div_bool_zeros_count_tile_out.type(int_type)
        predictor_mask = (
            (~make_pad_mask(encoder_sequence_length, maxlen=encoder_sequence_length.max()))
            .type(int_type)
            .to(encoder_sequence_length.device)
        )
        index_div_bool_zeros_count_tile_out = index_div_bool_zeros_count_tile_out * predictor_mask

        predictor_alignments = index_div_bool_zeros_count_tile_out
        predictor_alignments_length = predictor_alignments.sum(-1).type(
            encoder_sequence_length.dtype
        )
        return predictor_alignments.detach(), predictor_alignments_length.detach()

    def gen_tf2torch_map_dict(self):

        tensor_name_prefix_torch = self.tf2torch_tensor_name_prefix_torch
        tensor_name_prefix_tf = self.tf2torch_tensor_name_prefix_tf
        map_dict_local = {
            ## predictor
            "{}.cif_conv1d.weight".format(tensor_name_prefix_torch): {
                "name": "{}/conv1d/kernel".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": (2, 1, 0),
            },  # (256,256,3),(3,256,256)
            "{}.cif_conv1d.bias".format(tensor_name_prefix_torch): {
                "name": "{}/conv1d/bias".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (256,),(256,)
            "{}.cif_output.weight".format(tensor_name_prefix_torch): {
                "name": "{}/conv1d_1/kernel".format(tensor_name_prefix_tf),
                "squeeze": 0,
                "transpose": (1, 0),
            },  # (1,256),(1,256,1)
            "{}.cif_output.bias".format(tensor_name_prefix_torch): {
                "name": "{}/conv1d_1/bias".format(tensor_name_prefix_tf),
                "squeeze": None,
                "transpose": None,
            },  # (1,),(1,)
        }
        return map_dict_local

    def convert_tf2torch(
        self,
        var_dict_tf,
        var_dict_torch,
    ):
        map_dict = self.gen_tf2torch_map_dict()
        var_dict_torch_update = dict()
        for name in sorted(var_dict_torch.keys(), reverse=False):
            names = name.split(".")
            if names[0] == self.tf2torch_tensor_name_prefix_torch:
                name_tf = map_dict[name]["name"]
                data_tf = var_dict_tf[name_tf]
                if map_dict[name]["squeeze"] is not None:
                    data_tf = np.squeeze(data_tf, axis=map_dict[name]["squeeze"])
                if map_dict[name]["transpose"] is not None:
                    data_tf = np.transpose(data_tf, map_dict[name]["transpose"])
                data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                assert var_dict_torch[name].size() == data_tf.size(), "{}, {}, {} != {}".format(
                    name, name_tf, var_dict_torch[name].size(), data_tf.size()
                )
                var_dict_torch_update[name] = data_tf
                logging.info(
                    "torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                        name, data_tf.size(), name_tf, var_dict_tf[name_tf].shape
                    )
                )

        return var_dict_torch_update


@tables.register("predictor_classes", "CifPredictorV2Export")
class CifPredictorV2Export(torch.nn.Module):
    def __init__(self, model, **kwargs):
        super().__init__()

        self.pad = model.pad
        self.cif_conv1d = model.cif_conv1d
        self.cif_output = model.cif_output
        self.threshold = model.threshold
        self.smooth_factor = model.smooth_factor
        self.noise_threshold = model.noise_threshold
        self.tail_threshold = model.tail_threshold

    def forward(
        self,
        hidden: torch.Tensor,
        mask: torch.Tensor,
    ):
        alphas, token_num = self.forward_cnn(hidden, mask)
        mask = mask.transpose(-1, -2).float()
        mask = mask.squeeze(-1)
        hidden, alphas, token_num = self.tail_process_fn(hidden, alphas, mask=mask)
        acoustic_embeds, cif_peak = cif_v1_export(hidden, alphas, self.threshold)

        return acoustic_embeds, token_num, alphas, cif_peak

    def forward_cnn(
        self,
        hidden: torch.Tensor,
        mask: torch.Tensor,
    ):
        h = hidden
        context = h.transpose(1, 2)
        queries = self.pad(context)
        output = torch.relu(self.cif_conv1d(queries))
        output = output.transpose(1, 2)

        output = self.cif_output(output)
        alphas = torch.sigmoid(output)
        alphas = torch.nn.functional.relu(alphas * self.smooth_factor - self.noise_threshold)
        mask = mask.transpose(-1, -2).float()
        alphas = alphas * mask
        alphas = alphas.squeeze(-1)
        token_num = alphas.sum(-1)

        return alphas, token_num

    def tail_process_fn(self, hidden, alphas, token_num=None, mask=None):
        b, t, d = hidden.size()
        tail_threshold = self.tail_threshold

        zeros_t = torch.zeros((b, 1), dtype=torch.float32, device=alphas.device)
        ones_t = torch.ones_like(zeros_t)

        mask_1 = torch.cat([mask, zeros_t], dim=1)
        mask_2 = torch.cat([ones_t, mask], dim=1)
        mask = mask_2 - mask_1
        tail_threshold = mask * tail_threshold
        alphas = torch.cat([alphas, zeros_t], dim=1)
        alphas = torch.add(alphas, tail_threshold)

        zeros = torch.zeros((b, 1, d), dtype=hidden.dtype).to(hidden.device)
        hidden = torch.cat([hidden, zeros], dim=1)
        token_num = alphas.sum(dim=-1)
        token_num_floor = torch.floor(token_num)

        return hidden, alphas, token_num_floor


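# `cif_v1_export` vectorizes CIF for export: a prefix sum of the alphas marks a
# fire wherever its integer part increases, and each token embedding is then
# recovered from differences of the prefix-summed, alpha-weighted hidden states.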
@torch.jit.script
def cif_v1_export(hidden, alphas, threshold: float):
    device = hidden.device
    dtype = hidden.dtype
    batch_size, len_time, hidden_size = hidden.size()
    threshold = torch.tensor([threshold], dtype=alphas.dtype).to(alphas.device)

    frames = torch.zeros(batch_size, len_time, hidden_size, dtype=dtype, device=device)
    fires = torch.zeros(batch_size, len_time, dtype=dtype, device=device)

    # prefix_sum = torch.cumsum(alphas, dim=1)
    prefix_sum = torch.cumsum(alphas, dim=1, dtype=torch.float64).to(
        torch.float32
    )  # fp32 cumsum loses precision and gives wrong results in extreme cases
    prefix_sum_floor = torch.floor(prefix_sum)
    dislocation_prefix_sum = torch.roll(prefix_sum, 1, dims=1)
    dislocation_prefix_sum_floor = torch.floor(dislocation_prefix_sum)

    dislocation_prefix_sum_floor[:, 0] = 0
    dislocation_diff = prefix_sum_floor - dislocation_prefix_sum_floor

    fire_idxs = dislocation_diff > 0
    fires[fire_idxs] = 1
    fires = fires + prefix_sum - prefix_sum_floor

    # prefix_sum_hidden = torch.cumsum(alphas.unsqueeze(-1).tile((1, 1, hidden_size)) * hidden, dim=1)
    prefix_sum_hidden = torch.cumsum(alphas.unsqueeze(-1).repeat((1, 1, hidden_size)) * hidden, dim=1)
    frames = prefix_sum_hidden[fire_idxs]
    shift_frames = torch.roll(frames, 1, dims=0)

    batch_len = fire_idxs.sum(1)
    batch_idxs = torch.cumsum(batch_len, dim=0)
    shift_batch_idxs = torch.roll(batch_idxs, 1, dims=0)
    shift_batch_idxs[0] = 0
    shift_frames[shift_batch_idxs] = 0

    remains = fires - torch.floor(fires)
    # remain_frames = remains[fire_idxs].unsqueeze(-1).tile((1, hidden_size)) * hidden[fire_idxs]
    remain_frames = remains[fire_idxs].unsqueeze(-1).repeat((1, hidden_size)) * hidden[fire_idxs]

    shift_remain_frames = torch.roll(remain_frames, 1, dims=0)
    shift_remain_frames[shift_batch_idxs] = 0

    frames = frames - shift_frames + shift_remain_frames - remain_frames

    # max_label_len = batch_len.max()
    max_label_len = alphas.sum(dim=-1)
    max_label_len = torch.floor(max_label_len).max().to(dtype=torch.int64)

    frame_fires = torch.zeros(batch_size, max_label_len, hidden_size, dtype=dtype, device=device)
    indices = torch.arange(max_label_len, device=device).expand(batch_size, -1)
    frame_fires_idxs = indices < batch_len.unsqueeze(1)
    frame_fires[frame_fires_idxs] = frames
    return frame_fires, fires


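# Loop-based, TorchScript-compatible CIF, kept alongside the vectorized
# variant above for export paths.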
@torch.jit.script
def cif_export(hidden, alphas, threshold: float):
    batch_size, len_time, hidden_size = hidden.size()
    threshold = torch.tensor([threshold], dtype=alphas.dtype).to(alphas.device)

    # loop vars
    integrate = torch.zeros([batch_size], dtype=alphas.dtype, device=hidden.device)
    frame = torch.zeros([batch_size, hidden_size], dtype=hidden.dtype, device=hidden.device)
    # intermediate vars along time
    list_fires = []
    list_frames = []

    for t in range(len_time):
        alpha = alphas[:, t]
        distribution_completion = (
            torch.ones([batch_size], dtype=alphas.dtype, device=hidden.device) - integrate
        )

        integrate += alpha
        list_fires.append(integrate)

        fire_place = integrate >= threshold
        integrate = torch.where(
            fire_place,
            integrate - torch.ones([batch_size], dtype=alphas.dtype, device=hidden.device),
            integrate,
        )
        cur = torch.where(fire_place, distribution_completion, alpha)
        remainds = alpha - cur

        frame += cur[:, None] * hidden[:, t, :]
        list_frames.append(frame)
        frame = torch.where(
            fire_place[:, None].repeat(1, hidden_size), remainds[:, None] * hidden[:, t, :], frame
        )

    fires = torch.stack(list_fires, 1)
    frames = torch.stack(list_frames, 1)

    fire_idxs = fires >= threshold
    frame_fires = torch.zeros_like(hidden)
    max_label_len = frames[0, fire_idxs[0]].size(0)
    for b in range(batch_size):
        frame_fire = frames[b, fire_idxs[b]]
        frame_len = frame_fire.size(0)
        frame_fires[b, :frame_len, :] = frame_fire

        if frame_len >= max_label_len:
            max_label_len = frame_len
    frame_fires = frame_fires[:, :max_label_len, :]
    return frame_fires, fires


class mae_loss(torch.nn.Module):

    def __init__(self, normalize_length=False):
        super(mae_loss, self).__init__()
        self.normalize_length = normalize_length
        self.criterion = torch.nn.L1Loss(reduction="sum")

    def forward(self, token_length, pre_token_length):
        loss_token_normalizer = token_length.size(0)
        if self.normalize_length:
            loss_token_normalizer = token_length.sum().type(torch.float32)
        loss = self.criterion(token_length.type(torch.float32), pre_token_length)
        loss = loss / loss_token_normalizer
        return loss


def cif(hidden, alphas, threshold):
    batch_size, len_time, hidden_size = hidden.size()

    # loop vars
    integrate = torch.zeros([batch_size], device=hidden.device)
    frame = torch.zeros([batch_size, hidden_size], device=hidden.device)
    # intermediate vars along time
    list_fires = []
    list_frames = []

    for t in range(len_time):
        alpha = alphas[:, t]
        distribution_completion = torch.ones([batch_size], device=hidden.device) - integrate

        integrate += alpha
        list_fires.append(integrate)

        fire_place = integrate >= threshold
        integrate = torch.where(
            fire_place, integrate - torch.ones([batch_size], device=hidden.device), integrate
        )
        cur = torch.where(fire_place, distribution_completion, alpha)
        remainds = alpha - cur

        frame += cur[:, None] * hidden[:, t, :]
        list_frames.append(frame)
        frame = torch.where(
            fire_place[:, None].repeat(1, hidden_size), remainds[:, None] * hidden[:, t, :], frame
        )

    fires = torch.stack(list_fires, 1)
    frames = torch.stack(list_frames, 1)

    list_ls = []
    len_labels = torch.round(alphas.sum(-1)).int()
    max_label_len = len_labels.max()
    for b in range(batch_size):
        fire = fires[b, :]
        l = torch.index_select(frames[b, :, :], 0, torch.nonzero(fire >= threshold).squeeze())
        pad_l = torch.zeros([max_label_len - l.size(0), hidden_size], device=hidden.device)
        list_ls.append(torch.cat([l, pad_l], 0))
    return torch.stack(list_ls, 0), fires


def cif_wo_hidden_v1(alphas, threshold, return_fire_idxs=False):
    batch_size, len_time = alphas.size()
    device = alphas.device
    dtype = alphas.dtype

    threshold = torch.tensor([threshold], dtype=alphas.dtype).to(alphas.device)

    fires = torch.zeros(batch_size, len_time, dtype=dtype, device=device)

    # prefix_sum = torch.cumsum(alphas, dim=1)
    prefix_sum = torch.cumsum(alphas, dim=1, dtype=torch.float64).to(
        torch.float32
    )  # fp32 cumsum loses precision and gives wrong results in extreme cases
    prefix_sum_floor = torch.floor(prefix_sum)
    dislocation_prefix_sum = torch.roll(prefix_sum, 1, dims=1)
    dislocation_prefix_sum_floor = torch.floor(dislocation_prefix_sum)

    dislocation_prefix_sum_floor[:, 0] = 0
    dislocation_diff = prefix_sum_floor - dislocation_prefix_sum_floor

    fire_idxs = dislocation_diff > 0
    fires[fire_idxs] = 1
    fires = fires + prefix_sum - prefix_sum_floor
    if return_fire_idxs:
        return fires, fire_idxs
    return fires


def cif_v1(hidden, alphas, threshold):
    fires, fire_idxs = cif_wo_hidden_v1(alphas, threshold, return_fire_idxs=True)

    device = hidden.device
    dtype = hidden.dtype
    batch_size, len_time, hidden_size = hidden.size()
    frames = torch.zeros(batch_size, len_time, hidden_size, dtype=dtype, device=device)
    # prefix_sum_hidden = torch.cumsum(alphas.unsqueeze(-1).tile((1, 1, hidden_size)) * hidden, dim=1)
    prefix_sum_hidden = torch.cumsum(alphas.unsqueeze(-1).repeat((1, 1, hidden_size)) * hidden, dim=1)

    frames = prefix_sum_hidden[fire_idxs]
    shift_frames = torch.roll(frames, 1, dims=0)

    batch_len = fire_idxs.sum(1)
    batch_idxs = torch.cumsum(batch_len, dim=0)
    shift_batch_idxs = torch.roll(batch_idxs, 1, dims=0)
    shift_batch_idxs[0] = 0
    shift_frames[shift_batch_idxs] = 0

    remains = fires - torch.floor(fires)
    # remain_frames = remains[fire_idxs].unsqueeze(-1).tile((1, hidden_size)) * hidden[fire_idxs]
    remain_frames = remains[fire_idxs].unsqueeze(-1).repeat((1, hidden_size)) * hidden[fire_idxs]

    shift_remain_frames = torch.roll(remain_frames, 1, dims=0)
    shift_remain_frames[shift_batch_idxs] = 0

    frames = frames - shift_frames + shift_remain_frames - remain_frames

    # max_label_len = batch_len.max()
    max_label_len = (
        torch.round(alphas.sum(-1)).int().max()
    )  # torch.round to calculate the max length

    frame_fires = torch.zeros(batch_size, max_label_len, hidden_size, dtype=dtype, device=device)
    indices = torch.arange(max_label_len, device=device).expand(batch_size, -1)
    frame_fires_idxs = indices < batch_len.unsqueeze(1)
    frame_fires[frame_fires_idxs] = frames
    return frame_fires, fires


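# With threshold == 1.0, `cif_v1` matches the loop-based `cif` above up to
# floating-point tolerance; an illustrative check (not part of the original):
#   hidden, alphas = torch.randn(2, 50, 8), torch.rand(2, 50) * 0.5
#   e1, _ = cif(hidden, alphas, 1.0)
#   e2, _ = cif_v1(hidden, alphas, 1.0)
#   assert torch.allclose(e1, e2, atol=1e-4)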
def cif_wo_hidden(alphas, threshold):
    batch_size, len_time = alphas.size()

    # loop vars
    integrate = torch.zeros([batch_size], device=alphas.device)
    # intermediate vars along time
    list_fires = []

    for t in range(len_time):
        alpha = alphas[:, t]

        integrate += alpha
        list_fires.append(integrate)

        fire_place = integrate >= threshold
        integrate = torch.where(
            fire_place,
            integrate - torch.ones([batch_size], device=alphas.device) * threshold,
            integrate,
        )

    fires = torch.stack(list_fires, 1)
    return fires
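

if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative shapes; not part of the original
    # module). Runs CifPredictorV2 on random input and prints output shapes.
    torch.manual_seed(0)
    predictor = CifPredictorV2(idim=32, l_order=1, r_order=1, tail_threshold=0.45)
    hidden = torch.randn(2, 20, 32)  # (batch, time, idim) encoder output
    mask = torch.ones(2, 1, 20)  # (batch, 1, time) non-padding mask
    acoustic_embeds, token_num, alphas, cif_peak = predictor(hidden, mask=mask)
    print(acoustic_embeds.shape, token_num, alphas.shape, cif_peak.shape)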