From f4f545b7243435116f3cedc4f42cb39bfed3331e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 30 Apr 2024 00:06:43 +0800
Subject: [PATCH] batch

---
 funasr/models/sense_voice/model.py   |  235 +++++++++++++++++++++++++++++
 funasr/models/sense_voice/decoder.py |  202 +++++++++++++++++++++++++
 2 files changed, 437 insertions(+), 0 deletions(-)

diff --git a/funasr/models/sense_voice/decoder.py b/funasr/models/sense_voice/decoder.py
index 133508f..4986d50 100644
--- a/funasr/models/sense_voice/decoder.py
+++ b/funasr/models/sense_voice/decoder.py
@@ -335,3 +335,205 @@
         x = (x @ torch.transpose(self.token_embedding.weight.to(x.dtype), 0, 1)).float()
 
         return x
+
+
+class MultiHeadedAttentionSANMDecoder(nn.Module):
+    """FSMN memory block used in the SANM decoder.
+
+    Despite the name, this module is not multi-head attention: it is a
+    depthwise Conv1d over time (an FSMN block) with a residual connection.
+
+    Args:
+        n_feat (int): The number of features.
+        dropout_rate (float): Dropout rate.
+        kernel_size (int): Kernel size of the FSMN convolution.
+        sanm_shfit (int): Extra left shift of the padding window.
+
+    """
+
+    def __init__(self, n_feat, dropout_rate, kernel_size, sanm_shfit=0):
+        """Construct a MultiHeadedAttentionSANMDecoder object."""
+        super().__init__()
+
+        self.dropout = nn.Dropout(p=dropout_rate)
+
+        self.fsmn_block = nn.Conv1d(
+            n_feat, n_feat, kernel_size, stride=1, padding=0, groups=n_feat, bias=False
+        )
+        # Asymmetric padding keeps the output length equal to the input
+        # length; e.g. kernel_size=20, sanm_shfit=10 gives pad (19, 0),
+        # i.e. a fully causal (past-only) memory window.
+        left_padding = (kernel_size - 1) // 2
+        if sanm_shfit > 0:
+            left_padding = left_padding + sanm_shfit
+        right_padding = kernel_size - 1 - left_padding
+        self.pad_fn = nn.ConstantPad1d((left_padding, right_padding), 0.0)
+        self.kernel_size = kernel_size
+
+    def forward(self, inputs, mask, cache=None, mask_shfit_chunk=None):
+        """
+        :param x: (#batch, time1, size).
+        :param mask: Mask tensor (#batch, 1, time)
+        :return:
+        """
+        # print("in fsmn, inputs", inputs.size())
+        b, t, d = inputs.size()
+        # logging.info(
+        #     "mask: {}".format(mask.size()))
+        if mask is not None:
+            mask = torch.reshape(mask, (b, -1, 1))
+            # logging.info("in fsmn, mask: {}, {}".format(mask.size(), mask[0:100:50, :, :]))
+            if mask_shfit_chunk is not None:
+                # logging.info("in fsmn, mask_fsmn: {}, {}".format(mask_shfit_chunk.size(), mask_shfit_chunk[0:100:50, :, :]))
+                mask = mask * mask_shfit_chunk
+            # logging.info("in fsmn, mask_after_fsmn: {}, {}".format(mask.size(), mask[0:100:50, :, :]))
+            # print("in fsmn, mask", mask.size())
+            # print("in fsmn, inputs", inputs.size())
+            inputs = inputs * mask
+
+        x = inputs.transpose(1, 2)
+        b, d, t = x.size()
+        if cache is None:
+            x = self.pad_fn(x)
+            if not self.training:
+                # Keep the padded sequence as the conv cache for a
+                # subsequent incremental decoding step.
+                cache = x
+        else:
+            # Streaming: append the new frames to the cached context and
+            # keep only the last (kernel_size + t - 1) frames -- exactly
+            # the receptive field needed to produce t output frames.
+            x = torch.cat((cache[:, :, 1:], x), dim=2)
+            x = x[:, :, -(self.kernel_size + t - 1) :]
+            cache = x
+        x = self.fsmn_block(x)
+        x = x.transpose(1, 2)
+        # print("in fsmn, fsmn_out", x.size())
+        if x.size(1) != inputs.size(1):
+            inputs = inputs[:, -1, :]
+
+        x = x + inputs
+        x = self.dropout(x)
+        if mask is not None:
+            x = x * mask
+        return x, cache
+
+
+class ResidualAttentionBlockFSMN(nn.Module):
+    def __init__(self, n_state: int, n_head: int, cross_attention: bool = False, **kwargs):
+        super().__init__()
+
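+        # Self-attention is replaced by an FSMN memory block (a depthwise
+        # conv over time); cross attention to the encoder output remains
+        # standard multi-head attention.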
+        self.attn = MultiHeadedAttentionSANMDecoder(
+            n_state,
+            kwargs.get("self_attention_dropout_rate", 0.0),
+            kwargs.get("kernel_size", 20),
+            kwargs.get("sanm_shfit", 10),
+        )
+        self.attn_ln = LayerNorm(n_state)
+
+        self.cross_attn = MultiHeadAttention(n_state, n_head) if cross_attention else None
+        self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None
+
+        n_mlp = n_state * 4
+        self.mlp = nn.Sequential(Linear(n_state, n_mlp), nn.GELU(), Linear(n_mlp, n_state))
+        self.mlp_ln = LayerNorm(n_state)
+
+    def forward(
+        self,
+        x: Tensor,
+        xa: Optional[Tensor] = None,
+        mask: Optional[Tensor] = None,
+        kv_cache: Optional[dict] = None,
+        **kwargs,
+    ):
+        is_pad_memory_mask = kwargs.get("is_pad_memory_mask", False)
+        memory_mask = kwargs.get("memory_mask", None)
+        # The FSMN block is a depthwise conv, not attention: it takes no
+        # kv_cache or pad-mask arguments, and the causal mask does not apply.
+        x = x + self.attn(self.attn_ln(x), mask=None)[0]
+        if self.cross_attn:
+            x = (
+                x
+                + self.cross_attn(
+                    self.cross_attn_ln(x),
+                    xa,
+                    mask=memory_mask,
+                    kv_cache=kv_cache,
+                    is_pad_mask=is_pad_memory_mask,
+                )[0]
+            )
+        x = x + self.mlp(self.mlp_ln(x))
+        return x
+
+
+@tables.register("decoder_classes", "SenseVoiceDecoderFSMN")
+class SenseVoiceDecoderFSMN(nn.Module):
+    def __init__(self, n_vocab: int, n_ctx: int, n_state: int, n_head: int, n_layer: int, **kwargs):
+        super().__init__()
+
+        self.token_embedding = nn.Embedding(n_vocab, n_state)
+        self.positional_embedding = nn.Parameter(torch.empty(n_ctx, n_state))
+
+        self.blocks = nn.ModuleList(
+            [
+                ResidualAttentionBlockFSMN(
+                    n_state, n_head, cross_attention=True, layer_id=i, **kwargs
+                )
+                for i in range(n_layer)
+            ]
+        )
+        self.ln = LayerNorm(n_state)
+
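+        # Additive causal mask: -inf above the diagonal prevents attending
+        # to future positions.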
+        mask = torch.empty(n_ctx, n_ctx).fill_(-np.inf).triu_(1)
+        self.register_buffer("mask", mask, persistent=False)
+
+        self.use_padmask = kwargs.get("use_padmask", True)
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        xa: torch.Tensor,
+        kv_cache: Optional[dict] = None,
+        **kwargs,
+    ):
+        """Forward decoder.
+
+        Args:
+                hs_pad: encoded memory, float32  (batch, maxlen_in, feat)
+                hlens: (batch)
+                ys_in_pad:
+                        input token ids, int64 (batch, maxlen_out)
+                        if input_layer == "embed"
+                        input tensor (batch, maxlen_out, #mels) in the other cases
+                ys_in_lens: (batch)
+        Returns:
+                (tuple): tuple containing:
+
+                x: decoded token score before softmax (batch, maxlen_out, token)
+                        if use_output_layer is True,
+                olens: (batch, )
+        """
+        use_padmask = self.use_padmask
+        hlens = kwargs.get("hlens", None)
+
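+        # When decoding incrementally, offset the positional embeddings by
+        # the number of tokens already held in the kv cache.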
+        offset = next(iter(kv_cache.values())).shape[1] if kv_cache else 0
+        tgt, memory = x, xa
+        # Replace ignore_id (-1) padding with token 0 so the embedding
+        # lookup stays in range; these positions are re-masked in the loss.
+        tgt[tgt == -1] = 0
+        tgt = self.token_embedding(tgt) + self.positional_embedding[offset : offset + tgt.size(1)]
+
+        x = tgt.to(memory.dtype)
+
+        if use_padmask and hlens is not None:
+            memory_mask = (~make_pad_mask(hlens)[:, None, :]).to(memory.device)
+        else:
+            memory_mask = None
+
+        for block in self.blocks:
+            x = block(
+                x,
+                memory,
+                mask=self.mask,
+                memory_mask=memory_mask,
+                is_pad_mask=False,
+                is_pad_memory_mask=True,
+            )
+
+        x = self.ln(x)
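+        # Project onto the vocabulary with the tied (transposed) token
+        # embedding matrix instead of a separate output layer.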
+        x = (x @ torch.transpose(self.token_embedding.weight.to(x.dtype), 0, 1)).float()
+
+        return x
diff --git a/funasr/models/sense_voice/model.py b/funasr/models/sense_voice/model.py
index d06776b..c12107e 100644
--- a/funasr/models/sense_voice/model.py
+++ b/funasr/models/sense_voice/model.py
@@ -476,3 +476,238 @@
         results.append(result_i)
 
         return results, meta_data
+
+
+@tables.register("model_classes", "SenseVoiceFSMN")
+class SenseVoiceFSMN(nn.Module):
+    def __init__(self, *args, **kwargs):
+        super().__init__()
+
+        dims = kwargs.get("dims", {})
+        dims = whisper.model.ModelDimensions(**dims)
+        model = whisper.model.Whisper(dims=dims)
+
+        # encoder
+        model.encoder.downsample_rate = kwargs.get("downsample_rate", 4)
+        model.encoder.use_padmask = kwargs.get("use_padmask", True)
+        from .encoder import sense_voice_encode_forward
+
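+        # Swap in a custom encoder forward that honors downsample_rate and
+        # the padding mask (see sense_voice_encode_forward in encoder.py).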
+        model.encoder.forward = types.MethodType(sense_voice_encode_forward, model.encoder)
+
+        # decoder
+        del model.decoder
+        decoder = kwargs.get("decoder", "SenseVoiceDecoder")
+        decoder_conf = kwargs.get("decoder_conf", {})
+        decoder_class = tables.decoder_classes.get(decoder)
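+        # Note: for SenseVoiceDecoderFSMN, n_vocab / n_ctx / n_state /
+        # n_head / n_layer must be supplied via decoder_conf; vocab_size
+        # and encoder_output_size are absorbed by its **kwargs.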
+        decoder = decoder_class(
+            vocab_size=dims.n_vocab,
+            encoder_output_size=dims.n_audio_state,
+            **decoder_conf,
+        )
+        model.decoder = decoder
+
+        self.model = model
+
+        self.encoder_output_size = self.model.dims.n_audio_state
+
+        self.activation_checkpoint = kwargs.get("activation_checkpoint", False)
+        self.ignore_id = kwargs.get("ignore_id", -1)
+        self.vocab_size = kwargs.get("vocab_size", -1)
+        self.length_normalized_loss = kwargs.get("length_normalized_loss", True)
+        self.criterion_att = LabelSmoothingLoss(
+            size=self.vocab_size,
+            padding_idx=self.ignore_id,
+            smoothing=kwargs.get("lsm_weight", 0.0),
+            normalize_length=self.length_normalized_loss,
+        )
+
+        specaug = kwargs.get("specaug", None)
+        if specaug is not None:
+            specaug_class = tables.specaug_classes.get(specaug)
+            specaug = specaug_class(**kwargs.get("specaug_conf", {}))
+        self.specaug = specaug
+
+    def forward(
+        self,
+        speech: torch.Tensor,
+        speech_lengths: torch.Tensor,
+        text: torch.Tensor,
+        text_lengths: torch.Tensor,
+        **kwargs,
+    ):
+        target_mask = kwargs.get("target_mask", None)
+
+        # import pdb;
+        # pdb.set_trace()
+        if len(text_lengths.size()) > 1:
+            text_lengths = text_lengths[:, 0]
+        if len(speech_lengths.size()) > 1:
+            speech_lengths = speech_lengths[:, 0]
+
+        batch_size, frames, _ = speech.shape
+        _, text_tokens = text.shape
+
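+        # Optionally trade compute for memory by recomputing encoder
+        # activations during the backward pass.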
+        if self.activation_checkpoint:
+            from torch.utils.checkpoint import checkpoint
+
+            encoder_out, encoder_out_lens = checkpoint(
+                self.encode, speech, speech_lengths, use_reentrant=False
+            )
+        else:
+            encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
+
+        loss_att, acc_att, cer_att, wer_att = self._calc_att_loss(
+            encoder_out, encoder_out_lens, text, text_lengths, target_mask=target_mask
+        )
+        loss = loss_att
+        stats = {}
+        stats["acc"] = acc_att
+        stats["loss"] = torch.clone(loss.detach())
+        stats["batch_size"] = batch_size
+        stats["batch_size_x_frames"] = frames * batch_size
+        stats["batch_size_real_frames"] = speech_lengths.sum().item()
+        stats["padding_frames"] = stats["batch_size_x_frames"] - stats["batch_size_real_frames"]
+        stats["batch_size_x_tokens"] = text_tokens * batch_size
+        stats["batch_size_real_tokens"] = text_lengths.sum().item()
+        stats["padding_tokens"] = stats["batch_size_x_tokens"] - stats["batch_size_real_tokens"]
+        stats["batch_size_x_frames_plus_tokens"] = (text_tokens + frames) * batch_size
+
+        # force_gatherable: to-device and to-tensor if scalar for DataParallel
+        if self.length_normalized_loss:
+            # Use the total target-token count (lengths + 1, presumably for
+            # the EOS token) as the weight so averaging is per token.
+            batch_size = int((text_lengths + 1).sum())
+        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
+        return loss, stats, weight
+
+    def encode(
+        self,
+        speech: torch.Tensor,
+        speech_lengths: torch.Tensor,
+        **kwargs,
+    ):
+        """Encoder. Note that this method is used by asr_inference.py
+        Args:
+                speech: (Batch, Length, ...)
+                speech_lengths: (Batch, )
+                ind: int
+        """
+        with autocast(False):
+            # Data augmentation
+            if self.specaug is not None and self.training:
+                speech, speech_lengths = self.specaug(speech, speech_lengths)
+
+        # Forward encoder
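+        # Whisper's encoder expects (batch, n_mels, frames), hence the
+        # permute from (batch, frames, n_mels).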
+        encoder_out, encoder_out_lens = self.model.encoder(speech.permute(0, 2, 1), speech_lengths)
+
+        return encoder_out, encoder_out_lens
+
+    def _calc_att_loss(
+        self,
+        encoder_out: torch.Tensor,
+        encoder_out_lens: torch.Tensor,
+        ys_pad: torch.Tensor,
+        ys_pad_lens: torch.Tensor,
+        **kwargs,
+    ):
+        target_mask = kwargs.get("target_mask", None)
+
+        # 1. Forward decoder
+        decoder_out = self.model.decoder(
+            x=ys_pad, xa=encoder_out, hlens=encoder_out_lens, ys_in_lens=ys_pad_lens
+        )
+
+        # 2. Compute attention loss. Positions where target_mask is 0, and
+        # padding positions (token 0), are set to the ignore index (-1).
+        if target_mask is None:
+            # Fall back to supervising every position when no mask is given.
+            target_mask = torch.ones_like(ys_pad)
+        mask = torch.ones_like(ys_pad) * (-1)
+        ys_pad_mask = (ys_pad * target_mask + mask * (1 - target_mask)).to(torch.int64)
+        ys_pad_mask[ys_pad_mask == 0] = -1
+        # Shift for teacher forcing: logits at step t are scored against
+        # the token at step t + 1.
+        loss_att = self.criterion_att(decoder_out[:, :-1, :], ys_pad_mask[:, 1:])
+
+        with torch.no_grad():
+            preds = torch.argmax(decoder_out, -1)
+            acc_att = compute_accuracy(
+                preds[:, :-1], ys_pad_mask[:, 1:], ignore_label=self.ignore_id
+            )
+
+        return loss_att, acc_att, None, None
+
+    def inference(
+        self,
+        data_in,
+        data_lengths=None,
+        key: list = None,
+        tokenizer=None,
+        frontend=None,
+        **kwargs,
+    ):
+        if kwargs.get("batch_size", 1) > 1:
+            raise NotImplementedError("batch decoding is not implemented")
+
+        if frontend is None and not hasattr(self, "frontend"):
+            frontend_class = tables.frontend_classes.get("WhisperFrontend")
+            frontend = frontend_class(
+                n_mels=self.model.dims.n_mels, do_pad_trim=kwargs.get("do_pad_trim", True)
+            )
+            self.frontend = frontend
+        else:
+            frontend = frontend if frontend is not None else self.frontend
+
+        meta_data = {}
+        if (
+            isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank"
+        ):  # fbank
+            speech, speech_lengths = data_in, data_lengths
+            if len(speech.shape) < 3:
+                speech = speech[None, :, :]
+            if speech_lengths is None:
+                # Wrap in a tensor so the later .to(device) call works.
+                speech_lengths = torch.tensor([speech.shape[1]])
+        else:
+            # extract fbank feats
+            time1 = time.perf_counter()
+            audio_sample_list = load_audio_text_image_video(
+                data_in,
+                fs=frontend.fs if hasattr(frontend, "fs") else 16000,
+                audio_fs=kwargs.get("fs", 16000),
+                data_type=kwargs.get("data_type", "sound"),
+                tokenizer=tokenizer,
+            )
+            time2 = time.perf_counter()
+            meta_data["load_data"] = f"{time2 - time1:0.3f}"
+            speech, speech_lengths = extract_fbank(
+                audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend
+            )
+            time3 = time.perf_counter()
+            meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
+            frame_shift = frontend.frame_shift if hasattr(frontend, "frame_shift") else 10
+            lfr_n = frontend.lfr_n if hasattr(frontend, "lfr_n") else 1
+            meta_data["batch_data_time"] = speech_lengths.sum().item() * frame_shift * lfr_n / 1000
+
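+        # Batch decoding is not implemented; take the single utterance, as
+        # whisper.decode works on an unbatched mel spectrogram.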
+        speech = speech.to(device=kwargs["device"])[0, :, :]
+        speech_lengths = speech_lengths.to(device=kwargs["device"])
+
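+        # Build the initial prompt: <|startoftranscript|> followed by one
+        # tag per requested task, e.g. "<|ASR|>".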
+        DecodingOptions = kwargs.get("DecodingOptions", {})
+        task = DecodingOptions.get("task", "ASR")
+        if isinstance(task, str):
+            task = [task]
+        task = "".join([f"<|{x}|>" for x in task])
+        initial_prompt = kwargs.get("initial_prompt", f"<|startoftranscript|>{task}")
+        DecodingOptions["initial_prompt"] = initial_prompt
+
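+        # language == "auto" means let the model detect the language.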
+        language = DecodingOptions.get("language", None)
+        language = None if language == "auto" else language
+        DecodingOptions["language"] = language
+
+        DecodingOptions["vocab_path"] = kwargs["tokenizer_conf"].get("vocab_path", None)
+
+        if "without_timestamps" not in DecodingOptions:
+            DecodingOptions["without_timestamps"] = True
+
+        options = whisper.DecodingOptions(**DecodingOptions)
+
+        result = whisper.decode(self.model, speech, options)
+        text = f"{result.text}"
+        results = []
+        result_i = {"key": key[0], "text": text}
+
+        results.append(result_i)
+
+        return results, meta_data

--
Gitblit v1.9.1