From abb33d6b2097e5b0643326bc1b376a63cdc2f967 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Mon, 24 Jun 2024 17:06:21 +0800
Subject: [PATCH] Dev gzf deepspeed (#1844)

---
 funasr/models/sense_voice/model.py |  282 +-------------------------------------------------------
 1 file changed, 6 insertions(+), 276 deletions(-)

diff --git a/funasr/models/sense_voice/model.py b/funasr/models/sense_voice/model.py
index a9b2149..9db6539 100644
--- a/funasr/models/sense_voice/model.py
+++ b/funasr/models/sense_voice/model.py
@@ -10,7 +10,7 @@
 from torch import Tensor
 from torch import nn
 from torch.cuda.amp import autocast
-from funasr.metrics.compute_acc import compute_accuracy
+from funasr.metrics.compute_acc import compute_accuracy, th_accuracy
 from funasr.losses.label_smoothing_loss import LabelSmoothingLoss
 from funasr.train_utils.device_funcs import force_gatherable
 from . import whisper_lib as whisper
@@ -662,9 +662,11 @@
         else:
             encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
 
-        loss_att, acc_att, cer_att, wer_att = self._calc_att_loss(
-            encoder_out, encoder_out_lens, text, text_lengths, target_mask=target_mask
-        )
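+        # disable autocast so the attention loss is computed in full (fp32) precision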
+        with autocast(False):
+            loss_att, acc_att, cer_att, wer_att = self._calc_att_loss(
+                encoder_out, encoder_out_lens, text, text_lengths, target_mask=target_mask
+            )
+
         loss = loss_att
         stats = {}
         stats["acc"] = acc_att
@@ -1390,275 +1392,3 @@
 
 from funasr.models.paraformer.search import Hypothesis
 from funasr.utils import postprocess_utils
-
-
-@tables.register("model_classes", "SenseVoiceSANMCTC")
-class SenseVoiceSANMCTC(nn.Module):
-    """CTC-attention hybrid Encoder-Decoder model"""
-
-    def __init__(
-        self,
-        specaug: str = None,
-        specaug_conf: dict = None,
-        normalize: str = None,
-        normalize_conf: dict = None,
-        encoder: str = None,
-        encoder_conf: dict = None,
-        ctc_conf: dict = None,
-        input_size: int = 80,
-        vocab_size: int = -1,
-        ignore_id: int = -1,
-        blank_id: int = 0,
-        sos: int = 1,
-        eos: int = 2,
-        length_normalized_loss: bool = False,
-        **kwargs,
-    ):
-
-        super().__init__()
-
-        if specaug is not None:
-            specaug_class = tables.specaug_classes.get(specaug)
-            specaug = specaug_class(**specaug_conf)
-        if normalize is not None:
-            normalize_class = tables.normalize_classes.get(normalize)
-            normalize = normalize_class(**normalize_conf)
-        encoder_class = tables.encoder_classes.get(encoder)
-        encoder = encoder_class(input_size=input_size, **encoder_conf)
-        encoder_output_size = encoder.output_size()
-
-        if ctc_conf is None:
-            ctc_conf = {}
-        ctc = CTC(odim=vocab_size, encoder_output_size=encoder_output_size, **ctc_conf)
-
-        self.blank_id = blank_id
-        self.sos = sos if sos is not None else vocab_size - 1
-        self.eos = eos if eos is not None else vocab_size - 1
-        self.vocab_size = vocab_size
-        self.ignore_id = ignore_id
-        self.specaug = specaug
-        self.normalize = normalize
-        self.encoder = encoder
-        self.error_calculator = None
-
-        self.ctc = ctc
-
-        self.length_normalized_loss = length_normalized_loss
-        self.encoder_output_size = encoder_output_size
-
-        self.lid_dict = {"zh": 3, "en": 4, "yue": 7, "ja": 11, "ko": 12, "nospeech": 13}
-        self.textnorm_dict = {"withtextnorm": 14, "wotextnorm": 15}
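-        # 16 learned prompt embeddings (dim 560): language, event/emotion, and text-norm query ids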
-        self.embed = torch.nn.Embedding(8 + len(self.lid_dict) + len(self.textnorm_dict), 560)
-
-    def forward(
-        self,
-        speech: torch.Tensor,
-        speech_lengths: torch.Tensor,
-        text: torch.Tensor,
-        text_lengths: torch.Tensor,
-        **kwargs,
-    ):
-        """Encoder + Decoder + Calc loss
-        Args:
-                speech: (Batch, Length, ...)
-                speech_lengths: (Batch, )
-                text: (Batch, Length)
-                text_lengths: (Batch,)
-        """
-        if len(text_lengths.size()) > 1:
-            text_lengths = text_lengths[:, 0]
-        if len(speech_lengths.size()) > 1:
-            speech_lengths = speech_lengths[:, 0]
-
-        batch_size = speech.shape[0]
-
-        # 1. Encoder
-        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
-
-        loss_ctc, cer_ctc = None, None
-        stats = dict()
-
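-        # 2. CTC loss over the encoder output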
-        loss_ctc, cer_ctc = self._calc_ctc_loss(encoder_out, encoder_out_lens, text, text_lengths)
-
-        loss = loss_ctc
-
-        # Collect total loss stats
-        stats["loss"] = torch.clone(loss.detach())
-
-        # force_gatherable: to-device and to-tensor if scalar for DataParallel
-        if self.length_normalized_loss:
-            batch_size = int((text_lengths + 1).sum())
-        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
-        return loss, stats, weight
-
-    def encode(
-        self,
-        speech: torch.Tensor,
-        speech_lengths: torch.Tensor,
-        **kwargs,
-    ):
-        """Frontend + Encoder. Note that this method is used by asr_inference.py
-        Args:
-                speech: (Batch, Length, ...)
-                speech_lengths: (Batch, )
-        """
-
-        # Data augmentation
-        if self.specaug is not None and self.training:
-            speech, speech_lengths = self.specaug(speech, speech_lengths)
-
-        # Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
-        if self.normalize is not None:
-            speech, speech_lengths = self.normalize(speech, speech_lengths)
-
-        # Forward encoder
-        # feats: (Batch, Length, Dim)
-        # -> encoder_out: (Batch, Length2, Dim2)
-        encoder_out, encoder_out_lens = self.encoder(speech, speech_lengths)
-
-        return encoder_out, encoder_out_lens
-
-    def _calc_ctc_loss(
-        self,
-        encoder_out: torch.Tensor,
-        encoder_out_lens: torch.Tensor,
-        ys_pad: torch.Tensor,
-        ys_pad_lens: torch.Tensor,
-    ):
-        # Calc CTC loss
-        loss_ctc = self.ctc(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens)
-
-        # Calc CER using CTC
-        cer_ctc = None
-        if not self.training and self.error_calculator is not None:
-            ys_hat = self.ctc.argmax(encoder_out).data
-            cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
-        return loss_ctc, cer_ctc
-
-    def inference(
-        self,
-        data_in,
-        data_lengths=None,
-        key: list = None,
-        tokenizer=None,
-        frontend=None,
-        **kwargs,
-    ):
-
-        if kwargs.get("batch_size", 1) > 1:
-            raise NotImplementedError("batch decoding is not implemented")
-
-        meta_data = {}
-        if (
-            isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank"
-        ):  # fbank
-            speech, speech_lengths = data_in, data_lengths
-            if len(speech.shape) < 3:
-                speech = speech[None, :, :]
-            if speech_lengths is None:
-                speech_lengths = torch.tensor([speech.shape[1]], dtype=torch.int64)
-        else:
-            # extract fbank feats
-            time1 = time.perf_counter()
-            audio_sample_list = load_audio_text_image_video(
-                data_in,
-                fs=frontend.fs,
-                audio_fs=kwargs.get("fs", 16000),
-                data_type=kwargs.get("data_type", "sound"),
-                tokenizer=tokenizer,
-            )
-            time2 = time.perf_counter()
-            meta_data["load_data"] = f"{time2 - time1:0.3f}"
-            speech, speech_lengths = extract_fbank(
-                audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend
-            )
-            time3 = time.perf_counter()
-            meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
-            meta_data["batch_data_time"] = (
-                speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
-            )
-
-        speech = speech.to(device=kwargs["device"])
-        speech_lengths = speech_lengths.to(device=kwargs["device"])
-
-        language = kwargs.get("language", None)
-        if language is not None:
-            language_query = self.embed(
-                torch.LongTensor(
-                    [[self.lid_dict[language] if language in self.lid_dict else 0]]
-                ).to(speech.device)
-            ).repeat(speech.size(0), 1, 1)
-        else:
-            language_query = self.embed(torch.LongTensor([[0]]).to(speech.device)).repeat(
-                speech.size(0), 1, 1
-            )
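-        # build the text-norm prompt query and prepend it to the features (length += 1)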
-        textnorm = kwargs.get("text_norm", "wotextnorm")
-        textnorm_query = self.embed(
-            torch.LongTensor([[self.textnorm_dict[textnorm]]]).to(speech.device)
-        ).repeat(speech.size(0), 1, 1)
-        speech = torch.cat((textnorm_query, speech), dim=1)
-        speech_lengths += 1
-
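-        # prepend [language, event, emotion] prompt queries in front of everything (length += 3)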
-        event_emo_query = self.embed(torch.LongTensor([[1, 2]]).to(speech.device)).repeat(
-            speech.size(0), 1, 1
-        )
-        input_query = torch.cat((language_query, event_emo_query), dim=1)
-        speech = torch.cat((input_query, speech), dim=1)
-        speech_lengths += 3
-
-        # Encoder
-        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
-        if isinstance(encoder_out, tuple):
-            encoder_out = encoder_out[0]
-
-        # compute frame-level CTC log-probabilities from the encoder output
-        ctc_logits = self.ctc.log_softmax(encoder_out)
-
-        results = []
-        b, n, d = encoder_out.size()
-        if isinstance(key[0], (list, tuple)):
-            key = key[0]
-        if len(key) < b:
-            key = key * b
-        for i in range(b):
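-            # greedy CTC decoding: frame-wise argmax, then collapse repeated labels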
-            x = ctc_logits[i, : encoder_out_lens[i], :]
-            yseq = x.argmax(dim=-1)
-            yseq = torch.unique_consecutive(yseq, dim=-1)
-            yseq = torch.tensor([self.sos] + yseq.tolist() + [self.eos], device=yseq.device)
-            nbest_hyps = [Hypothesis(yseq=yseq)]
-
-            for nbest_idx, hyp in enumerate(nbest_hyps):
-                ibest_writer = None
-                if kwargs.get("output_dir") is not None:
-                    if not hasattr(self, "writer"):
-                        self.writer = DatadirWriter(kwargs.get("output_dir"))
-                    ibest_writer = self.writer[f"{nbest_idx + 1}best_recog"]
-
-                # remove sos/eos and get results
-                last_pos = -1
-                if isinstance(hyp.yseq, list):
-                    token_int = hyp.yseq[1:last_pos]
-                else:
-                    token_int = hyp.yseq[1:last_pos].tolist()
-
-                # remove blank (assumed id 0), sos and eos symbol ids
-                token_int = list(
-                    filter(
-                        lambda x: x != self.eos and x != self.sos and x != self.blank_id, token_int
-                    )
-                )
-
-                # Change integer-ids to tokens
-                text = tokenizer.decode(token_int)
-
-                result_i = {"key": key[i], "text": text}
-                results.append(result_i)
-
-                if ibest_writer is not None:
-                    ibest_writer["token"][key[i]] = " ".join(token)
-                    ibest_writer["text"][key[i]] = text_postprocessed
-
-        return results, meta_data

--
Gitblit v1.9.1