zhifu gao
2024-04-26 1cdb3cc28d4d89a576cc06e5cd8eb80da1f3a3aa
funasr/datasets/sense_voice_datasets/datasets.py
@@ -1,3 +1,5 @@
import logging
import torch
import random
@@ -46,6 +48,9 @@
        self.float_pad_value = float_pad_value
        self.sos = kwargs.get("sos", "<|startoftranscript|>")
        self.eos = kwargs.get("eos", "<|endoftext|>")
        self.batch_size = kwargs.get("batch_size")
        self.batch_type = kwargs.get("batch_type")
        self.prompt_ids_len = 0
    def get_source_len(self, index):
        item = self.index_ds[index]
@@ -69,6 +74,9 @@
        speech, speech_lengths = extract_fbank(
            data_src, data_type=self.data_type, frontend=self.frontend, is_final=True
        )  # speech: [b, T, d]
        if speech_lengths > self.batch_size:
            # drop samples whose frame count alone exceeds the batch budget
            return None
        speech = speech.permute(0, 2, 1)  # swap the last two axes: [b, T, d] -> [b, d, T]
        target = item["target"]
        if self.preprocessor_text:
@@ -80,9 +88,12 @@
        prompt = f"{self.sos}{task}{text_language}"
        prompt_ids = self.tokenizer.encode(prompt, allowed_special="all")
        prompt_ids_len = len(prompt_ids) - 1  # count [sos, task] only; the lid token is charged to the target below
        self.prompt_ids_len = prompt_ids_len  # cached for the dummy batch in collator()
        target_ids = self.tokenizer.encode(target, allowed_special="all")
        target_ids_len = len(target_ids) + 1  # [lid, text]
        if target_ids_len > 200:  # drop samples with overly long transcripts
            return None
        eos = self.tokenizer.encode(self.eos, allowed_special="all")  # [eos]
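For reference, the off-by-one bookkeeping above works out as follows. A toy sketch with made-up token ids (the real values come from self.tokenizer; nothing here is FunASR API): the encoded prompt is [sos, task, lid], prompt_ids_len counts only [sos, task], and the language id token is charged to the target instead.

    # Toy ids, for illustration only; actual values depend on the tokenizer.
    prompt_ids = [50258, 50359, 50260]    # [sos, task, lid]
    prompt_ids_len = len(prompt_ids) - 1  # 2 -> [sos, task]
    target_ids = [1001, 1002, 1003]       # transcript tokens
    target_ids_len = len(target_ids) + 1  # 4 -> [lid, text]
    # prompt_ids_len + target_ids_len + 1 (eos) == full label length
    assert prompt_ids_len + target_ids_len + 1 == len(prompt_ids) + len(target_ids) + 1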
@@ -95,23 +106,38 @@
        target_mask = (
            [0] * (prompt_ids_len) + [1] * (target_ids_len) + [1]
        )  # [sos, task, lid, text, eos]: [0, 0, 1, 1, 1]
        target_mask_lengths = len(target_mask)
        target_mask = torch.tensor(target_mask, dtype=torch.float32)
        target_mask_lengths = torch.tensor([target_mask_lengths], dtype=torch.int32)
        return {
            "speech": speech[0, :, :],
            "speech_lengths": speech_lengths,
            "text": text,
            "text_lengths": text_lengths,
            "target_mask": target_mask,
            "target_mask_lengths": target_mask_lengths,
        }
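Putting the pieces together, target_mask lines up one-to-one with the label sequence [sos, task, lid, text..., eos], zeroing out the prompt positions so only lid, the text, and eos are supervised. A minimal standalone check (toy lengths, no tokenizer needed):

    import torch

    prompt_ids_len, target_ids_len = 2, 4
    target_mask = [0] * prompt_ids_len + [1] * target_ids_len + [1]
    # [sos, task | lid, w1, w2, w3 | eos] -> [0, 0, 1, 1, 1, 1, 1]
    assert target_mask == [0, 0, 1, 1, 1, 1, 1]
    target_mask = torch.tensor(target_mask, dtype=torch.float32)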
    def collator(self, samples: list = None):
        outputs = {}
        for sample in samples:
            if sample is None:  # sample was rejected in __getitem__ (too long, etc.)
                continue
            for key in sample.keys():
                if key not in outputs:
                    outputs[key] = []
                outputs[key].append(sample[key])
        if len(outputs) < 1:
            logging.error("data is empty!")
            # every sample in this batch was filtered out in __getitem__;
            # return a dummy batch of size 1 so training does not crash
            outputs = {
                "speech": torch.rand((1, 10, 128), dtype=torch.float32),
                "speech_lengths": torch.tensor([10], dtype=torch.int32),
                "text": torch.tensor([[58836]], dtype=torch.int32),
                "text_lengths": torch.tensor([1], dtype=torch.int32),
                "target_mask": torch.tensor(
                    [[0] * self.prompt_ids_len + [1] + [1]], dtype=torch.float32
                ),
                # keep the same keys as a normal batch
                "target_mask_lengths": torch.tensor(
                    [self.prompt_ids_len + 2], dtype=torch.int32
                ),
            }
            return outputs
        for key, data_list in outputs.items():
            if isinstance(data_list[0], torch.Tensor):
@@ -124,4 +150,32 @@
                outputs[key] = torch.nn.utils.rnn.pad_sequence(
                    data_list, batch_first=True, padding_value=pad_value
                )
        if self.batch_type != "example":
            # length-based batching: if padding pushed the batch past its budget,
            # repeatedly drop half of the samples (at most three rounds)
            for i in range(3):
                outputs = self._filter_badcase(outputs, i=i)
        return outputs
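The padding step is plain torch.nn.utils.rnn.pad_sequence, which pads each variable-length tensor along dim 0 and stacks the results. A standalone sketch with two [T, d] fbank samples (shapes and pad value are illustrative; the real pad values come from int_pad_value / float_pad_value):

    import torch

    feats = [torch.randn(7, 128), torch.randn(5, 128)]  # two [T, d] samples
    batch = torch.nn.utils.rnn.pad_sequence(feats, batch_first=True, padding_value=0.0)
    assert batch.shape == (2, 7, 128)  # padded up to the longest T
    lengths = torch.tensor([7, 5], dtype=torch.int32)  # true lengths, kept alongside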
    def _filter_badcase(self, outputs, i=0):
        b, t, _ = outputs["speech"].shape
        if b * t > self.batch_size * 1.25:  # 25% slack over the token budget
            beg = torch.randint(0, 2, ()).item()  # random phase: start at 0 or 1
            if b < 2:
                beg = 0
            logging.info(
                f"Warning: b * t = {b * t} > 1.25 * batch_size ({self.batch_size}); dropping half of the batch, round {i}, beg: {beg}"
            )
            # keep every other sample across all fields
            for key in outputs:
                outputs[key] = outputs[key][beg : beg + b : 2]
            # re-trim padding to the new per-batch maxima
            speech_lengths_max = outputs["speech_lengths"].max().item()
            outputs["speech"] = outputs["speech"][:, :speech_lengths_max, :]
            text_lengths_max = outputs["text_lengths"].max().item()
            outputs["text"] = outputs["text"][:, :text_lengths_max]
            target_mask_lengths_max = outputs["target_mask_lengths"].max().item()
            outputs["target_mask"] = outputs["target_mask"][:, :target_mask_lengths_max]
        return outputs
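The halving in _filter_badcase is just a stride-2 slice with a random starting phase, so repeated rounds are not biased toward even indices; after the drop, padding is re-trimmed to the surviving maxima. A toy run of the slice itself:

    import torch

    speech = torch.zeros(5, 100, 128)  # padded batch: b=5, t=100, d=128
    beg = 1                            # torch.randint(0, 2, ()).item() in the code above
    kept = speech[beg : beg + 5 : 2]   # rows 1 and 3 survive
    assert kept.shape == (2, 100, 128)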