莫拉古
2024-11-29 ae49b2a8e1bc676e6014d8a12ebeec947b655e3e
funasr/models/sense_voice/model.py
@@ -196,13 +196,13 @@
                "inf"
            )  # float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            scores = scores.masked_fill(mask, min_value)
-            self.attn = torch.softmax(scores, dim=-1).masked_fill(
+            attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0
            )  # (batch, head, time1, time2)
        else:
-            self.attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
+            attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
-        p_attn = self.dropout(self.attn)
+        p_attn = self.dropout(attn)
        x = torch.matmul(p_attn, value)  # (batch, head, time1, d_k)
        x = (
            x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
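This hunk stops caching the attention map on the module: with `self.attn`, the softmaxed weights (and, during training, the autograd graph hanging off them) stay referenced after `forward()` returns, while a local `attn` is released as soon as the call ends. A minimal sketch of the pattern, assuming an illustrative class name and shapes (`TinyAttention` is not the FunASR class):

```python
import torch


class TinyAttention(torch.nn.Module):
    """Sketch of the hunk's pattern: keep the attention map in a local
    variable instead of self.attn, so it is freed after forward()."""

    def __init__(self, dropout_rate: float = 0.1):
        super().__init__()
        self.dropout = torch.nn.Dropout(dropout_rate)

    def forward(self, scores, value, mask=None):
        if mask is not None:
            scores = scores.masked_fill(mask, -float("inf"))
            attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
        else:
            attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
        p_attn = self.dropout(attn)  # local: released when forward() returns
        return torch.matmul(p_attn, value)  # (batch, head, time1, d_k)
```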
@@ -555,7 +555,8 @@
        ilens: torch.Tensor,
    ):
        """Embed positions in tensor."""
-        masks = sequence_mask(ilens, device=ilens.device)[:, None, :]
+        maxlen = xs_pad.shape[1]
+        masks = sequence_mask(ilens, maxlen=maxlen, device=ilens.device)[:, None, :]
        xs_pad *= self.output_size() ** 0.5
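This hunk pins the mask length to the padded input: `sequence_mask(ilens)` alone sizes the mask to `max(ilens)`, which can be shorter than `xs_pad.shape[1]` when the batch is padded past the longest utterance, and the mask then fails to broadcast against `xs_pad`. A runnable sketch with a hand-rolled stand-in for `sequence_mask` (the real helper ships with FunASR; only the `maxlen`/`device` semantics shown here are assumed):

```python
import torch


def sequence_mask(lengths, maxlen=None, device=None):
    # Stand-in for FunASR's sequence_mask; assumed semantics:
    # True for valid frames, False for padding.
    if maxlen is None:
        maxlen = int(lengths.max())  # may be shorter than xs_pad.shape[1]
    positions = torch.arange(maxlen, device=device)
    return positions[None, :] < lengths[:, None]


ilens = torch.tensor([3, 5])
xs_pad = torch.zeros(2, 6, 4)  # padded to 6 frames, longer than max(ilens)

# Without maxlen the mask would be (2, 5) and no longer line up with xs_pad;
# pinning it to xs_pad.shape[1] keeps the mask and input time axes in sync.
masks = sequence_mask(ilens, maxlen=xs_pad.shape[1], device=ilens.device)[:, None, :]
print(masks.shape)  # torch.Size([2, 1, 6])
```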
@@ -644,7 +645,13 @@
        self.embed = torch.nn.Embedding(
            7 + len(self.lid_dict) + len(self.textnorm_dict), input_size
        )
        self.emo_dict = {"unk": 25009, "happy": 25001, "sad": 25002, "angry": 25003, "neutral": 25004}
        self.emo_dict = {
            "unk": 25009,
            "happy": 25001,
            "sad": 25002,
            "angry": 25003,
            "neutral": 25004,
        }
        self.criterion_att = LabelSmoothingLoss(
            size=self.vocab_size,
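The surrounding context sizes the special-token embedding as 7 reserved slots plus one per language id plus one per textnorm flag. A toy illustration of that arithmetic with hypothetical dict contents (the real `lid_dict` and `textnorm_dict` ship with SenseVoice; only the sizing expression mirrors the hunk):

```python
import torch

# Hypothetical contents -- only the sizing arithmetic is from the hunk.
lid_dict = {"auto": 0, "zh": 3, "en": 4}
textnorm_dict = {"withitn": 14, "woitn": 15}
input_size = 512

embed = torch.nn.Embedding(7 + len(lid_dict) + len(textnorm_dict), input_size)
print(embed.num_embeddings)  # 12 = 7 + 3 + 2

# Looking up one special-token vector per utterance:
lang_query = embed(torch.tensor([[lid_dict["zh"]]]))
print(lang_query.shape)  # torch.Size([1, 1, 512])
```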
@@ -874,7 +881,7 @@
        ctc_logits = self.ctc.log_softmax(encoder_out)
        if kwargs.get("ban_emo_unk", False):
            ctc_logits[:, :, self.emo_dict["unk"]] = -float("inf")
        results = []
        b, n, d = encoder_out.size()
        if isinstance(key[0], (list, tuple)):
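The last hunk uses `emo_dict` at decode time: with `ban_emo_unk` set, the log-probability of the "unk" emotion token is forced to negative infinity, so greedy or beam CTC decoding can never emit it and must pick a concrete emotion. A toy reproduction of that masking with a made-up vocab size and batch (only the token id 25009 comes from the `emo_dict` above):

```python
import torch

vocab_size = 25055  # illustrative only
ctc_logits = torch.randn(2, 10, vocab_size).log_softmax(dim=-1)
emo_unk_id = 25009  # self.emo_dict["unk"]

ban_emo_unk = True
if ban_emo_unk:
    # -inf log-probability removes "unk" from every frame's argmax/beam.
    ctc_logits[:, :, emo_unk_id] = -float("inf")

assert (ctc_logits.argmax(dim=-1) != emo_unk_id).all()
```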