From 23e7ddebccd3b05cf7ef89809bcfe565ad6dfa1f Mon Sep 17 00:00:00 2001
From: majic31 <majic31@163.com>
Date: Tue, 24 Dec 2024 10:00:14 +0800
Subject: [PATCH] Fix the variable name (#2328)
---
funasr/models/sense_voice/model.py | 81 ++++++++++++++++++++++++++++++++++++----
1 file changed, 72 insertions(+), 9 deletions(-)
diff --git a/funasr/models/sense_voice/model.py b/funasr/models/sense_voice/model.py
index 1311987..9d8ef84 100644
--- a/funasr/models/sense_voice/model.py
+++ b/funasr/models/sense_voice/model.py
@@ -19,6 +19,7 @@
from funasr.models.paraformer.search import Hypothesis
+from .utils.ctc_alignment import ctc_forced_align
class SinusoidalPositionEncoder(torch.nn.Module):
@@ -196,13 +197,13 @@
"inf"
) # float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
scores = scores.masked_fill(mask, min_value)
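+ # Keep the attention weights in a local `attn` variable instead of caching
+ # them on self, so the forward pass no longer mutates module state.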
- self.attn = torch.softmax(scores, dim=-1).masked_fill(
+ attn = torch.softmax(scores, dim=-1).masked_fill(
mask, 0.0
) # (batch, head, time1, time2)
else:
- self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
+ attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
- p_attn = self.dropout(self.attn)
+ p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
x = (
x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
@@ -555,7 +556,8 @@
ilens: torch.Tensor,
):
"""Embed positions in tensor."""
- masks = sequence_mask(ilens, device=ilens.device)[:, None, :]
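+ # Build the mask with the padded length of xs_pad so its time dimension matches the input.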
+ maxlen = xs_pad.shape[1]
+ masks = sequence_mask(ilens, maxlen=maxlen, device=ilens.device)[:, None, :]
xs_pad *= self.output_size() ** 0.5
@@ -644,7 +646,13 @@
self.embed = torch.nn.Embedding(
7 + len(self.lid_dict) + len(self.textnorm_dict), input_size
)
- self.emo_dict = {"unk": 25009, "happy": 25001, "sad": 25002, "angry": 25003, "neutral": 25004}
+ self.emo_dict = {
+ "unk": 25009,
+ "happy": 25001,
+ "sad": 25002,
+ "angry": 25003,
+ "neutral": 25004,
+ }
self.criterion_att = LabelSmoothingLoss(
size=self.vocab_size,
@@ -850,6 +858,8 @@
use_itn = kwargs.get("use_itn", False)
textnorm = kwargs.get("text_norm", None)
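+ # Optional flag: when True, each result also carries word-level [start_ms, end_ms]
+ # spans computed from a CTC forced alignment (see below).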
+ output_timestamp = kwargs.get("output_timestamp", False)
+
if textnorm is None:
textnorm = "withitn" if use_itn else "woitn"
textnorm_query = self.embed(
@@ -874,7 +884,7 @@
ctc_logits = self.ctc.log_softmax(encoder_out)
if kwargs.get("ban_emo_unk", False):
ctc_logits[:, :, self.emo_dict["unk"]] = -float("inf")
-
+
results = []
b, n, d = encoder_out.size()
if isinstance(key[0], (list, tuple)):
@@ -898,18 +908,71 @@
# Change integer-ids to tokens
text = tokenizer.decode(token_int)
- result_i = {"key": key[i], "text": text}
- results.append(result_i)
+ # result_i = {"key": key[i], "text": text}
+ # results.append(result_i)
if ibest_writer is not None:
ibest_writer["text"][key[i]] = text
+ if output_timestamp:
+ from itertools import groupby
+
+ timestamp = []
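+ # The first 4 output tokens / encoder frames correspond to the prepended prompt
+ # queries (language, event, emotion, text-norm), so both are skipped for alignment.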
+ tokens = tokenizer.text2tokens(text)[4:]
+ logits_speech = self.ctc.softmax(encoder_out)[i, 4 : encoder_out_lens[i].item(), :]
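+ # Greedy CTC pass: where the argmax frame is blank, zero that frame's blank
+ # probability before running the forced alignment.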
+ pred = logits_speech.argmax(-1).cpu()
+ logits_speech[pred == self.blank_id, self.blank_id] = 0
+ align = ctc_forced_align(
+ logits_speech.unsqueeze(0).float(),
+ torch.Tensor(token_int[4:]).unsqueeze(0).long().to(logits_speech.device),
+ (encoder_out_lens[i : i + 1] - 4).long(),
+ torch.tensor(len(token_int) - 4).unsqueeze(0).long().to(logits_speech.device),
+ ignore_id=self.ignore_id,
+ )
+ pred = groupby(align[0, : encoder_out_lens[i]])
+ _start = 0
+ token_id = 0
+ ts_max = encoder_out_lens[i] - 4
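+ # Map frame indices to seconds: each encoder output frame covers 60 ms,
+ # shifted back by half a frame (30 ms) and clamped to the valid range.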
+ for pred_token, pred_frame in pred:
+ _end = _start + len(list(pred_frame))
+ if pred_token != 0:
+ ts_left = max((_start * 60 - 30) / 1000, 0)
+ ts_right = min((_end * 60 - 30) / 1000, (ts_max * 60 - 30) / 1000)
+ timestamp.append([tokens[token_id], ts_left, ts_right])
+ token_id += 1
+ _start = _end
+ timestamp = self.post(timestamp)
+ result_i = {"key": key[i], "text": text, "timestamp": timestamp}
+ results.append(result_i)
+ else:
+ result_i = {"key": key[i], "text": text}
+ results.append(result_i)
return results, meta_data
+ def post(self, timestamp):
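+ # Merge SentencePiece pieces into word-level spans: a piece starting with "▁",
+ # a single-character piece, or one whose second character is not alphabetic
+ # opens a new span; any other piece extends the previous span's end time.
+ # Standalone "▁" pieces are dropped and only [start_ms, end_ms] pairs are kept.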
+ timestamp_new = []
+ for i, t in enumerate(timestamp):
+ word, start, end = t
+ if word == "鈻�":
+ continue
+ if i == 0:
+ # timestamp_new.append([word, start, end])
+ timestamp_new.append([int(start * 1000), int(end * 1000)])
+ elif word.startswith("鈻�") or len(word) == 1 or not word[1].isalpha():
+ word = word[1:]
+ # timestamp_new.append([word, start, end])
+ timestamp_new.append([int(start * 1000), int(end * 1000)])
+ else:
+ # timestamp_new[-1][0] += word
+ timestamp_new[-1][1] = int(end * 1000)
+ return timestamp_new
+
def export(self, **kwargs):
from .export_meta import export_rebuild_model
if "max_seq_len" not in kwargs:
kwargs["max_seq_len"] = 512
models = export_rebuild_model(model=self, **kwargs)
return models
--
Gitblit v1.9.1