From 94de39dde2e616a01683c518023d0fab72b4e103 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 19 Feb 2024 22:21:50 +0800
Subject: [PATCH] aishell example
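
Refactor the CT-Transformer punctuation model: rename generate() to
inference(), load the input text via load_audio_text_image_video(), accept
an optional jieba user dictionary when splitting words, and return the
accumulated punctuation ids (punc_array) alongside the punctuated text.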
---
funasr/models/ct_transformer/model.py | 77 +++++++++++++++++++++++++-------------
 1 file changed, 51 insertions(+), 26 deletions(-)
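
Note: the sketch below (not part of the patch) shows one way the updated
entry point could be exercised through FunASR's AutoModel wrapper; the model
id, dictionary path, and input string are placeholders for illustration:

    from funasr import AutoModel

    # "ct-punc" is a placeholder punctuation model id; substitute your own
    model = AutoModel(model="ct-punc")
    res = model.generate(
        input="那今天的会就到这里吧明年见",  # placeholder unpunctuated text
        jieba_usr_dict="usr_dict.txt",  # optional path to a jieba user dictionary
    )
    print(res[0]["text"])        # punctuated text
    print(res[0]["punc_array"])  # punctuation ids accumulated by inference()
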
diff --git a/funasr/models/ct_transformer/model.py b/funasr/models/ct_transformer/model.py
index 24a6aea..1e53aa3 100644
--- a/funasr/models/ct_transformer/model.py
+++ b/funasr/models/ct_transformer/model.py
@@ -1,21 +1,34 @@
-from typing import Any
-from typing import List
-from typing import Tuple
-from typing import Optional
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
+
+import torch
import numpy as np
import torch.nn.functional as F
-
-from funasr.models.transformer.utils.nets_utils import make_pad_mask
-from funasr.train_utils.device_funcs import force_gatherable
-from funasr.train_utils.device_funcs import to_device
-import torch
-import torch.nn as nn
-from funasr.models.ct_transformer.utils import split_to_mini_sentence, split_words
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+from typing import Any, List, Tuple, Optional
from funasr.register import tables
+from funasr.train_utils.device_funcs import to_device
+from funasr.train_utils.device_funcs import force_gatherable
+from funasr.utils.load_utils import load_audio_text_image_video
+from funasr.models.transformer.utils.nets_utils import make_pad_mask
+from funasr.models.ct_transformer.utils import split_to_mini_sentence, split_words
+
+
+if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
+ from torch.cuda.amp import autocast
+else:
+ # Nothing to do if torch<1.6.0
+ @contextmanager
+ def autocast(enabled=True):
+ yield
+
@tables.register("model_classes", "CTTransformer")
-class CTTransformer(nn.Module):
+class CTTransformer(torch.nn.Module):
"""
Author: Speech Lab of DAMO Academy, Alibaba Group
CT-Transformer: Controllable time-delay transformer for real-time punctuation prediction and disfluency detection
@@ -44,11 +57,11 @@
punc_weight = [1] * punc_size
- self.embed = nn.Embedding(vocab_size, embed_unit)
- encoder_class = tables.encoder_classes.get(encoder.lower())
+ self.embed = torch.nn.Embedding(vocab_size, embed_unit)
+ encoder_class = tables.encoder_classes.get(encoder)
encoder = encoder_class(**encoder_conf)
- self.decoder = nn.Linear(att_unit, punc_size)
+ self.decoder = torch.nn.Linear(att_unit, punc_size)
self.encoder = encoder
self.punc_list = punc_list
self.punc_weight = punc_weight
@@ -59,7 +72,7 @@
- def punc_forward(self, text: torch.Tensor, text_lengths: torch.Tensor) -> Tuple[torch.Tensor, None]:
+ def punc_forward(self, text: torch.Tensor, text_lengths: torch.Tensor, **kwargs):
"""Compute loss value from buffer sequences.
Args:
@@ -210,7 +223,7 @@
loss, stats, weight = force_gatherable((loss, stats, ntokens), loss.device)
return loss, stats, weight
- def generate(self,
+ def inference(self,
data_in,
data_lengths=None,
key: list = None,
@@ -219,13 +232,19 @@
**kwargs,
):
assert len(data_in) == 1
-
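+ # resolve data_in to plain text (load_audio_text_image_video also handles file inputs)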
+ text = load_audio_text_image_video(data_in, data_type=kwargs.get("data_type", "text"))[0]
vad_indexes = kwargs.get("vad_indexes", None)
- text = data_in[0]
- text_lengths = data_lengths[0] if data_lengths is not None else None
split_size = kwargs.get("split_size", 20)
-
- tokens = split_words(text)
+
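+ # optionally build a jieba tokenizer: jieba_usr_dict may be a path to a user
+ # dictionary (str) or the already-loaded jieba module from a previous call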
+ jieba_usr_dict = kwargs.get("jieba_usr_dict", None)
+ if jieba_usr_dict and isinstance(jieba_usr_dict, str):
+ import jieba
+ jieba.load_userdict(jieba_usr_dict)
+ jieba_usr_dict = jieba
+ kwargs["jieba_usr_dict"] = "jieba_usr_dict"
+ tokens = split_words(text, jieba_usr_dict=jieba_usr_dict)
tokens_int = tokenizer.encode(tokens)
mini_sentences = split_to_mini_sentence(tokens, split_size)
@@ -238,6 +257,7 @@
cache_pop_trigger_limit = 200
results = []
meta_data = {}
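+ # running tensor of predicted punctuation ids across all mini-sentences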
+ punc_array = None
for mini_sentence_i in range(len(mini_sentences)):
mini_sentence = mini_sentences[mini_sentence_i]
mini_sentence_id = mini_sentences_id[mini_sentence_i]
@@ -313,15 +333,20 @@
elif new_mini_sentence[-1] == ",":
new_mini_sentence_out = new_mini_sentence[:-1] + "."
new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.sentence_end_id]
- elif new_mini_sentence[-1] != "。" and new_mini_sentence[-1] != "？" and len(new_mini_sentence[-1].encode())==0:
+ elif new_mini_sentence[-1] != "。" and new_mini_sentence[-1] != "？" and len(new_mini_sentence[-1].encode())!=1:
new_mini_sentence_out = new_mini_sentence + "。"
new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.sentence_end_id]
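+ # keep punc_array consistent with the appended sentence end (here and in the ASCII branch below)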
+ if len(punctuations): punctuations[-1] = 2
elif new_mini_sentence[-1] != "." and new_mini_sentence[-1] != "?" and len(new_mini_sentence[-1].encode())==1:
new_mini_sentence_out = new_mini_sentence + "."
new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.sentence_end_id]
-
- result_i = {"key": key[0], "text": new_mini_sentence_out}
+ if len(punctuations): punctuations[-1] = 2
+ # append this mini-sentence's punctuation ids to the running punc_array
+ if punc_array is None:
+ punc_array = punctuations
+ else:
+ punc_array = torch.cat([punc_array, punctuations], dim=0)
+ result_i = {"key": key[0], "text": new_mini_sentence_out, "punc_array": punc_array}
results.append(result_i)
-
return results, meta_data
--
Gitblit v1.9.1