From 9b4e9cc8a0311e5243d69b73ed073e7ea441982e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 27 Mar 2024 16:05:29 +0800
Subject: [PATCH] monotonic_aligner: fix device move, rename generate() to inference(), tidy imports

* reorder the imports and use load_audio_text_image_video for input
  loading
* register the model as "MonotonicAligner" and drop the .lower() calls
  from the registry lookups, so configured names must match the
  registered names exactly
* rename the generate() entry point to inference()
* actually move speech/speech_lengths to the target device; the old
  code discarded the tensors returned by .to()
* create the DatadirWriter once, cache it on the model, and write
  timestamp results only when an output_dir is given
---
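Notes (not part of the commit message):

The device fix addresses a silent no-op: torch.Tensor.to() returns a
new tensor rather than moving its receiver in place, so the old line
built the moved copies and immediately threw them away. A minimal
illustration of the pitfall, assuming a CUDA device is available:

    import torch

    t = torch.zeros(2)
    t.to("cuda")       # moved copy is discarded; t still lives on the CPU
    t = t.to("cuda")   # correct: rebind the name to the moved tensor

With generate() renamed, callers reach timestamp prediction through
MonotonicAligner.inference(). A usage sketch under stated assumptions:
model, tokenizer, frontend, and inputs are illustrative placeholders,
and only the parameter names and the "device"/"output_dir" kwargs are
taken from the diff below:

    results, meta_data = model.inference(
        inputs,                  # audio + text payload for load_audio_text_image_video
        key=["utt1"],            # one key per utterance
        tokenizer=tokenizer,     # maps token ids back to tokens
        frontend=frontend,       # supplies fs / frame_shift / lfr_n for fbank extraction
        device="cuda",           # target device for the speech tensors
        output_dir="exp/out",    # optional; enables DatadirWriter timestamp output
    )
    # each entry in results carries "key", "text", and "timestamp"
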
funasr/models/monotonic_aligner/model.py | 69 +++++++++++++++++++++---------------
1 file changed, 39 insertions(+), 30 deletions(-)
diff --git a/funasr/models/monotonic_aligner/model.py b/funasr/models/monotonic_aligner/model.py
index ece319d..718923b 100644
--- a/funasr/models/monotonic_aligner/model.py
+++ b/funasr/models/monotonic_aligner/model.py
@@ -1,31 +1,34 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
+
import time
import copy
import torch
from torch.cuda.amp import autocast
from typing import Union, Dict, List, Tuple, Optional
-from funasr.models.paraformer.cif_predictor import mae_loss
-from funasr.models.transformer.utils.add_sos_eos import add_sos_eos
-from funasr.models.transformer.utils.nets_utils import make_pad_mask, pad_list
-from funasr.metrics.compute_acc import th_accuracy
-from funasr.train_utils.device_funcs import force_gatherable
-from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
-from funasr.utils import postprocess_utils
-from funasr.utils.datadir_writer import DatadirWriter
from funasr.register import tables
from funasr.models.ctc.ctc import CTC
-from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank
+from funasr.utils import postprocess_utils
+from funasr.utils.datadir_writer import DatadirWriter
+from funasr.models.paraformer.cif_predictor import mae_loss
+from funasr.train_utils.device_funcs import force_gatherable
+from funasr.models.transformer.utils.add_sos_eos import add_sos_eos
+from funasr.models.transformer.utils.nets_utils import make_pad_mask
+from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
+from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
-
-@tables.register("model_classes", "monotonicaligner")
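+# Register under the exact class name; the registry lookups below no longer lower-case their keys.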
+@tables.register("model_classes", "MonotonicAligner")
class MonotonicAligner(torch.nn.Module):
"""
Author: Speech Lab of DAMO Academy, Alibaba Group
Achieving timestamp prediction while recognizing with non-autoregressive end-to-end ASR model
https://arxiv.org/abs/2301.12343
"""
-
def __init__(
self,
input_size: int = 80,
@@ -41,19 +44,18 @@
length_normalized_loss: bool = False,
**kwargs,
):
-
super().__init__()
if specaug is not None:
- specaug_class = tables.specaug_classes.get(specaug.lower())
+ specaug_class = tables.specaug_classes.get(specaug)
specaug = specaug_class(**specaug_conf)
if normalize is not None:
- normalize_class = tables.normalize_classes.get(normalize.lower())
+ normalize_class = tables.normalize_classes.get(normalize)
normalize = normalize_class(**normalize_conf)
- encoder_class = tables.encoder_classes.get(encoder.lower())
+ encoder_class = tables.encoder_classes.get(encoder)
encoder = encoder_class(input_size=input_size, **encoder_conf)
encoder_output_size = encoder.output_size()
- predictor_class = tables.predictor_classes.get(predictor.lower())
+ predictor_class = tables.predictor_classes.get(predictor)
predictor = predictor_class(**predictor_conf)
self.specaug = specaug
self.normalize = normalize
@@ -147,7 +149,7 @@
return encoder_out, encoder_out_lens
- def generate(self,
+ def inference(self,
data_in,
data_lengths=None,
key: list=None,
@@ -155,11 +157,10 @@
frontend=None,
**kwargs,
):
-
meta_data = {}
# extract fbank feats
time1 = time.perf_counter()
- audio_list, text_token_int_list = load_audio_and_text_image_video(data_in,
+ audio_list, text_token_int_list = load_audio_text_image_video(data_in,
fs=frontend.fs,
audio_fs=kwargs.get("fs", 16000),
data_type=kwargs.get("data_type", "sound"),
@@ -171,7 +172,9 @@
meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
meta_data["batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
- speech.to(device=kwargs["device"]), speech_lengths.to(device=kwargs["device"])
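+        # Tensor.to() is not in-place; rebind the names to keep the tensors on the target device.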
+ speech = speech.to(device=kwargs["device"])
+ speech_lengths = speech_lengths.to(device=kwargs["device"])
# Encoder
encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
@@ -182,21 +185,27 @@
results = []
ibest_writer = None
- if ibest_writer is None and kwargs.get("output_dir") is not None:
- writer = DatadirWriter(kwargs.get("output_dir"))
- ibest_writer = writer["tp_res"]
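+        # Build the writer once and cache it on self so repeated calls reuse the same output_dir.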
+ if kwargs.get("output_dir") is not None:
+ if not hasattr(self, "writer"):
+ self.writer = DatadirWriter(kwargs.get("output_dir"))
+ ibest_writer = self.writer["tp_res"]
+
for i, (us_alpha, us_peak, token_int) in enumerate(zip(us_alphas, us_peaks, text_token_int_list)):
token = tokenizer.ids2tokens(token_int)
timestamp_str, timestamp = ts_prediction_lfr6_standard(us_alpha[:encoder_out_lens[i] * 3],
us_peak[:encoder_out_lens[i] * 3],
copy.copy(token))
- text_postprocessed, time_stamp_postprocessed, word_lists = postprocess_utils.sentence_postprocess(
- token, timestamp)
+ text_postprocessed, time_stamp_postprocessed, _ = postprocess_utils.sentence_postprocess(token, timestamp)
result_i = {"key": key[i], "text": text_postprocessed,
"timestamp": time_stamp_postprocessed,
- }
- # ibest_writer["token"][key[i]] = " ".join(token)
- ibest_writer["timestamp_list"][key[i]] = time_stamp_postprocessed
- ibest_writer["timestamp_str"][key[i]] = timestamp_str
+ }
results.append(result_i)
+
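+            # Emit timestamp files only when an output_dir was supplied.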
+ if ibest_writer:
+ # ibest_writer["token"][key[i]] = " ".join(token)
+ ibest_writer["timestamp_list"][key[i]] = time_stamp_postprocessed
+ ibest_writer["timestamp_str"][key[i]] = timestamp_str
+
return results, meta_data
\ No newline at end of file
--
Gitblit v1.9.1