From 9b4e9cc8a0311e5243d69b73ed073e7ea441982e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 27 Mar 2024 16:05:29 +0800
Subject: [PATCH] monotonic_aligner: train update (fix registry names, imports, device transfer, output writer)

---
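Notes: a minimal usage sketch for the patched model, run through FunASR's
AutoModel wrapper (which dispatches to the model-level inference() method
renamed below). The "fa-zh" model name, the output_dir keyword, and the file
paths are assumptions for illustration, not part of this patch.

    # Hedged sketch: timestamp prediction with the MonotonicAligner-backed model.
    from funasr import AutoModel

    model = AutoModel(model="fa-zh")  # assumed public name of the timestamp model
    res = model.generate(
        input=("example.wav", "example_text.txt"),  # placeholder (audio, transcript) pair
        data_type=("sound", "text"),
        output_dir="./tp_res_out",  # optional; if set, exercises the DatadirWriter branch guarded below
    )
    print(res[0]["timestamp"])  # per-token timestamps returned alongside the postprocessed text
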
 funasr/models/monotonic_aligner/model.py |   44 ++++++++++++++++++++++++++++----------------
 1 file changed, 28 insertions(+), 16 deletions(-)

diff --git a/funasr/models/monotonic_aligner/model.py b/funasr/models/monotonic_aligner/model.py
index 62899f2..718923b 100644
--- a/funasr/models/monotonic_aligner/model.py
+++ b/funasr/models/monotonic_aligner/model.py
@@ -1,3 +1,8 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
 import time
 import copy
 import torch
@@ -5,6 +10,7 @@
 from typing import Union, Dict, List, Tuple, Optional
 
 from funasr.register import tables
+from funasr.models.ctc.ctc import CTC
 from funasr.utils import postprocess_utils
 from funasr.utils.datadir_writer import DatadirWriter
 from funasr.models.paraformer.cif_predictor import mae_loss
@@ -12,10 +18,10 @@
 from funasr.models.transformer.utils.add_sos_eos import add_sos_eos
 from funasr.models.transformer.utils.nets_utils import make_pad_mask
 from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
-from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank, load_audio_and_text_image_video
+from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
 
 
-@tables.register("model_classes", "monotonicaligner")
+@tables.register("model_classes", "MonotonicAligner")
 class MonotonicAligner(torch.nn.Module):
     """
     Author: Speech Lab of DAMO Academy, Alibaba Group
@@ -40,15 +46,15 @@
         super().__init__()
 
         if specaug is not None:
-            specaug_class = tables.specaug_classes.get(specaug.lower())
+            specaug_class = tables.specaug_classes.get(specaug)
             specaug = specaug_class(**specaug_conf)
         if normalize is not None:
-            normalize_class = tables.normalize_classes.get(normalize.lower())
+            normalize_class = tables.normalize_classes.get(normalize)
             normalize = normalize_class(**normalize_conf)
-        encoder_class = tables.encoder_classes.get(encoder.lower())
+        encoder_class = tables.encoder_classes.get(encoder)
         encoder = encoder_class(input_size=input_size, **encoder_conf)
         encoder_output_size = encoder.output_size()
-        predictor_class = tables.predictor_classes.get(predictor.lower())
+        predictor_class = tables.predictor_classes.get(predictor)
         predictor = predictor_class(**predictor_conf)
         self.specaug = specaug
         self.normalize = normalize
@@ -142,7 +148,7 @@
 
         return encoder_out, encoder_out_lens
     
-    def generate(self,
+    def inference(self,
              data_in,
              data_lengths=None,
              key: list=None,
@@ -153,7 +159,7 @@
         meta_data = {}
         # extract fbank feats
         time1 = time.perf_counter()
-        audio_list, text_token_int_list = load_audio_and_text_image_video(data_in, 
+        audio_list, text_token_int_list = load_audio_text_image_video(data_in, 
                                                                             fs=frontend.fs, 
                                                                             audio_fs=kwargs.get("fs", 16000), 
                                                                             data_type=kwargs.get("data_type", "sound"), 
@@ -165,7 +171,8 @@
         meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
         meta_data["batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
             
-        speech.to(device=kwargs["device"]), speech_lengths.to(device=kwargs["device"])
+        speech = speech.to(device=kwargs["device"])
+        speech_lengths = speech_lengths.to(device=kwargs["device"])
 
         # Encoder
         encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
@@ -176,9 +183,11 @@
         
         results = []
         ibest_writer = None
-        if ibest_writer is None and kwargs.get("output_dir") is not None:
-            writer = DatadirWriter(kwargs.get("output_dir"))
-            ibest_writer = writer["tp_res"]
+        if kwargs.get("output_dir") is not None:
+            if not hasattr(self, "writer"):
+                self.writer = DatadirWriter(kwargs.get("output_dir"))
+            ibest_writer = self.writer["tp_res"]
+
         for i, (us_alpha, us_peak, token_int) in enumerate(zip(us_alphas, us_peaks, text_token_int_list)):
             token = tokenizer.ids2tokens(token_int)
             timestamp_str, timestamp = ts_prediction_lfr6_standard(us_alpha[:encoder_out_lens[i] * 3],
@@ -187,9 +196,12 @@
             text_postprocessed, time_stamp_postprocessed, _ = postprocess_utils.sentence_postprocess(token, timestamp)
             result_i = {"key": key[i], "text": text_postprocessed,
                                 "timestamp": time_stamp_postprocessed,
-                                }    
-            # ibest_writer["token"][key[i]] = " ".join(token)
-            ibest_writer["timestamp_list"][key[i]] = time_stamp_postprocessed
-            ibest_writer["timestamp_str"][key[i]] = timestamp_str
+                                }
             results.append(result_i)
+
+            if ibest_writer:
+                # ibest_writer["token"][key[i]] = " ".join(token)
+                ibest_writer["timestamp_list"][key[i]] = time_stamp_postprocessed
+                ibest_writer["timestamp_str"][key[i]] = timestamp_str
+            
         return results, meta_data
\ No newline at end of file

--
Gitblit v1.9.1