From eb92e79fb94e7b3df8f27c8ce3e607a70dff2a2e Mon Sep 17 00:00:00 2001
From: 语帆 <yf352572@alibaba-inc.com>
Date: Wed, 28 Feb 2024 15:21:32 +0800
Subject: [PATCH] Rename Transformer model to LCBNet and add biasing modules

Register the former Transformer model class as LCBNet
(https://arxiv.org/abs/2401.06390), build its text encoder, fusion
encoder, and bias predictor from the registry, add the biasing
hyper-parameters (select_num, select_length, insert_blank), and split
the loaded samples into audio and OCR lists at inference time.

---
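
Notes (this section, between the "---" and the diffstat, is ignored by
git am): the patch builds three extra modules from the registry and
stores them on the model. A minimal instantiation sketch follows; the
registry names and *_conf values marked "assumed" are illustrative
assumptions, not the shipped configs.

    from funasr.register import tables

    # The patch registers the class under "LCBNet" in model_classes.
    model_class = tables.model_classes.get("LCBNet")
    model = model_class(
        encoder="ConformerEncoder",          # assumed registry name
        encoder_conf={"output_size": 256},   # assumed config
        decoder="TransformerDecoder",        # assumed registry name
        decoder_conf={"num_blocks": 6},      # assumed config
        text_encoder="TransformerEncoder",   # assumed registry name
        text_encoder_conf={"output_size": 256},
        fusion_encoder="FusionEncoder",      # assumed registry name
        fusion_encoder_conf={},
        bias_predictor="BiasPredictor",      # assumed registry name
        bias_predictor_conf={},
        ctc="CTC",                           # assumed registry name
        ctc_conf={},
        ctc_weight=0.3,
        select_num=2,        # biasing hyper-parameters added by this patch
        select_length=3,
        insert_blank=True,
        input_size=80,
        vocab_size=5000,
    )
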
 funasr/models/lcbnet/model.py |   59 +++++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 49 insertions(+), 10 deletions(-)

diff --git a/funasr/models/lcbnet/model.py b/funasr/models/lcbnet/model.py
index c68ccd7..45b1ee5 100644
--- a/funasr/models/lcbnet/model.py
+++ b/funasr/models/lcbnet/model.py
@@ -1,3 +1,8 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
 import logging
 from typing import Union, Dict, List, Tuple, Optional
 
@@ -17,10 +22,14 @@
 from funasr.utils.datadir_writer import DatadirWriter
 from funasr.register import tables
 
-@tables.register("model_classes", "Transformer")
-class Transformer(nn.Module):
-    """CTC-attention hybrid Encoder-Decoder model"""
-
+@tables.register("model_classes", "LCBNet")
+class LCBNet(nn.Module):
+    """
+    Author: Speech Lab of DAMO Academy, Alibaba Group
+    LCB-net: Long-Context Biasing for Audio-Visual Speech Recognition
+    https://arxiv.org/abs/2401.06390
+    """
     
     def __init__(
         self,
@@ -32,10 +41,19 @@
         encoder_conf: dict = None,
         decoder: str = None,
         decoder_conf: dict = None,
+        text_encoder: str = None,
+        text_encoder_conf: dict = None,
+        bias_predictor: str = None,
+        bias_predictor_conf: dict = None,
+        fusion_encoder: str = None,
+        fusion_encoder_conf: dict = None,
         ctc: str = None,
         ctc_conf: dict = None,
         ctc_weight: float = 0.5,
         interctc_weight: float = 0.0,
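+        # LCBNet biasing hyper-parameters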
+        select_num: int = 2,
+        select_length: int = 3,
+        insert_blank: bool = True,
         input_size: int = 80,
         vocab_size: int = -1,
         ignore_id: int = -1,
@@ -66,6 +84,16 @@
         encoder_class = tables.encoder_classes.get(encoder)
         encoder = encoder_class(input_size=input_size, **encoder_conf)
         encoder_output_size = encoder.output_size()
+
+        # lcbnet modules: text encoder, fusion encoder and bias predictor
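+        # (the fusion encoder merges the audio- and text-encoder outputs;
+        # the bias predictor learns which biasing phrases occur in the audio)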
+        text_encoder_class = tables.encoder_classes.get(text_encoder)
+        text_encoder = text_encoder_class(input_size=vocab_size, **text_encoder_conf)
+        fusion_encoder_class = tables.encoder_classes.get(fusion_encoder)
+        fusion_encoder = fusion_encoder_class(**fusion_encoder_conf)
+        bias_predictor_class = tables.encoder_classes.get(bias_predictor)
+        bias_predictor = bias_predictor_class(**bias_predictor_conf)
+
         if decoder is not None:
             decoder_class = tables.decoder_classes.get(decoder)
             decoder = decoder_class(
@@ -91,6 +119,13 @@
         self.specaug = specaug
         self.normalize = normalize
         self.encoder = encoder
+        # lcbnet
+        self.text_encoder = text_encoder
+        self.fusion_encoder = fusion_encoder
+        self.bias_predictor = bias_predictor
+        self.select_num = select_num
+        self.select_length = select_length
+        self.insert_blank = insert_blank
 
         if not hasattr(self.encoder, "interctc_use_conditioning"):
             self.encoder.interctc_use_conditioning = False
@@ -239,15 +274,15 @@
                 ind: int
         """
         with autocast(False):
 
             # Data augmentation
             if self.specaug is not None and self.training:
                 speech, speech_lengths = self.specaug(speech, speech_lengths)
-            
+
             # Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
             if self.normalize is not None:
                 speech, speech_lengths = self.normalize(speech, speech_lengths)
-        
+
         # Forward encoder
         # feats: (Batch, Length, Dim)
         # -> encoder_out: (Batch, Length2, Dim2)
@@ -264,7 +299,7 @@
         
         if intermediate_outs is not None:
             return (encoder_out, intermediate_outs), encoder_out_lens
-        
+
         return encoder_out, encoder_out_lens
     
     def _calc_att_loss(
@@ -391,19 +426,23 @@
         else:
             # extract fbank feats
             time1 = time.perf_counter()
-            audio_sample_list = load_audio_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000),
-                                                            data_type=kwargs.get("data_type", "sound"),
-                                                            tokenizer=tokenizer)
+            sample_list = load_audio_text_image_video(
+                data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000),
+                data_type=kwargs.get("data_type", "sound"), tokenizer=tokenizer,
+            )
             time2 = time.perf_counter()
             meta_data["load_data"] = f"{time2 - time1:0.3f}"
+            audio_sample_list = sample_list[0]  # audio waveforms
+            ocr_sample_list = sample_list[1]    # OCR text used as long-context bias
             speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
                                                    frontend=frontend)
             time3 = time.perf_counter()
             meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
-            meta_data["batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
+            frame_shift = 10  # ms; assumes the frontend uses a 10 ms frame shift
+            meta_data["batch_data_time"] = speech_lengths.sum().item() * frame_shift / 1000
 
         speech = speech.to(device=kwargs["device"])
         speech_lengths = speech_lengths.to(device=kwargs["device"])
         # Encoder
         encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
         if isinstance(encoder_out, tuple):

--
Gitblit v1.9.1