From 59bc02b089f7a626fe67907dcfc695eae6883f82 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 14 Jun 2024 13:59:49 +0800
Subject: [PATCH] decoding: follow configured audio downsample rates; add encode() hook and LLMASR3

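Make the dataset's estimate of audio placeholder tokens follow the
configured audio_encoder_downsample_rate and audio_adaptor_downsample_rate
instead of hard-coded conv arithmetic, factor the audio-encoder call into
an encode() hook so subclasses can change the input layout, and register an
LLMASR3 variant whose encoder consumes (batch, time, feat) input directly.
Also expose encoder_output_size on the sense_voice models.

A minimal sketch of the length bookkeeping (hypothetical helper name;
assumes the rate-4 path is two kernel-3, stride-2, padding-1 convs, as in
the dataset formula):

    def estimate_audio_token_len(n_frames, encoder_rate=4, adaptor_rate=2):
        olens = n_frames
        if encoder_rate == 4:
            for _ in range(2):
                # conv output length: 1 + (L - kernel + 2 * padding) // stride
                olens = 1 + (olens - 3 + 2 * 1) // 2
        return (olens - 1) // adaptor_rate + 1

    # e.g. 3000 fbank frames -> 1500 -> 750 encoder frames -> 375 audio tokens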
---
 funasr/models/sense_voice/model.py                                          |    2 ++
 funasr/models/llm_asr/model.py                                              |   31 +++++++++++++++++++++++++++----
 examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml |    2 +-
 funasr/datasets/openai_datasets/datasets.py                                 |   14 +++++++++++---
 4 files changed, 41 insertions(+), 8 deletions(-)

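Note: the new encode() hook only changes the input layout handed to the
audio encoder. An illustrative sketch (shapes inferred from the existing
permute; the feature dimension here is arbitrary):

    import torch

    speech = torch.randn(2, 3000, 128)    # (batch, time, feat) fbank features
    encoder_in = speech.permute(0, 2, 1)  # (batch, feat, time), as the base model's encode() does
    # LLMASR3 overrides encode() to skip the permute for encoders that
    # consume (batch, time, feat) directly.
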
diff --git a/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml b/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml
index 483f219..48bd0cf 100644
--- a/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml
+++ b/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml
@@ -69,7 +69,7 @@
   batch_size_scale_ratio_max: 2
   num_workers: 4
   audio_adaptor_downsample_rate: ${audio_adaptor_conf.downsample_rate}
-  audio_encoder_downsample_rate: 2
+  audio_encoder_downsample_rate: 4  # overall temporal downsampling of the audio encoder; used for the length estimate in datasets.py
   data_split_num: 512
   batch_size_sample_max: 15
   retry: 20
diff --git a/funasr/datasets/openai_datasets/datasets.py b/funasr/datasets/openai_datasets/datasets.py
index 39b8453..7300b9d 100644
--- a/funasr/datasets/openai_datasets/datasets.py
+++ b/funasr/datasets/openai_datasets/datasets.py
@@ -64,6 +64,8 @@
         self.max_token_length = kwargs.get("max_token_length", 1024)
         self.batch_size_scale_ratio_max = kwargs.get("batch_size_scale_ratio_max", 1.5)
         self.batch_size_token_max = kwargs.get("batch_size_token_max", 2500)
+        self.audio_adaptor_downsample_rate = kwargs.get("audio_adaptor_downsample_rate", 2)
+        self.audio_encoder_downsample_rate = kwargs.get("audio_encoder_downsample_rate", 4)
 
     def get_source_len(self, index):
         item = self.index_ds[index]
@@ -136,10 +138,16 @@
                                 speech = speech.permute(0, 2, 1)
                             # if speech_lengths > self.batch_size:
                             #     continue
+                            if self.audio_encoder_downsample_rate == 4:
+                                # two stride-2 convs (kernel 3, padding 1): L_out = 1 + (L_in - 3 + 2 * 1) // 2
+                                olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
+                                olens = 1 + (olens - 3 + 2 * 1) // 2
+                            elif self.audio_encoder_downsample_rate == 1:
+                                olens = speech_lengths[0].item()
+                            else:
+                                raise ValueError(f"unsupported audio_encoder_downsample_rate: {self.audio_encoder_downsample_rate}")
 
-                            olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
-                            olens = 1 + (olens - 3 + 2 * 1) // 2
-                            sub_token_len = (olens - 1) // 2 + 1
+                            sub_token_len = (olens - 1) // self.audio_adaptor_downsample_rate + 1  # audio token slots after the adaptor
                             sub_token = [0] * sub_token_len
                             fbank_beg_i = [len(source_ids)]
                             source_ids += sub_token
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 45e56c3..84d7d33 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -498,9 +498,7 @@
 
         with torch.cuda.amp.autocast(enabled=False):
             # audio encoder
-            encoder_out, encoder_out_lens = self.audio_encoder(
-                speech.permute(0, 2, 1), speech_lengths
-            )
+            encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
 
             # audio_adaptor
             encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
@@ -565,6 +563,12 @@
             batch_size = int((labels_ids > 0 + 1).sum())
         loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
         return loss, stats, weight
+
+    def encode(self, speech, speech_lengths):
+        # audio encoder
+        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+
+        return encoder_out, encoder_out_lens
 
     def data_template(self, data):
         system, user, assistant = [], [], []
@@ -721,7 +725,8 @@
             speech = speech.to(torch.float16)
         elif kwargs.get("bf16", False):
             speech = speech.to(torch.bfloat16)
-        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+        # audio encoder
+        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
 
         # audio_adaptor
         encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
@@ -806,3 +811,21 @@
             ibest_writer["text_tn"][key[0]] = response_clean
 
         return results, meta_data
+
+
+@tables.register("model_classes", "LLMASR3")
+class LLMASR3(LLMASR2):
+    """LLMASR variant whose audio encoder consumes (batch, time, feat) input directly, without the permute."""
+
+    def __init__(
+        self,
+        *args,
+        **kwargs,
+    ):
+        # base class assumed to be LLMASR2 (the model extended above); a bare nn.Module has no audio_encoder
+        super().__init__(*args, **kwargs)
+
+    def encode(self, speech, speech_lengths):
+        # audio encoder, fed (batch, time, feat) without the base class's permute
+        encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
+        return encoder_out, encoder_out_lens
diff --git a/funasr/models/sense_voice/model.py b/funasr/models/sense_voice/model.py
index 9be5abe..c77930d 100644
--- a/funasr/models/sense_voice/model.py
+++ b/funasr/models/sense_voice/model.py
@@ -1042,6 +1042,7 @@
         self.length_normalized_loss = length_normalized_loss
         self.beam_search = None
         self.activation_checkpoint = kwargs.get("activation_checkpoint", False)
+        self.encoder_output_size = encoder_output_size
 
     def forward(
         self,
@@ -1451,6 +1452,7 @@
         self.ctc = ctc
 
         self.length_normalized_loss = length_normalized_loss
+        self.encoder_output_size = encoder_output_size
 
     def forward(
         self,

--
Gitblit v1.9.1