From 7fc17b1aafb0c53ac83a86ecf4adbee3e10fdb86 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Sun, 09 Jun 2024 22:40:49 +0800
Subject: [PATCH] fix inference output paths, per-checkpoint out_dir, and report load/feature-extraction timing via meta_data

---
 examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh |   28 ++++++++++++++--------------
 funasr/models/llm_asr/model.py                                   |   16 ++++++++++++++--
 examples/industrial_data_pretraining/llm_asr/demo_speech2text.py |    1 +
 3 files changed, 29 insertions(+), 16 deletions(-)

diff --git a/examples/industrial_data_pretraining/llm_asr/demo_speech2text.py b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.py
index dfbe95b..e5e3e23 100644
--- a/examples/industrial_data_pretraining/llm_asr/demo_speech2text.py
+++ b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.py
@@ -15,6 +15,7 @@
     "/nfs/beinian.lzr/workspace/GPT-4o/Data/Speech2Text/TestData/aishell1_test_speech2text.jsonl"
 )
 output_dir = f"{os.path.join(ckpt_dir, ckpt_id)}"
+device = "cuda:0"
 
 ckpt_dir = sys.argv[1]
 ckpt_id = sys.argv[2]
diff --git a/examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh
index 4f521f2..81f211f 100644
--- a/examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh
+++ b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh
@@ -1,10 +1,10 @@
 
 
 
-ckpt_dir="/nfs/zhifu.gzf/ckpt/saves/qwen_1.5_7b/full/sft/asr_tts_text_exp1_ds_z3/checkpoint-11000"
+ckpt_dir="/nfs/beinian.lzr/workspace/GPT-4o/Exp/exp6/5m-8gpu/exp6_speech2text_linear_ddp_0609"
 ckpt_id="model.pt.ep0.90000"
 jsonl_dir="/nfs/beinian.lzr/workspace/GPT-4o/Data/Speech2Text/TestData"
-out_dir="${ckpt_dir}/asr"
+out_dir="${ckpt_dir}/inference-${ckpt_id}"
 mkdir -p ${out_dir}
 
 device="cuda:0"
@@ -12,9 +12,9 @@
 for data_set in "librispeech_test_clean_speech2text.jsonl" "librispeech_test_other_speech2text.jsonl"; do
     jsonl=${jsonl_dir}/${data_set}
     output_dir=${out_dir}/${data_set}
-
-    pred_file=${out_dir}/${data_set}/1best_recog/text_tn
-    ref_file=${out_dir}/${data_set}/1best_recog/label
+    mkdir -p ${output_dir}
+    pred_file=${output_dir}/1best_recog/text_tn
+    ref_file=${output_dir}/1best_recog/label
 
     python ./demo_speech2text.py ${ckpt_dir} ${ckpt_id} ${jsonl} ${output_dir} ${device}
 
@@ -26,9 +26,9 @@
 for data_set in "aishell1_test_speech2text.jsonl" "aishell2_ios_test_speech2text.jsonl" "librispeech_test_other_speech2text.jsonl"; do
     jsonl=${jsonl_dir}/${data_set}
     output_dir=${out_dir}/${data_set}
-
-    pred_file=${out_dir}/${data_set}/1best_recog/text_tn
-    ref_file=${out_dir}/${data_set}/1best_recog/label
+    mkdir -p ${output_dir}
+    pred_file=${output_dir}/1best_recog/text_tn
+    ref_file=${output_dir}/1best_recog/label
 
     python ./demo_speech2text.py ${ckpt_dir} ${ckpt_id} ${jsonl} ${output_dir}
 
@@ -39,9 +39,9 @@
 for data_set in "s2tt_en2zh.v20240605.test.jsonl"; do
     jsonl=${jsonl_dir}/${data_set}
     output_dir=${out_dir}/${data_set}
-
-    pred_file=${out_dir}/${data_set}/1best_recog/text_tn
-    ref_file=${out_dir}/${data_set}/1best_recog/label
+    mkdir -p ${output_dir}
+    pred_file=${output_dir}/1best_recog/text_tn
+    ref_file=${output_dir}/1best_recog/label
 
     python ./demo_speech2text.py ${ckpt_dir} ${ckpt_id} ${jsonl} ${output_dir}
 
@@ -52,9 +52,9 @@
 for data_set in "s2tt_zh2en.v20240605.test.jsonl"; do
     jsonl=${jsonl_dir}/${data_set}
     output_dir=${out_dir}/${data_set}
-
-    pred_file=${out_dir}/${data_set}/1best_recog/text_tn
-    ref_file=${out_dir}/${data_set}/1best_recog/label
+    mkdir -p ${output_dir}
+    pred_file=${output_dir}/1best_recog/text_tn
+    ref_file=${output_dir}/1best_recog/label
 
     python ./demo_speech2text.py ${ckpt_dir} ${ckpt_id} ${jsonl} ${output_dir}
 
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index aacbe45..21072b0 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -556,7 +556,10 @@
 
         return contents
 
-    def data_load_speech(self, contents: dict, tokenizer, frontend, **kwargs):
+    def data_load_speech(self, contents: dict, tokenizer, frontend, meta_data=None, **kwargs):
+        # NOTE(review): None sentinel instead of a mutable {} default, which
+        # would be shared (and mutated) across calls.
+        meta_data = {} if meta_data is None else meta_data
 
         system = contents["system"]
         user = contents["user"]
@@ -594,7 +594,10 @@
                     )
                     if sub_str.startswith("!"):
                         try:
+                            time1 = time.perf_counter()
                             data_src = load_audio_text_image_video(sub_str[1:], fs=frontend.fs)
+                            time2 = time.perf_counter()
+                            meta_data["load_data"] = f"{time2 - time1:0.3f}"
                         except Exception as e:
                             logging.error(f"Loading wav failed! {str(e)}, {traceback.format_exc()}")
 
@@ -604,6 +607,15 @@
                             frontend=frontend,
                             is_final=True,
                         )  # speech: [b, T, d]
+
+                        time3 = time.perf_counter()
+                        meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
+                        meta_data["batch_data_time"] = (
+                            speech_lengths.sum().item()
+                            * frontend.frame_shift
+                            * frontend.lfr_n
+                            / 1000
+                        )
 
                         if kwargs.get("permute", True):
                             speech = speech.permute(0, 2, 1)
@@ -666,7 +678,7 @@
             raise NotImplementedError("batch decoding is not implemented")
 
         contents = self.data_template(data_in[0])
-        output = self.data_load_speech(contents, tokenizer, frontend, **kwargs)
+        output = self.data_load_speech(contents, tokenizer, frontend, meta_data=meta_data, **kwargs)
         batch = to_device(output, kwargs["device"])
 
         # audio encoder

--
Gitblit v1.9.1