From dec1c875b2fcf0161755b93717d3eac856c6d15d Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Wed, 31 Jan 2024 22:40:19 +0800
Subject: [PATCH] Funasr1.0 bugfix, audio sample input for the vad model (#1333)

---
 funasr/auto/auto_model.py                                     |    2 +-
 funasr/models/transformer/model.py                            |    6 +++---
 examples/industrial_data_pretraining/seaco_paraformer/demo.py |   20 +++++++++++++++++++-
 3 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/examples/industrial_data_pretraining/seaco_paraformer/demo.py b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
index 065b698..e9e226d 100644
--- a/examples/industrial_data_pretraining/seaco_paraformer/demo.py
+++ b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
@@ -15,8 +15,26 @@
                   # spk_model_revision="v2.0.2",
                   )
 
+
+# example1
 res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
                     hotword='达摩院 魔搭',
                      # sentence_timestamp=True,  # return sentence level information when spk_model is not given
                     )
-print(res)
\ No newline at end of file
+print(res)
+
+# example2
+import os, torchaudio
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
+input_tensor, sample_rate = torchaudio.load(wav_file)
+input_tensor = input_tensor.mean(0)
+res = model.generate(input=[input_tensor], batch_size_s=300, is_final=True)
+
+
+# example3
+import soundfile
+import os
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
+speech, sample_rate = soundfile.read(wav_file)
+res = model.generate(input=[speech], batch_size_s=300, is_final=True)
+
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 3986a11..d99fc56 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -228,7 +228,7 @@
             data_batch = data_list[beg_idx:end_idx]
             key_batch = key_list[beg_idx:end_idx]
             batch = {"data_in": data_batch, "key": key_batch}
-            if (end_idx - beg_idx) == 1 and isinstance(data_batch[0], torch.Tensor): # fbank
+            if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank": # fbank
                 batch["data_in"] = data_batch[0]
                 batch["data_lengths"] = input_len
         
diff --git a/funasr/models/transformer/model.py b/funasr/models/transformer/model.py
index 4ad466b..e813e22 100644
--- a/funasr/models/transformer/model.py
+++ b/funasr/models/transformer/model.py
@@ -439,13 +439,13 @@
                 token = tokenizer.ids2tokens(token_int)
                 text = tokenizer.tokens2text(token)
                 
-                # text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
-                result_i = {"key": key[i], "token": token, "text": text}
+                text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
+                result_i = {"key": key[i], "token": token, "text": text_postprocessed}
                 results.append(result_i)
                 
                 if ibest_writer is not None:
                     ibest_writer["token"][key[i]] = " ".join(token)
-                    ibest_writer["text"][key[i]] = text
+                    ibest_writer["text"][key[i]] = text_postprocessed
         
         return results, meta_data
 

--
Gitblit v1.9.1