From 14a9e017811b14bdddc1301715e356843293ac30 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 05 Feb 2024 11:18:41 +0800
Subject: [PATCH] Merge branch 'main' of github.com:alibaba-damo-academy/FunASR

---
 funasr/auto/auto_model.py                                     |    5 +----
 examples/industrial_data_pretraining/seaco_paraformer/demo.py |   10 +++++++---
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/examples/industrial_data_pretraining/seaco_paraformer/demo.py b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
index 85d989e..804acdd 100644
--- a/examples/industrial_data_pretraining/seaco_paraformer/demo.py
+++ b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
@@ -11,18 +11,22 @@
                   vad_model_revision="v2.0.4",
                   punc_model="damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
                   punc_model_revision="v2.0.4",
-                  # spk_model="damo/speech_campplus_sv_zh-cn_16k-common",
-                  # spk_model_revision="v2.0.2",
+                  spk_model="damo/speech_campplus_sv_zh-cn_16k-common",
+                  spk_model_revision="v2.0.2",
                   )
 
 
 # example1
 res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
                     hotword='达摩院 魔搭',
+                     # preset_spk_num=2,
                      # sentence_timestamp=True,  # return sentence level information when spk_model is not given
                     )
 print(res)
 
+
+'''
+# tensor or numpy as input
 # example2
 import torchaudio
 import os
@@ -38,4 +42,4 @@
 wav_file = os.path.join(model.model_path, "example/asr_example.wav")
 speech, sample_rate = soundfile.read(wav_file)
 res = model.generate(input=[speech], batch_size_s=300, is_final=True)
-
+'''
\ No newline at end of file
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index ae47d35..3829bca 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -121,9 +121,6 @@
             if spk_mode not in ["default", "vad_segment", "punc_segment"]:
                 logging.error("spk_mode should be one of default, vad_segment and punc_segment.")
             self.spk_mode = spk_mode
-            self.preset_spk_num = kwargs.get("preset_spk_num", None)
-            if self.preset_spk_num:
-                logging.warning("Using preset speaker number: {}".format(self.preset_spk_num))
             
         self.kwargs = kwargs
         self.model = model
@@ -391,7 +388,7 @@
             if self.spk_model is not None:
                 all_segments = sorted(all_segments, key=lambda x: x[0])
                 spk_embedding = result['spk_embedding']
-                labels = self.cb_model(spk_embedding.cpu(), oracle_num=self.preset_spk_num)
+                labels = self.cb_model(spk_embedding.cpu(), oracle_num=kwargs['preset_spk_num'])
                 del result['spk_embedding']
                 sv_output = postprocess(all_segments, None, labels, spk_embedding.cpu())
                 if self.spk_mode == 'vad_segment':  # recover sentence_list

--
Gitblit v1.9.1