From 28ccfbfc51068a663a80764e14074df5edf2b5ba Mon Sep 17 00:00:00 2001
From: kongdeqiang <kongdeqiang960204@163.com>
Date: 星期五, 13 三月 2026 17:41:41 +0800
Subject: [PATCH] 提交

---
 runtime/python/onnxruntime/funasr_onnx/vad_bin.py |   51 ++++++++++++++++++++++++++++++++++-----------------
 1 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/runtime/python/onnxruntime/funasr_onnx/vad_bin.py b/runtime/python/onnxruntime/funasr_onnx/vad_bin.py
index 92928a8..f784f26 100644
--- a/runtime/python/onnxruntime/funasr_onnx/vad_bin.py
+++ b/runtime/python/onnxruntime/funasr_onnx/vad_bin.py
@@ -4,7 +4,7 @@
 
 import os.path
 from pathlib import Path
-from typing import List, Union, Tuple
+from typing import List, Union, Tuple, Dict
 
 import copy
 import librosa
@@ -69,7 +69,7 @@
             model_file, device_id, intra_op_num_threads=intra_op_num_threads
         )
         self.batch_size = batch_size
-        self.vad_scorer = E2EVadModel(config["model_conf"])
+        self.vad_scorer_config = config["model_conf"]
         self.max_end_sil = (
             max_end_sil if max_end_sil is not None else config["model_conf"]["max_end_silence_time"]
         )
@@ -90,10 +90,9 @@
         waveform_list = self.load_data(audio_in, self.frontend.opts.frame_opts.samp_freq)
         waveform_nums = len(waveform_list)
         is_final = kwargs.get("kwargs", False)
-
         segments = [[]] * self.batch_size
         for beg_idx in range(0, waveform_nums, self.batch_size):
-
+            vad_scorer = E2EVadModel(self.vad_scorer_config)
             end_idx = min(waveform_nums, beg_idx + self.batch_size)
             waveform = waveform_list[beg_idx:end_idx]
             feats, feats_len = self.extract_feat(waveform)
@@ -122,7 +121,7 @@
                     inputs.extend(in_cache)
                     scores, out_caches = self.infer(inputs)
                     in_cache = out_caches
-                    segments_part = self.vad_scorer(
+                    segments_part = vad_scorer(
                         scores,
                         waveform_package,
                         is_final=is_final,
@@ -143,7 +142,24 @@
         return segments
 
     def load_data(self, wav_content: Union[str, np.ndarray, List[str]], fs: int = None) -> List:
+        
+        def convert_to_wav(input_path, output_path):
+            from pydub import AudioSegment
+            try:
+                audio = AudioSegment.from_mp3(input_path)
+                audio.export(output_path, format="wav")
+                print("音频文件为mp3格式，已转换为wav格式")
+                
+            except Exception as e:
+                print(f"杞崲澶辫触:{e}")
+
         def load_wav(path: str) -> np.ndarray:
+            if not path.lower().endswith('.wav'):
+                import os
+                input_path = path
+                path = os.path.splitext(path)[0]+'.wav'
+                convert_to_wav(input_path,path) #将mp3格式转换成wav格式
+
             waveform, _ = librosa.load(path, sr=fs)
             return waveform
 
@@ -231,19 +247,17 @@
             model_dir = model.export(type="onnx", quantize=quantize, **kwargs)
 
         config_file = os.path.join(model_dir, "config.yaml")
-        cmvn_file = os.path.join(model_dir, "am.mvn")
-        config = read_yaml(config_file)
+        self.cmvn_file = os.path.join(model_dir, "am.mvn")
+        self.config = read_yaml(config_file)
 
-        self.frontend = WavFrontendOnline(cmvn_file=cmvn_file, **config["frontend_conf"])
         self.ort_infer = OrtInferSession(
             model_file, device_id, intra_op_num_threads=intra_op_num_threads
         )
         self.batch_size = batch_size
-        self.vad_scorer = E2EVadModel(config["model_conf"])
         self.max_end_sil = (
-            max_end_sil if max_end_sil is not None else config["model_conf"]["max_end_silence_time"]
+            max_end_sil if max_end_sil is not None else self.config["model_conf"]["max_end_silence_time"]
         )
-        self.encoder_conf = config["encoder_conf"]
+        self.encoder_conf = self.config["encoder_conf"]
 
     def prepare_cache(self, in_cache: list = []):
         if len(in_cache) > 0:
@@ -259,20 +273,22 @@
     def __call__(self, audio_in: np.ndarray, **kwargs) -> List:
         waveforms = np.expand_dims(audio_in, axis=0)
 
-        param_dict = kwargs.get("param_dict", dict())
+        param_dict: Dict = kwargs.get("param_dict", dict())
         is_final = param_dict.get("is_final", False)
-        feats, feats_len = self.extract_feat(waveforms, is_final)
+        frontend: WavFrontendOnline = param_dict.get("frontend", WavFrontendOnline(cmvn_file=self.cmvn_file, **self.config["frontend_conf"]))
+        feats, feats_len = self.extract_feat(frontend=frontend, waveforms=waveforms, is_final=is_final)
         segments = []
         if feats.size != 0:
             in_cache = param_dict.get("in_cache", list())
             in_cache = self.prepare_cache(in_cache)
+            vad_scorer = param_dict.get("vad_scorer", E2EVadModel(self.config["model_conf"]))
             try:
                 inputs = [feats]
                 inputs.extend(in_cache)
                 scores, out_caches = self.infer(inputs)
                 param_dict["in_cache"] = out_caches
-                waveforms = self.frontend.get_waveforms()
-                segments = self.vad_scorer(
+                waveforms = frontend.get_waveforms()
+                segments = vad_scorer(
                     scores, waveforms, is_final=is_final, max_end_sil=self.max_end_sil, online=True
                 )
 
@@ -280,6 +296,7 @@
                 # logging.warning(traceback.format_exc())
                 logging.warning("input wav is silence or noise")
                 segments = []
+        param_dict.update({"frontend": frontend, "vad_scorer": vad_scorer})
         return segments
 
     def load_data(self, wav_content: Union[str, np.ndarray, List[str]], fs: int = None) -> List:
@@ -299,13 +316,13 @@
         raise TypeError(f"The type of {wav_content} is not in [str, np.ndarray, list]")
 
     def extract_feat(
-        self, waveforms: np.ndarray, is_final: bool = False
+        self, frontend: WavFrontendOnline, waveforms: np.ndarray, is_final: bool = False
     ) -> Tuple[np.ndarray, np.ndarray]:
         waveforms_lens = np.zeros(waveforms.shape[0]).astype(np.int32)
         for idx, waveform in enumerate(waveforms):
             waveforms_lens[idx] = waveform.shape[-1]
 
-        feats, feats_len = self.frontend.extract_fbank(waveforms, waveforms_lens, is_final)
+        feats, feats_len = frontend.extract_fbank(waveforms, waveforms_lens, is_final)
         # feats.append(feat)
         # feats_len.append(feat_len)
 

--
Gitblit v1.9.1