From 33d3d2084403fd34b79c835d2f2fe04f6cd8f738 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 13 Sep 2023 09:33:54 +0800
Subject: [PATCH] Merge branch 'main' of github.com:alibaba-damo-academy/FunASR add

---
 funasr/bin/vad_infer.py |   52 ++++++++++++++++++----------------------------------
 1 file changed, 18 insertions(+), 34 deletions(-)

diff --git a/funasr/bin/vad_infer.py b/funasr/bin/vad_infer.py
index 5835e77..73e1f3f 100644
--- a/funasr/bin/vad_infer.py
+++ b/funasr/bin/vad_infer.py
@@ -1,37 +1,22 @@
-import argparse
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
 import logging
-import os
-import sys
-import json
+import math
 from pathlib import Path
-from typing import Any
+from typing import Dict
 from typing import List
-from typing import Optional
-from typing import Sequence
 from typing import Tuple
 from typing import Union
-from typing import Dict
 
-import math
 import numpy as np
 import torch
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
-from funasr.fileio.datadir_writer import DatadirWriter
-from funasr.modules.scorers.scorer_interface import BatchScorerInterface
-from funasr.modules.subsampling import TooShortUttError
-from funasr.tasks.vad import VADTask
-from funasr.torch_utils.device_funcs import to_device
-from funasr.torch_utils.set_all_random_seed import set_all_random_seed
-from funasr.utils import config_argparse
-from funasr.utils.cli_utils import get_commandline_args
-from funasr.utils.types import str2bool
-from funasr.utils.types import str2triple_str
-from funasr.utils.types import str_or_none
-from funasr.utils import asr_utils, wav_utils, postprocess_utils
+from funasr.build_utils.build_model_from_file import build_model_from_file
 from funasr.models.frontend.wav_frontend import WavFrontend, WavFrontendOnline
-
+from funasr.torch_utils.device_funcs import to_device
 
 
 class Speech2VadSegment:
@@ -56,11 +41,10 @@
             dtype: str = "float32",
             **kwargs,
     ):
-        assert check_argument_types()
 
         # 1. Build vad model
-        vad_model, vad_infer_args = VADTask.build_model_from_file(
-            vad_infer_config, vad_model_file, device
+        vad_model, vad_infer_args = build_model_from_file(
+            vad_infer_config, vad_model_file, None, device, task_name="vad"
         )
         frontend = None
         if vad_infer_args.frontend is not None:
@@ -90,7 +74,6 @@
             text, token, token_int, hyp
 
         """
-        assert check_argument_types()
 
         # Input as audio signal
         if isinstance(speech, np.ndarray):
@@ -123,12 +106,13 @@
                 "in_cache": in_cache
             }
             # a. To device
-            #batch = to_device(batch, device=self.device)
+            # batch = to_device(batch, device=self.device)
             segments_part, in_cache = self.vad_model(**batch)
             if segments_part:
                 for batch_num in range(0, self.batch_size):
                     segments[batch_num] += segments_part[batch_num]
         return fbanks, segments
+
 
 class Speech2VadSegmentOnline(Speech2VadSegment):
     """Speech2VadSegmentOnline class
@@ -141,13 +125,13 @@
         [[10, 230], [245, 450], ...]
 
     """
+
     def __init__(self, **kwargs):
         super(Speech2VadSegmentOnline, self).__init__(**kwargs)
         vad_cmvn_file = kwargs.get('vad_cmvn_file', None)
         self.frontend = None
         if self.vad_infer_args.frontend is not None:
             self.frontend = WavFrontendOnline(cmvn_file=vad_cmvn_file, **self.vad_infer_args.frontend_conf)
-
 
     @torch.no_grad()
     def __call__(
@@ -162,7 +146,6 @@
             text, token, token_int, hyp
 
         """
-        assert check_argument_types()
 
         # Input as audio signal
         if isinstance(speech, np.ndarray):
@@ -170,7 +153,8 @@
         batch_size = speech.shape[0]
         segments = [[]] * batch_size
         if self.frontend is not None:
-            feats, feats_len = self.frontend.forward(speech, speech_lengths, is_final)
+            reset = in_cache == dict()
+            feats, feats_len = self.frontend.forward(speech, speech_lengths, is_final, reset)
             fbanks, _ = self.frontend.get_fbank()
         else:
             raise Exception("Need to extract feats first, please configure frontend configuration")
@@ -178,6 +162,8 @@
             feats = to_device(feats, device=self.device)
             feats_len = feats_len.int()
             waveforms = self.frontend.get_waveforms()
+            if max_end_sil == 800 and self.vad_infer_args.vad_post_conf["max_end_silence_time"] != 800:
+                max_end_sil = self.vad_infer_args.vad_post_conf["max_end_silence_time"]
 
             batch = {
                 "feats": feats,
@@ -192,5 +178,3 @@
             # in_cache.update(batch['in_cache'])
             # in_cache = {key: value for key, value in batch['in_cache'].items()}
         return fbanks, segments, in_cache
-
-

--
Gitblit v1.9.1