From 2868fe3df4e92a6ae3e327faf6e57ea492e04124 Mon Sep 17 00:00:00 2001
From: 志浩 <neo.dzh@alibaba-inc.com>
Date: Thu, 16 Mar 2023 19:24:21 +0800
Subject: [PATCH] Merge branch 'main' into dev_dzh
---
funasr/bin/vad_inference.py | 69 +++++++++++++++++++++++-----------
 1 file changed, 47 insertions(+), 22 deletions(-)
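
Notes: a minimal usage sketch of the streaming interface this patch introduces.
The soundfile-based loading, the asset paths ("vad.yaml", "vad.pb", "vad.mvn")
and the constructor keywords (mirroring the inference_modelscope() parameters
in this file) are illustrative assumptions; only the in_cache argument and the
(fbanks, segments) return come from the hunks below.

    import numpy as np
    import soundfile as sf

    from funasr.bin.vad_inference import Speech2VadSegment

    # hypothetical FSMN-VAD assets; replace with real config/model/cmvn files
    vad = Speech2VadSegment(
        vad_infer_config="vad.yaml",
        vad_model_file="vad.pb",
        vad_cmvn_file="vad.mvn",
        device="cpu",
        batch_size=1,
    )

    wav, fs = sf.read("example.wav", dtype="float32")    # assumed 16 kHz mono input
    speech = np.expand_dims(wav, axis=0)                  # (batch, samples)
    lengths = np.array([speech.shape[1]], dtype=np.int64)

    # __call__ now chunks the features internally, threads in_cache through the
    # model, and returns the fbank features together with per-utterance
    # [start_ms, end_ms] segment lists
    fbanks, segments = vad(speech, lengths, in_cache={})
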
diff --git a/funasr/bin/vad_inference.py b/funasr/bin/vad_inference.py
index 9f1d0f3..aaa38b3 100644
--- a/funasr/bin/vad_inference.py
+++ b/funasr/bin/vad_inference.py
@@ -1,6 +1,7 @@
import argparse
import logging
import sys
+import json
from pathlib import Path
from typing import Any
from typing import List
@@ -10,6 +11,7 @@
from typing import Union
from typing import Dict
+import math
import numpy as np
import torch
from typeguard import check_argument_types
@@ -80,11 +82,13 @@
self.device = device
self.dtype = dtype
self.frontend = frontend
+ self.batch_size = batch_size
@torch.no_grad()
def __call__(
- self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None
- ) -> List[List[int]]:
+ self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None,
+ in_cache: Dict[str, torch.Tensor] = dict()
+ ) -> Tuple[torch.Tensor, List[List[int]]]:
"""Inference
Args:
@@ -100,22 +104,38 @@
speech = torch.tensor(speech)
if self.frontend is not None:
- feats, feats_len = self.frontend.forward(speech, speech_lengths)
+ self.frontend.filter_length_max = math.inf
+ fbanks, fbanks_len = self.frontend.forward_fbank(speech, speech_lengths)
+ feats, feats_len = self.frontend.forward_lfr_cmvn(fbanks, fbanks_len)
+ fbanks = to_device(fbanks, device=self.device)
feats = to_device(feats, device=self.device)
feats_len = feats_len.int()
else:
raise Exception("Need to extract feats first, please configure frontend configuration")
- batch = {"feats": feats, "feats_lengths": feats_len, "waveform": speech}
- # a. To device
- batch = to_device(batch, device=self.device)
-
- # b. Forward Encoder
- segments = self.vad_model(**batch)
-
- return segments
-
-
+ # b. Forward encoder in streaming chunks, carrying in_cache between steps
+ t_offset = 0
+ step = min(feats_len.max(), 6000)  # at most 6000 feature frames (~60 s at a 10 ms hop) per forward pass
+ segments = [[] for _ in range(self.batch_size)]  # independent result list per utterance (avoids list aliasing)
+ for t_offset in range(0, feats_len, min(step, feats_len - t_offset)):
+ if t_offset + step >= feats_len - 1:
+ step = feats_len - t_offset
+ is_final = True
+ else:
+ is_final = False
+ batch = {
+ "feats": feats[:, t_offset:t_offset + step, :],
+ "waveform": speech[:, t_offset * 160:min(speech.shape[-1], (t_offset + step - 1) * 160 + 400)],  # 160/400 samples: assumed 10 ms hop / 25 ms window at 16 kHz
+ "is_final": is_final,
+ "in_cache": in_cache
+ }
+ # a. To device
+ batch = to_device(batch, device=self.device)
+ segments_part, in_cache = self.vad_model(**batch)
+ if segments_part:
+ for batch_num in range(0, self.batch_size):
+ segments[batch_num] += segments_part[batch_num]
+ return fbanks, segments
def inference(
@@ -152,11 +172,12 @@
)
return inference_pipeline(data_path_and_name_and_type, raw_inputs)
+
def inference_modelscope(
batch_size: int,
ngpu: int,
log_level: Union[int, str],
- #data_path_and_name_and_type,
+ # data_path_and_name_and_type,
vad_infer_config: Optional[str],
vad_model_file: Optional[str],
vad_cmvn_file: Optional[str] = None,
@@ -167,7 +188,6 @@
dtype: str = "float32",
seed: int = 0,
num_workers: int = 1,
- param_dict: dict = None,
**kwargs,
):
assert check_argument_types()
@@ -201,13 +221,17 @@
speech2vadsegment = Speech2VadSegment(**speech2vadsegment_kwargs)
def _forward(
- data_path_and_name_and_type,
- raw_inputs: Union[np.ndarray, torch.Tensor] = None,
- output_dir_v2: Optional[str] = None,
- fs: dict = None,
- param_dict: dict = None,
+ data_path_and_name_and_type,
+ raw_inputs: Union[np.ndarray, torch.Tensor] = None,
+ output_dir_v2: Optional[str] = None,
+ fs: dict = None,
+ param_dict: dict = None
):
# 3. Build data-iterator
+ if data_path_and_name_and_type is None and raw_inputs is not None:
+ if isinstance(raw_inputs, torch.Tensor):
+ raw_inputs = raw_inputs.numpy()
+ data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
loader = VADTask.build_streaming_iterator(
data_path_and_name_and_type,
dtype=dtype,
@@ -238,14 +262,15 @@
assert all(isinstance(s, str) for s in keys), keys
_bs = len(next(iter(batch.values())))
assert len(keys) == _bs, f"{len(keys)} != {_bs}"
- # batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
# do vad segment
- results = speech2vadsegment(**batch)
+ _, results = speech2vadsegment(**batch)
for i, _ in enumerate(keys):
+ results[i] = json.dumps(results[i])
item = {'key': keys[i], 'value': results[i]}
vad_results.append(item)
if writer is not None:
+ results[i] = json.loads(results[i])
ibest_writer["text"][keys[i]] = "{}".format(results[i])
return vad_results
--
Gitblit v1.9.1