From 2cdb2d654f2109ef4e648bae6f169143e267e5db Mon Sep 17 00:00:00 2001
From: zhuzizyf <42790740+zhuzizyf@users.noreply.github.com>
Date: Sat, 11 Mar 2023 14:33:14 +0800
Subject: [PATCH] Update vad_inference.py
---
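This commit moves Speech2VadSegment.__call__ to a chunk-streaming interface: the frontend pass is split into forward_fbank and forward_lfr_cmvn, an in_cache dict carries the VAD state between chunks, is_final_send is renamed to is_final, and the call now returns the fbank features alongside the segment list. A minimal caller-side sketch, assuming speech2vadsegment is an already-built Speech2VadSegment instance; the audio shape, sample rate, and values below are illustrative and not part of this patch:

    import numpy as np

    # Placeholder 10 s of 16 kHz mono audio, shaped (batch, samples).
    speech = np.random.randn(1, 16000 * 10).astype(np.float32)
    speech_lengths = np.array([speech.shape[1]], dtype=np.int64)

    cache = {}  # reused/updated by the model to keep streaming VAD state across calls
    fbanks, segments = speech2vadsegment(speech, speech_lengths, in_cache=cache)
    # segments holds one list per batch item, each a list of [start, end] segments.
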
funasr/bin/vad_inference.py | 36 ++++++++++++++++++++++--------------
 1 file changed, 22 insertions(+), 14 deletions(-)
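The inference() entry point also gains a raw-input path: when data_path_and_name_and_type is not given, an in-memory waveform passed via raw_inputs is wrapped for the streaming iterator. A short sketch of just that branch, using a placeholder torch tensor (the waveform itself is illustrative):

    import torch

    raw_inputs = torch.zeros(16000)  # placeholder 1 s waveform
    data_path_and_name_and_type = None
    if data_path_and_name_and_type is None and raw_inputs is not None:
        if isinstance(raw_inputs, torch.Tensor):
            raw_inputs = raw_inputs.numpy()  # the iterator consumes numpy arrays
        data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
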
diff --git a/funasr/bin/vad_inference.py b/funasr/bin/vad_inference.py
index b0f8a77..aaa38b3 100644
--- a/funasr/bin/vad_inference.py
+++ b/funasr/bin/vad_inference.py
@@ -11,6 +11,7 @@
from typing import Union
from typing import Dict
+import math
import numpy as np
import torch
from typeguard import check_argument_types
@@ -85,8 +86,9 @@
@torch.no_grad()
def __call__(
- self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None
- ) -> List[List[int]]:
+ self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None,
+ in_cache: Dict[str, torch.Tensor] = dict()
+ ) -> Tuple[List[List[int]], Dict[str, torch.Tensor]]:
"""Inference
Args:
@@ -102,36 +104,38 @@
speech = torch.tensor(speech)
if self.frontend is not None:
- feats, feats_len = self.frontend.forward(speech, speech_lengths)
+ self.frontend.filter_length_max = math.inf
+ fbanks, fbanks_len = self.frontend.forward_fbank(speech, speech_lengths)
+ feats, feats_len = self.frontend.forward_lfr_cmvn(fbanks, fbanks_len)
+ fbanks = to_device(fbanks, device=self.device)
feats = to_device(feats, device=self.device)
feats_len = feats_len.int()
else:
raise Exception("Need to extract feats first, please configure frontend configuration")
- # batch = {"feats": feats, "waveform": speech, "is_final_send": True}
- # segments = self.vad_model(**batch)
- # b. Forward Encoder sreaming
+ # b. Forward Encoder streaming
t_offset = 0
- step = min(feats_len, 6000)
+ step = min(feats_len.max(), 6000)
segments = [[]] * self.batch_size
for t_offset in range(0, feats_len, min(step, feats_len - t_offset)):
if t_offset + step >= feats_len - 1:
step = feats_len - t_offset
- is_final_send = True
+ is_final = True
else:
- is_final_send = False
+ is_final = False
batch = {
"feats": feats[:, t_offset:t_offset + step, :],
"waveform": speech[:, t_offset * 160:min(speech.shape[-1], (t_offset + step - 1) * 160 + 400)],
- "is_final_send": is_final_send
+ "is_final": is_final,
+ "in_cache": in_cache
}
# a. To device
batch = to_device(batch, device=self.device)
- segments_part = self.vad_model(**batch)
+ segments_part, in_cache = self.vad_model(**batch)
if segments_part:
for batch_num in range(0, self.batch_size):
segments[batch_num] += segments_part[batch_num]
- return segments
+ return fbanks, segments
def inference(
@@ -221,9 +225,13 @@
raw_inputs: Union[np.ndarray, torch.Tensor] = None,
output_dir_v2: Optional[str] = None,
fs: dict = None,
- param_dict: dict = None,
+ param_dict: dict = None
):
# 3. Build data-iterator
+ if data_path_and_name_and_type is None and raw_inputs is not None:
+ if isinstance(raw_inputs, torch.Tensor):
+ raw_inputs = raw_inputs.numpy()
+ data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
loader = VADTask.build_streaming_iterator(
data_path_and_name_and_type,
dtype=dtype,
@@ -256,7 +264,7 @@
assert len(keys) == _bs, f"{len(keys)} != {_bs}"
# do vad segment
- results = speech2vadsegment(**batch)
+ _, results = speech2vadsegment(**batch)
for i, _ in enumerate(keys):
results[i] = json.dumps(results[i])
item = {'key': keys[i], 'value': results[i]}
--
Gitblit v1.9.1