From be7230fd94477fdc22a9b315af7c5e3ac4a7eb8d Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Tue, 28 Feb 2023 16:20:45 +0800
Subject: [PATCH] Merge pull request #163 from alibaba-damo-academy/dev_zly
---
funasr/bin/vad_inference.py | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/funasr/bin/vad_inference.py b/funasr/bin/vad_inference.py
index 258b38b..aaa38b3 100644
--- a/funasr/bin/vad_inference.py
+++ b/funasr/bin/vad_inference.py
@@ -86,7 +86,8 @@
@torch.no_grad()
def __call__(
- self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None
+ self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None,
+ in_cache: Dict[str, torch.Tensor] = dict()
) -> Tuple[List[List[int]], Dict[str, torch.Tensor]]:
"""Inference
@@ -125,11 +126,12 @@
batch = {
"feats": feats[:, t_offset:t_offset + step, :],
"waveform": speech[:, t_offset * 160:min(speech.shape[-1], (t_offset + step - 1) * 160 + 400)],
- "is_final": is_final
+ "is_final": is_final,
+ "in_cache": in_cache
}
# a. To device
batch = to_device(batch, device=self.device)
- segments_part = self.vad_model(**batch)
+ segments_part, in_cache = self.vad_model(**batch)
if segments_part:
for batch_num in range(0, self.batch_size):
segments[batch_num] += segments_part[batch_num]
--
Gitblit v1.9.1