From 94bee9029208762468399100654119b7004e769f Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Wed, 31 May 2023 13:59:07 +0800
Subject: [PATCH] Merge pull request #565 from alibaba-damo-academy/dev_zly2

---
 funasr/models/e2e_vad.py               |    4 ++--
 funasr/bin/vad_infer.py                |    3 ++-
 funasr/models/frontend/wav_frontend.py |    6 ++++--
 3 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/funasr/bin/vad_infer.py b/funasr/bin/vad_infer.py
index 245757c..e1698d0 100644
--- a/funasr/bin/vad_infer.py
+++ b/funasr/bin/vad_infer.py
@@ -175,7 +175,8 @@
         batch_size = speech.shape[0]
         segments = [[]] * batch_size
         if self.frontend is not None:
-            feats, feats_len = self.frontend.forward(speech, speech_lengths, is_final)
+            reset = in_cache == dict()
+            feats, feats_len = self.frontend.forward(speech, speech_lengths, is_final, reset)
             fbanks, _ = self.frontend.get_fbank()
         else:
             raise Exception("Need to extract feats first, please configure frontend configuration")
diff --git a/funasr/models/e2e_vad.py b/funasr/models/e2e_vad.py
index 594c27e..71ed2cf 100644
--- a/funasr/models/e2e_vad.py
+++ b/funasr/models/e2e_vad.py
@@ -226,7 +226,6 @@
                                                self.vad_opts.frame_in_ms)
         self.encoder = encoder
         # init variables
-        self.is_final = False
         self.data_buf_start_frame = 0
         self.frm_cnt = 0
         self.latest_confirmed_speech_frame = 0
@@ -257,7 +256,6 @@
         self.frontend = frontend
 
     def AllResetDetection(self):
-        self.is_final = False
         self.data_buf_start_frame = 0
         self.frm_cnt = 0
         self.latest_confirmed_speech_frame = 0
@@ -473,6 +471,8 @@
     def forward(self, feats: torch.Tensor, waveform: torch.tensor, in_cache: Dict[str, torch.Tensor] = dict(),
                 is_final: bool = False
                 ) -> Tuple[List[List[List[int]]], Dict[str, torch.Tensor]]:
+        if not in_cache:
+            self.AllResetDetection()
         self.waveform = waveform  # compute decibel for each frame
         self.ComputeDecibel()
         self.ComputeScores(feats, in_cache)
diff --git a/funasr/models/frontend/wav_frontend.py b/funasr/models/frontend/wav_frontend.py
index 35fab57..f16bdd9 100644
--- a/funasr/models/frontend/wav_frontend.py
+++ b/funasr/models/frontend/wav_frontend.py
@@ -395,8 +395,10 @@
         return feats_pad, feats_lens, lfr_splice_frame_idxs
 
     def forward(
-            self, input: torch.Tensor, input_lengths: torch.Tensor, is_final: bool = False
+        self, input: torch.Tensor, input_lengths: torch.Tensor, is_final: bool = False, reset: bool = False
     ) -> Tuple[torch.Tensor, torch.Tensor]:
+        if reset:
+            self.cache_reset()
         batch_size = input.shape[0]
         assert batch_size == 1, 'we support to extract feature online only when the batch size is equal to 1 now'
         waveforms, feats, feats_lengths = self.forward_fbank(input, input_lengths)  # input shape: B T D
@@ -500,4 +502,4 @@
         feats_pad = pad_sequence(feats,
                                  batch_first=True,
                                  padding_value=0.0)
-        return feats_pad, feats_lens
\ No newline at end of file
+        return feats_pad, feats_lens

--
Gitblit v1.9.1