From 0143122a4e2ee86cc27ba137b2bb0530577cbf12 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 12 Jan 2024 10:27:36 +0800
Subject: [PATCH] funasr1.0 streaming demo: use load_audio_text_image_video

---
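Notes (placed below the ---, so excluded from the commit message): the patch
switches Paraformer.inference to the loader's actual name,
load_audio_text_image_video, drops the duplicated
load_audio_and_text_image_video import, and removes two whitespace-only lines.

Since the subject references the funasr1.0 streaming demo, a minimal driver
sketch follows, assuming FunASR 1.0's AutoModel interface; the model id, wav
path, and chunk settings are illustrative assumptions, not part of this patch:

    import soundfile
    from funasr import AutoModel

    # Streaming chunk configuration: [0, 10, 5] => 10 * 60 ms = 600 ms chunks
    # (illustrative values, not defined by this patch).
    chunk_size = [0, 10, 5]
    encoder_chunk_look_back = 4  # encoder chunks of left context to attend to
    decoder_chunk_look_back = 1  # encoder chunks the decoder looks back at

    model = AutoModel(model="paraformer-zh-streaming")  # assumed model id

    speech, sample_rate = soundfile.read("asr_example.wav")  # 16 kHz mono assumed
    chunk_stride = chunk_size[1] * 960  # 600 ms of samples at 16 kHz

    cache = {}  # carries encoder/decoder state between chunks
    total_chunk_num = (len(speech) + chunk_stride - 1) // chunk_stride
    for i in range(total_chunk_num):
        speech_chunk = speech[i * chunk_stride:(i + 1) * chunk_stride]
        res = model.generate(
            input=speech_chunk,
            cache=cache,
            is_final=(i == total_chunk_num - 1),
            chunk_size=chunk_size,
            encoder_chunk_look_back=encoder_chunk_look_back,
            decoder_chunk_look_back=decoder_chunk_look_back,
        )
        print(res)
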
 funasr/models/paraformer/model.py |    6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/funasr/models/paraformer/model.py b/funasr/models/paraformer/model.py
index 5492420..78a72ec 100644
--- a/funasr/models/paraformer/model.py
+++ b/funasr/models/paraformer/model.py
@@ -22,7 +22,7 @@
 
 from torch.cuda.amp import autocast
 
-from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank, load_audio_and_text_image_video
+from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
 from funasr.utils import postprocess_utils
 from funasr.utils.datadir_writer import DatadirWriter
 from funasr.register import tables
@@ -447,7 +447,6 @@
              frontend=None,
              **kwargs,
              ):
-		
 		# init beamsearch
 		is_use_ctc = kwargs.get("decoding_ctc_weight", 0.0) > 0.00001 and self.ctc != None
 		is_use_lm = kwargs.get("lm_weight", 0.0) > 0.00001 and kwargs.get("lm_file", None) is not None
@@ -466,7 +465,7 @@
 		else:
 			# extract fbank feats
 			time1 = time.perf_counter()
-			audio_sample_list = load_audio_and_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000), data_type=kwargs.get("data_type", "sound"), tokenizer=tokenizer)
+			audio_sample_list = load_audio_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000), data_type=kwargs.get("data_type", "sound"), tokenizer=tokenizer)
 			time2 = time.perf_counter()
 			meta_data["load_data"] = f"{time2 - time1:0.3f}"
 			speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend)
@@ -475,7 +474,6 @@
 			meta_data["batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
 			
 		speech.to(device=kwargs["device"]), speech_lengths.to(device=kwargs["device"])
-
 		# Encoder
 		encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
 		if isinstance(encoder_out, tuple):

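Note on an unchanged context line above: torch.Tensor.to() is not in-place, so

    speech.to(device=kwargs["device"]), speech_lengths.to(device=kwargs["device"])

discards both results and leaves the tensors on their original device. A
follow-up fix (a sketch, not part of this patch) would rebind the names:

    speech = speech.to(device=kwargs["device"])
    speech_lengths = speech_lengths.to(device=kwargs["device"])
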
--
Gitblit v1.9.1