From 81fb78286f6e6893ef5a319bfb2ba21d340476d3 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 22 Mar 2024 20:13:05 +0800
Subject: [PATCH] update
---
funasr/models/qwen_audio/model.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/funasr/models/qwen_audio/model.py b/funasr/models/qwen_audio/model.py
index e419b1e..045cfe1 100644
--- a/funasr/models/qwen_audio/model.py
+++ b/funasr/models/qwen_audio/model.py
@@ -54,14 +54,13 @@
meta_data = {}
# meta_data["batch_data_time"] = -1
-
- sp_prompt = "<|startoftranscription|><|en|><|transcribe|><|en|><|notimestamps|><|wo_itn|>"
- query = f"<audio>{data_in[0]}</audio>{sp_prompt}"
+ prompt = kwargs.get("prompt", "<|startoftranscription|><|en|><|transcribe|><|en|><|notimestamps|><|wo_itn|>")
+ query = f"<audio>{data_in[0]}</audio>{prompt}"
audio_info = self.tokenizer.process_audio(query)
inputs = self.tokenizer(query, return_tensors='pt', audio_info=audio_info)
inputs = inputs.to(self.model.device)
pred = self.model.generate(**inputs, audio_info=audio_info)
- response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=False, audio_info=audio_info)
+ response = self.tokenizer.decode(pred.cpu()[0], skip_special_tokens=False, audio_info=audio_info)
results = []
result_i = {"key": key[0], "text": response}
--
Gitblit v1.9.1