From e9d2cfc3a134b00f4e98271fbee3838d1ccecbcc Mon Sep 17 00:00:00 2001
From: VirtuosoQ <2416050435@qq.com>
Date: Fri, 26 Apr 2024 14:59:30 +0800
Subject: [PATCH] FunASR java http client

---
 funasr/models/qwen_audio/model.py |   15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/funasr/models/qwen_audio/model.py b/funasr/models/qwen_audio/model.py
index e419b1e..f981b67 100644
--- a/funasr/models/qwen_audio/model.py
+++ b/funasr/models/qwen_audio/model.py
@@ -9,8 +9,7 @@
 from torch import nn
 import whisper
 from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
-from transformers import AutoModelForCausalLM, AutoTokenizer
-from transformers.generation import GenerationConfig
+
 
 from funasr.register import tables
 
@@ -27,6 +26,8 @@
     """
     def __init__(self, *args, **kwargs):
         super().__init__()
+        from transformers import AutoModelForCausalLM, AutoTokenizer
+        from transformers.generation import GenerationConfig
 
         model_or_path = kwargs.get("model_path", "QwenAudio")
         model = AutoModelForCausalLM.from_pretrained(model_or_path, device_map="cpu",
@@ -54,14 +55,13 @@
 
         meta_data = {}
         # meta_data["batch_data_time"] = -1
-
-        sp_prompt = "<|startoftranscription|><|en|><|transcribe|><|en|><|notimestamps|><|wo_itn|>"
-        query = f"<audio>{data_in[0]}</audio>{sp_prompt}"
+        prompt = kwargs.get("prompt", "<|startoftranscription|><|en|><|transcribe|><|en|><|notimestamps|><|wo_itn|>")
+        query = f"<audio>{data_in[0]}</audio>{prompt}"
         audio_info = self.tokenizer.process_audio(query)
         inputs = self.tokenizer(query, return_tensors='pt', audio_info=audio_info)
         inputs = inputs.to(self.model.device)
         pred = self.model.generate(**inputs, audio_info=audio_info)
-        response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=False, audio_info=audio_info)
+        response = self.tokenizer.decode(pred.cpu()[0], skip_special_tokens=False, audio_info=audio_info)
 
         results = []
         result_i = {"key": key[0], "text": response}
@@ -83,7 +83,8 @@
         Modified from https://github.com/QwenLM/Qwen-Audio
         """
         super().__init__()
-        
+        from transformers import AutoModelForCausalLM, AutoTokenizer
+        from transformers.generation import GenerationConfig
         model_or_path = kwargs.get("model_path", "QwenAudio")
         bf16 = kwargs.get("bf16", False)
         fp16 = kwargs.get("fp16", False)

--
Gitblit v1.9.1