From 27d2d3ccb7f64a5b21407adc6fd7ec0bddf0941d Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 12 Apr 2024 15:30:03 +0800
Subject: [PATCH] add
---
funasr/models/qwen_audio/model.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/funasr/models/qwen_audio/model.py b/funasr/models/qwen_audio/model.py
index 86b4105..f981b67 100644
--- a/funasr/models/qwen_audio/model.py
+++ b/funasr/models/qwen_audio/model.py
@@ -9,8 +9,7 @@
from torch import nn
import whisper
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
-from transformers import AutoModelForCausalLM, AutoTokenizer
-from transformers.generation import GenerationConfig
+
from funasr.register import tables
@@ -27,6 +26,8 @@
"""
def __init__(self, *args, **kwargs):
super().__init__()
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers.generation import GenerationConfig
model_or_path = kwargs.get("model_path", "QwenAudio")
model = AutoModelForCausalLM.from_pretrained(model_or_path, device_map="cpu",
@@ -60,7 +61,7 @@
inputs = self.tokenizer(query, return_tensors='pt', audio_info=audio_info)
inputs = inputs.to(self.model.device)
pred = self.model.generate(**inputs, audio_info=audio_info)
- response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=False, audio_info=audio_info)
+ response = self.tokenizer.decode(pred.cpu()[0], skip_special_tokens=False, audio_info=audio_info)
results = []
result_i = {"key": key[0], "text": response}
@@ -82,7 +83,8 @@
Modified from https://github.com/QwenLM/Qwen-Audio
"""
super().__init__()
-
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers.generation import GenerationConfig
model_or_path = kwargs.get("model_path", "QwenAudio")
bf16 = kwargs.get("bf16", False)
fp16 = kwargs.get("fp16", False)
--
Gitblit v1.9.1