Changed files:
examples/industrial_data_pretraining/qwen_audio/demo.py
examples/industrial_data_pretraining/qwen_audio/demo_chat.py
examples/industrial_data_pretraining/qwen_audio/demo_chat_from_local.py
examples/industrial_data_pretraining/qwen_audio/demo_from_local.py
funasr/auto/auto_model.py
funasr/download/download_from_hub.py
funasr/download/name_maps_from_hub.py
funasr/models/qwen_audio/model.py
funasr/version.txt
setup.py
examples/industrial_data_pretraining/qwen_audio/demo.py
New file @@ -0,0 +1,15 @@
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

# To install requirements: pip3 install -U "funasr[llm]"

from funasr import AutoModel

model = AutoModel(model="Qwen/Qwen-Audio",
                  model_path=None,
                  )

res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", language=None)
print(res)

examples/industrial_data_pretraining/qwen_audio/demo_chat.py
New file @@ -0,0 +1,26 @@
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

# To install requirements: pip3 install -U "funasr[llm]"

from funasr import AutoModel

model = AutoModel(model="Qwen/Qwen-Audio-Chat",
                  model_path=None,
                  )

audio_in = "https://github.com/QwenLM/Qwen-Audio/raw/main/assets/audio/1272-128104-0000.flac"

# 1st dialogue turn
prompt = 'what does the person say?'
cache = {"history": None}
res = model.generate(input=audio_in, prompt=prompt, cache=cache)
print(res)

prompt = 'Find the start time and end time of the word "middle classes"'
# 2nd dialogue turn
res = model.generate(input=None, prompt=prompt, cache=cache)
print(res)

examples/industrial_data_pretraining/qwen_audio/demo_chat_from_local.py
New file @@ -0,0 +1,26 @@
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

# To install requirements: pip3 install -U "funasr[llm]"

from funasr import AutoModel

model = AutoModel(model="Qwen/Qwen-Audio-Chat",
                  model_path="/nfs/zhifu.gzf/init_model/qwen/Qwen-Audio-Chat",
                  )

audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"

# 1st dialogue turn
prompt = 'what does the person say?'
cache = {"history": None}
res = model.generate(input=audio_in, prompt=prompt, cache=cache)
print(res)

prompt = 'Find the start time and end time of the word "middle classes"'
# 2nd dialogue turn
res = model.generate(input=None, prompt=prompt, cache=cache)
print(res)

examples/industrial_data_pretraining/qwen_audio/demo_from_local.py
New file @@ -0,0 +1,15 @@
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

# To install requirements: pip3 install -U "funasr[llm]"

from funasr import AutoModel

model = AutoModel(model="Qwen/Qwen-Audio",
                  model_path="/nfs/zhifu.gzf/init_model/qwen/Qwen-Audio",
                  )

res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", language=None)
print(res)

funasr/auto/auto_model.py
@@ -245,7 +245,10 @@
             time1 = time.perf_counter()
             with torch.no_grad():
-                results, meta_data = model.inference(**batch, **kwargs)
+                res = model.inference(**batch, **kwargs)
+                if isinstance(res, (list, tuple)):
+                    results = res[0]
+                    meta_data = res[1] if len(res) > 1 else {}
             time2 = time.perf_counter()

             asr_result_list.extend(results)
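A minimal sketch (not part of the patch; the class and values below are hypothetical) of what this change tolerates: a model wrapper's inference may now return either (results, meta_data) or a one-element tuple/list holding only results, in which case meta_data falls back to {}.

# Hypothetical wrapper, for illustration only
class MinimalWarp:
    def inference(self, **kwargs):
        results = [{"key": "example", "text": "hello"}]
        return results,  # 1-tuple: auto_model fills meta_data with {}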
funasr/download/download_from_hub.py
@@ -13,10 +13,16 @@
         pass
     elif hub == "openai":
         model_or_path = kwargs.get("model")
-        if model_or_path in name_maps_openai:
-            model_or_path = name_maps_openai[model_or_path]
-        kwargs["model_path"] = model_or_path
+        if os.path.exists(model_or_path):  # local path
+            kwargs["model_path"] = model_or_path
+            kwargs["model"] = "WhisperWarp"
+        else:  # model name
+            if model_or_path in name_maps_openai:
+                model_or_path = name_maps_openai[model_or_path]
+            kwargs["model_path"] = model_or_path

     return kwargs

 def download_from_ms(**kwargs):
@@ -24,7 +30,7 @@
     if model_or_path in name_maps_ms:
         model_or_path = name_maps_ms[model_or_path]
     model_revision = kwargs.get("model_revision")
-    if not os.path.exists(model_or_path):
+    if not os.path.exists(model_or_path) and "model_path" not in kwargs:
         try:
             model_or_path = get_or_download_model_dir(model_or_path, model_revision,
                                                       is_training=kwargs.get("is_training"),
@@ -32,7 +38,7 @@
         except Exception as e:
             print(f"Download: {model_or_path} failed!: {e}")

-    kwargs["model_path"] = model_or_path
+    kwargs["model_path"] = model_or_path if "model_path" not in kwargs else kwargs["model_path"]

     if os.path.exists(os.path.join(model_or_path, "configuration.json")):
         with open(os.path.join(model_or_path, "configuration.json"), 'r', encoding='utf-8') as f:

funasr/download/name_maps_from_hub.py
@@ -10,6 +10,7 @@
     "cam++": "damo/speech_campplus_sv_zh-cn_16k-common",
     "Whisper-large-v2": "iic/speech_whisper-large_asr_multilingual",
     "Whisper-large-v3": "iic/Whisper-large-v3",
+    "Qwen-Audio": "Qwen/Qwen-Audio",
 }

 name_maps_hf = {

funasr/models/qwen_audio/model.py
@@ -9,25 +9,84 @@
 from torch import nn
-import whisper
-from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers.generation import GenerationConfig

 from funasr.register import tables

+@tables.register("model_classes", "Qwen/Qwen-Audio")
+@tables.register("model_classes", "Qwen-Audio")
+@tables.register("model_classes", "Qwen/QwenAudio")
+@tables.register("model_classes", "QwenAudio")
+@tables.register("model_classes", "QwenAudioWarp")
-class WhisperWarp(nn.Module):
-    def __init__(self, whisper_dims: dict, **kwargs):
+class QwenAudioWarp(nn.Module):
+    def __init__(self, *args, **kwargs):
         super().__init__()
-        hub = kwargs.get("hub", "funasr")
-        if hub == "openai":
-            init_param_path = kwargs.get("init_param_path", "large-v3")
-            model = whisper.load_model(init_param_path)
-        else:
-            dims = whisper.model.ModelDimensions(**whisper_dims)
-            model = whisper.model.Whisper(dims=dims)
+        model_or_path = kwargs.get("model_path", "QwenAudio")
+        model = AutoModelForCausalLM.from_pretrained(model_or_path, device_map="cpu", trust_remote_code=True)
+        tokenizer = AutoTokenizer.from_pretrained(model_or_path, trust_remote_code=True)

         self.model = model
+        self.tokenizer = tokenizer

     def forward(self, ):
         pass

+    def inference(self, data_in, data_lengths=None, key: list = None, tokenizer=None, frontend=None, **kwargs, ):
+        if kwargs.get("batch_size", 1) > 1:
+            raise NotImplementedError("batch decoding is not implemented")
+        meta_data = {}
+        # meta_data["batch_data_time"] = -1
+        sp_prompt = "<|startoftranscription|><|en|><|transcribe|><|en|><|notimestamps|><|wo_itn|>"
+        query = f"<audio>{data_in[0]}</audio>{sp_prompt}"
+        audio_info = self.tokenizer.process_audio(query)
+        inputs = self.tokenizer(query, return_tensors='pt', audio_info=audio_info)
+        inputs = inputs.to(self.model.device)
+        pred = self.model.generate(**inputs, audio_info=audio_info)
+        response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=False, audio_info=audio_info)
+
+        results = []
+        result_i = {"key": key[0], "text": response}
+        results.append(result_i)
+
+        return results, meta_data
+
+
+@tables.register("model_classes", "Qwen/Qwen-Audio-Chat")
+@tables.register("model_classes", "Qwen/QwenAudioChat")
+@tables.register("model_classes", "Qwen-Audio-Chat")
+@tables.register("model_classes", "QwenAudioChat")
+@tables.register("model_classes", "QwenAudioChatWarp")
+class QwenAudioChatWarp(nn.Module):
+    def __init__(self, *args, **kwargs):
+        super().__init__()
+        model_or_path = kwargs.get("model_path", "QwenAudio")
+        bf16 = kwargs.get("bf16", False)
+        fp16 = kwargs.get("fp16", False)
+        model = AutoModelForCausalLM.from_pretrained(model_or_path, device_map="cpu", bf16=bf16, fp16=fp16, trust_remote_code=True)
+        tokenizer = AutoTokenizer.from_pretrained(model_or_path, trust_remote_code=True)
+
+        self.model = model
+        self.tokenizer = tokenizer
+
+    def forward(self, ):
+        pass
@@ -41,45 +100,29 @@
                   ):
         if kwargs.get("batch_size", 1) > 1:
             raise NotImplementedError("batch decoding is not implemented")
         meta_data = {}
-        if isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank":  # fbank
-            speech, speech_lengths = data_in, data_lengths
-            if len(speech.shape) < 3:
-                speech = speech[None, :, :]
-            if speech_lengths is None:
-                speech_lengths = speech.shape[1]
+        prompt = kwargs.get("prompt", "what does the person say?")
+        cache = kwargs.get("cache", {})
+        history = cache.get("history", None)
+        if data_in[0] is not None:  # 1st dialogue turn
+            query = self.tokenizer.from_list_format([
+                {'audio': data_in[0]},  # Either a local path or an url
+                {'text': prompt},
+            ])
         else:
-            # extract fbank feats
-            time1 = time.perf_counter()
-            audio_sample_list = load_audio_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000), data_type=kwargs.get("data_type", "sound"), tokenizer=tokenizer)
-            time2 = time.perf_counter()
-            meta_data["load_data"] = f"{time2 - time1:0.3f}"
-            speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend)
-            time3 = time.perf_counter()
-            meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
-            frame_shift = frontend.frame_shift if hasattr(frontend, "frame_shift") else 10
-            lfr_n = frontend.lfr_n if hasattr(frontend, "lfr_n") else 1
-            meta_data["batch_data_time"] = speech_lengths.sum().item() * frame_shift * lfr_n / 1000
-        speech = speech.to(device=kwargs["device"])[0, :, :]
-        speech_lengths = speech_lengths.to(device=kwargs["device"])
-
-        # detect the spoken language
-        _, probs = self.model.detect_language(speech)
-        print(f"Detected language: {max(probs, key=probs.get)}")
-
-        # decode the audio
-        options = whisper.DecodingOptions(language=kwargs.get("language", None), fp16=False)
-        result = whisper.decode(self.model, speech, options)
+            query = prompt
+        response, history = self.model.chat(self.tokenizer, query=query, history=history)
+        cache["history"] = history
+        # print(response)
+        # The person says: "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel".

         results = []
-        result_i = {"key": key[0], "text": result.text}
+        result_i = {"key": key[0], "text": response}
         results.append(result_i)

         return results, meta_data

funasr/version.txt
@@ -1 +1 @@
-1.0.13
+1.0.14

setup.py
@@ -41,6 +41,7 @@
         "jaconv",
         "hydra-core>=1.3.2",
         "tensorboardX",
+        "rotary_embedding_torch",
     ],
     # train: The modules invoked when training only.
     "train": [
@@ -82,6 +83,17 @@
         "sphinx-markdown-tables>=0.0.12",
         "configargparse>=1.2.1"
     ],
+    "llm": [
+        "transformers>=4.32.0",
+        "accelerate",
+        "tiktoken",
+        "einops",
+        "transformers_stream_generator>=0.0.4",
+        "scipy",
+        "torchvision",
+        "pillow",
+        "matplotlib",
+    ],
 }

 requirements["all"].extend(requirements["train"])
 requirements["test"].extend(requirements["train"])