From e24dbdc496debec225414d4d2c760f5775e64f2a Mon Sep 17 00:00:00 2001
From: 天地 <tiandiweizun@gmail.com>
Date: 星期三, 26 三月 2025 13:44:41 +0800
Subject: [PATCH] 感觉应该从文件读取更合适,因为上面判断了文件存在,且可以读取,如果本身是文本的话,下面也会有逻辑进行处理 (#2452)
---
funasr/models/whisper/model.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/funasr/models/whisper/model.py b/funasr/models/whisper/model.py
index 8e9245a..398eea3 100644
--- a/funasr/models/whisper/model.py
+++ b/funasr/models/whisper/model.py
@@ -7,7 +7,11 @@
import torch.nn.functional as F
from torch import Tensor
from torch import nn
+
import whisper
+
+# import whisper_timestamped as whisper
+
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
from funasr.register import tables
@@ -24,6 +28,7 @@
@tables.register("model_classes", "Whisper-large-v1")
@tables.register("model_classes", "Whisper-large-v2")
@tables.register("model_classes", "Whisper-large-v3")
+@tables.register("model_classes", "Whisper-large-v3-turbo")
@tables.register("model_classes", "WhisperWarp")
class WhisperWarp(nn.Module):
def __init__(self, *args, **kwargs):
@@ -108,7 +113,9 @@
# decode the audio
options = whisper.DecodingOptions(**kwargs.get("DecodingOptions", {}))
- result = whisper.decode(self.model, speech, options)
+
+ result = whisper.decode(self.model, speech, options=options)
+ # result = whisper.transcribe(self.model, speech)
results = []
result_i = {"key": key[0], "text": result.text}
--
Gitblit v1.9.1