From 4bfcfd7f13e34da6e25a38c77f1c3de7b138696a Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Tue, 22 Apr 2025 09:53:18 +0800
Subject: [PATCH] Register Whisper-large-v3-turbo and pass DecodingOptions by keyword
---
funasr/models/whisper/model.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/funasr/models/whisper/model.py b/funasr/models/whisper/model.py
index 8e9245a..398eea3 100644
--- a/funasr/models/whisper/model.py
+++ b/funasr/models/whisper/model.py
@@ -7,7 +7,11 @@
import torch.nn.functional as F
from torch import Tensor
from torch import nn
+
import whisper
+
+# import whisper_timestamped as whisper
+
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
from funasr.register import tables
@@ -24,6 +28,7 @@
@tables.register("model_classes", "Whisper-large-v1")
@tables.register("model_classes", "Whisper-large-v2")
@tables.register("model_classes", "Whisper-large-v3")
+@tables.register("model_classes", "Whisper-large-v3-turbo")
@tables.register("model_classes", "WhisperWarp")
class WhisperWarp(nn.Module):
def __init__(self, *args, **kwargs):
@@ -108,7 +113,9 @@
# decode the audio
options = whisper.DecodingOptions(**kwargs.get("DecodingOptions", {}))
- result = whisper.decode(self.model, speech, options)
+
+ result = whisper.decode(self.model, speech, options=options)
+ # result = whisper.transcribe(self.model, speech)
results = []
result_i = {"key": key[0], "text": result.text}
--
Gitblit v1.9.1