From a8591060d3889cd7a72841fa32a7ee64b49db1d2 Mon Sep 17 00:00:00 2001
From: msgk <zxr935867802@outlook.com>
Date: Fri, 14 Feb 2025 14:16:51 +0800
Subject: [PATCH] fix(spk): 修复 speaker embedding 集群后的重新排序问题
---
funasr/models/whisper/model.py | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/funasr/models/whisper/model.py b/funasr/models/whisper/model.py
index 8e9245a..398eea3 100644
--- a/funasr/models/whisper/model.py
+++ b/funasr/models/whisper/model.py
@@ -7,7 +7,11 @@
import torch.nn.functional as F
from torch import Tensor
from torch import nn
+
import whisper
+
+# import whisper_timestamped as whisper
+
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
from funasr.register import tables
@@ -24,6 +28,7 @@
@tables.register("model_classes", "Whisper-large-v1")
@tables.register("model_classes", "Whisper-large-v2")
@tables.register("model_classes", "Whisper-large-v3")
+@tables.register("model_classes", "Whisper-large-v3-turbo")
@tables.register("model_classes", "WhisperWarp")
class WhisperWarp(nn.Module):
def __init__(self, *args, **kwargs):
@@ -108,7 +113,9 @@
# decode the audio
options = whisper.DecodingOptions(**kwargs.get("DecodingOptions", {}))
- result = whisper.decode(self.model, speech, options)
+
+ result = whisper.decode(self.model, speech, options=options)
+ # result = whisper.transcribe(self.model, speech)
results = []
result_i = {"key": key[0], "text": result.text}
--
Gitblit v1.9.1