From 149063ced4d2d5269f0472677228eadfcb4a4d8a Mon Sep 17 00:00:00 2001
From: 维石 <shixian.shi@alibaba-inc.com>
Date: Wed, 17 Apr 2024 14:33:24 +0800
Subject: [PATCH] update seaco finetune
---
funasr/auto/auto_model.py | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index bd786d0..630c390 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -21,15 +21,17 @@
from funasr.utils.timestamp_tools import timestamp_sentence
from funasr.download.download_from_hub import download_model
from funasr.utils.vad_utils import slice_padding_audio_samples
+from funasr.utils.vad_utils import merge_vad
from funasr.utils.load_utils import load_audio_text_image_video
from funasr.train_utils.set_all_random_seed import set_all_random_seed
from funasr.train_utils.load_pretrained_model import load_pretrained_model
from funasr.utils import export_utils
+
try:
from funasr.models.campplus.utils import sv_chunk, postprocess, distribute_spk
from funasr.models.campplus.cluster_backend import ClusterBackend
except:
- print("Notice: If you want to use the speaker diarization, please `pip install hdbscan`")
+ pass
def prepare_data_iterator(data_in, input_len=None, data_type=None, key=None):
@@ -110,7 +112,7 @@
if vad_model is not None:
logging.info("Building VAD model.")
vad_kwargs["model"] = vad_model
- vad_kwargs["model_revision"] = kwargs.get("vad_model_revision", None)
+ vad_kwargs["model_revision"] = kwargs.get("vad_model_revision", "master")
vad_kwargs["device"] = kwargs["device"]
vad_model, vad_kwargs = self.build_model(**vad_kwargs)
@@ -120,7 +122,7 @@
if punc_model is not None:
logging.info("Building punc model.")
punc_kwargs["model"] = punc_model
- punc_kwargs["model_revision"] = kwargs.get("punc_model_revision", None)
+ punc_kwargs["model_revision"] = kwargs.get("punc_model_revision", "master")
punc_kwargs["device"] = kwargs["device"]
punc_model, punc_kwargs = self.build_model(**punc_kwargs)
@@ -130,7 +132,7 @@
if spk_model is not None:
logging.info("Building SPK model.")
spk_kwargs["model"] = spk_model
- spk_kwargs["model_revision"] = kwargs.get("spk_model_revision", None)
+ spk_kwargs["model_revision"] = kwargs.get("spk_model_revision", "master")
spk_kwargs["device"] = kwargs["device"]
spk_model, spk_kwargs = self.build_model(**spk_kwargs)
self.cb_model = ClusterBackend().to(kwargs["device"])
@@ -201,7 +203,7 @@
load_pretrained_model(
model=model,
path=init_param,
- ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
+ ignore_init_mismatch=kwargs.get("ignore_init_mismatch", True),
oss_bucket=kwargs.get("oss_bucket", None),
scope_map=kwargs.get("scope_map", []),
excludes=kwargs.get("excludes", None),
@@ -209,6 +211,9 @@
else:
print(f"error, init_param does not exist!: {init_param}")
+ # fp16
+ if kwargs.get("fp16", False):
+ model.to(torch.float16)
return model, kwargs
def __call__(self, *args, **cfg):
@@ -294,6 +299,10 @@
res = self.inference(input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg)
end_vad = time.time()
+        # FIX(gcf): concat the VAD clips for the SenseVoice model for better AED
+ if kwargs.get("merge_vad", False):
+ for i in range(len(res)):
+ res[i]['value'] = merge_vad(res[i]['value'], kwargs.get("merge_length", 15000))
# step.2 compute asr model
model = self.model
--
Gitblit v1.9.1