From f57b3788f2c91e52a48629423c7bc0539e44f793 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 11 Jun 2024 15:47:21 +0800
Subject: [PATCH] fix bug
---
funasr/models/llm_asr/model.py | 2 --
funasr/auto/auto_model.py | 2 +-
2 files changed, 1 insertions(+), 3 deletions(-)
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 047e652..bbaf657 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -212,7 +212,6 @@
deep_update(model_conf, kwargs.get("model_conf", {}))
deep_update(model_conf, kwargs)
model = model_class(**model_conf, vocab_size=vocab_size)
- model.to(device)
# init_param
init_param = kwargs.get("init_param", None)
@@ -235,6 +234,7 @@
model.to(torch.float16)
elif kwargs.get("bf16", False):
model.to(torch.bfloat16)
+ model.to(device)
return model, kwargs
def __call__(self, *args, **cfg):
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 66c416b..dd806cf 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -687,10 +687,8 @@
# fp16
if kwargs.get("fp16", False):
speech = speech.to(torch.float16)
- encoder_out_lens = encoder_out_lens.to(torch.float16)
elif kwargs.get("bf16", False):
speech = speech.to(torch.bfloat16)
- encoder_out_lens = encoder_out_lens.to(torch.bfloat16)
encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
# audio_adaptor
--
Gitblit v1.9.1