From 63e60cc43ddab5d28908e5e84e26a0553eb120f8 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 11 Jun 2024 13:49:57 +0800
Subject: [PATCH] fp16
---
funasr/models/llm_asr/model.py | 7 +++++++
funasr/auto/auto_model.py | 2 ++
2 files changed, 9 insertions(+), 0 deletions(-)
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 7b5a02f..22b1ac0 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -233,6 +233,8 @@
# fp16
if kwargs.get("fp16", False):
model.to(torch.float16)
+ elif kwargs.get("bf16", False):
+ model.to(torch.bfloat16)
return model, kwargs
def __call__(self, *args, **cfg):
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 5f15309..f72b2c8 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -684,6 +684,13 @@
# audio encoder
speech = batch["speech"]
speech_lengths = batch["speech_lengths"][:, 0]
+        # fp16 / bf16: cast the input features to the reduced-precision dtype.
+        # NOTE(review): only `speech` is cast. The original patch also cast
+        # `encoder_out_lens`, but that name is not defined until the
+        # audio_encoder call below (NameError), and length tensors must stay
+        # integer-typed for downstream masking in any case.
+        if kwargs.get("fp16", False):
+            speech = speech.to(torch.float16)
+        elif kwargs.get("bf16", False):
+            speech = speech.to(torch.bfloat16)
encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
# audio_adaptor
--
Gitblit v1.9.1