From 8d7f76af46cf0e77317ec8e84fcce6f208f24204 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期五, 07 六月 2024 11:40:46 +0800
Subject: [PATCH] auto frontend
---
funasr/models/language_model/transformer_lm.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/funasr/models/language_model/transformer_lm.py b/funasr/models/language_model/transformer_lm.py
index cc71142..3b90e3d 100644
--- a/funasr/models/language_model/transformer_lm.py
+++ b/funasr/models/language_model/transformer_lm.py
@@ -66,9 +66,7 @@
y = self.decoder(h)
return y, None
- def score(
- self, y: torch.Tensor, state: Any, x: torch.Tensor
- ) -> Tuple[torch.Tensor, Any]:
+ def score(self, y: torch.Tensor, state: Any, x: torch.Tensor) -> Tuple[torch.Tensor, Any]:
"""Score new token.
Args:
@@ -115,8 +113,7 @@
else:
# transpose state of [batch, layer] into [layer, batch]
batch_state = [
- torch.stack([states[b][i] for b in range(n_batch)])
- for i in range(n_layers)
+ torch.stack([states[b][i] for b in range(n_batch)]) for i in range(n_layers)
]
# batch decoding
--
Gitblit v1.9.1