From e299cfecaf979833d9c4d7c70e44cb92ea066afe Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 09 May 2024 20:02:37 +0800
Subject: [PATCH] total_time/accum_grad
---
funasr/models/language_model/transformer_lm.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/funasr/models/language_model/transformer_lm.py b/funasr/models/language_model/transformer_lm.py
index cc71142..3b90e3d 100644
--- a/funasr/models/language_model/transformer_lm.py
+++ b/funasr/models/language_model/transformer_lm.py
@@ -66,9 +66,7 @@
y = self.decoder(h)
return y, None
- def score(
- self, y: torch.Tensor, state: Any, x: torch.Tensor
- ) -> Tuple[torch.Tensor, Any]:
+ def score(self, y: torch.Tensor, state: Any, x: torch.Tensor) -> Tuple[torch.Tensor, Any]:
"""Score new token.
Args:
@@ -115,8 +113,7 @@
else:
# transpose state of [batch, layer] into [layer, batch]
batch_state = [
- torch.stack([states[b][i] for b in range(n_batch)])
- for i in range(n_layers)
+ torch.stack([states[b][i] for b in range(n_batch)]) for i in range(n_layers)
]
# batch decoding
--
Gitblit v1.9.1