From 6d3a3da8a8c7d1be9740a9b2d6fac767f8dfff17 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 30 May 2024 19:16:52 +0800
Subject: [PATCH] docs
---
funasr/models/sense_voice/decoder.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/funasr/models/sense_voice/decoder.py b/funasr/models/sense_voice/decoder.py
index dd00ca8..60af29a 100644
--- a/funasr/models/sense_voice/decoder.py
+++ b/funasr/models/sense_voice/decoder.py
@@ -146,7 +146,9 @@
qk = qk + mask[:n_ctx, :n_ctx]
else:
mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
- min_value = float(np.finfo(torch.tensor(0, dtype=qk.dtype).numpy().dtype).min)
+ min_value = -float(
+ "inf"
+ ) # min_value = float(np.finfo(torch.tensor(0, dtype=qk.dtype).numpy().dtype).min)
qk = qk.masked_fill(mask, min_value)
qk = qk.float()
@@ -472,7 +474,7 @@
is_pad_mask = kwargs.get("is_pad_mask", False)
is_pad_memory_mask = kwargs.get("is_pad_memory_mask", False)
- fsmn_cache = cache[layer]["fsmn_cache"] if len(cache) > 0 or cache is None else None
+ fsmn_cache = cache[layer]["fsmn_cache"] if cache is not None and len(cache) > 0 else None
# if fsmn_cache is not None:
# x = x[:, -1:]
att_res, fsmn_cache = self.attn(self.attn_ln(x), mask=None, cache=fsmn_cache)
@@ -599,5 +601,6 @@
def score(self, ys, state, x):
"""Score."""
ys_mask = subsequent_mask(len(ys), device=x.device).unsqueeze(0)
- logp = self.forward(ys.unsqueeze(0), x.unsqueeze(0), cache=state)
+ logp = self.forward(ys.unsqueeze(0), x.unsqueeze(0), cache=None)
+ logp = torch.log_softmax(logp, dim=-1)
return logp.squeeze(0)[-1, :], state
--
Gitblit v1.9.1