funasr/models/transformer/attention.py
@@ -82,7 +82,10 @@
         n_batch = value.size(0)
         if mask is not None:
             mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
-            min_value = float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
+            min_value = -float(
+                "inf"
+            )
+            # min_value = float(np.finfo(torch.tensor(0, dtype=qk.dtype).numpy().dtype).min)
             scores = scores.masked_fill(mask, min_value)
             self.attn = torch.softmax(scores, dim=-1).masked_fill(
                 mask, 0.0
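For context, below is a minimal standalone sketch of the masked-softmax pattern this hunk touches; the shapes and variable values are assumptions for illustration, not the exact FunASR code. Padded positions are filled with -float("inf") before the softmax so they receive exactly zero probability, and the trailing masked_fill(mask, 0.0) zeroes rows that are entirely masked, where a softmax over all -inf values would otherwise yield NaN.

import torch

# Hypothetical shapes for illustration: scores is (batch, head, time1, time2),
# mask is (batch, 1, time2) with True at valid frames, as in the patched code.
scores = torch.randn(2, 4, 5, 5)
mask = torch.ones(2, 1, 5, dtype=torch.bool)
mask[:, :, 3:] = False  # last two frames are padding

mask = mask.unsqueeze(1).eq(0)  # (batch, 1, 1, time2), True where padded
scores = scores.masked_fill(mask, -float("inf"))  # padded logits -> -inf
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)  # padded weights -> 0

# Valid positions sum to 1; padded positions carry exactly zero weight.
assert torch.allclose(attn.sum(dim=-1), torch.ones(2, 4, 5))

Compared with the removed line, which derived the fill value from the score dtype via a numpy.finfo round-trip, the -inf constant is dtype-agnostic and makes exp(score) exactly zero under the softmax; the commented-out line the patch adds keeps the previous approach visible for reference.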