From 1b9ac4f7a2eb600e5b769e7294cb4a8f4ec31b63 Mon Sep 17 00:00:00 2001
From: TeaPoly <lekai.huang@gmail.com>
Date: Fri, 2 Dec 2022 12:00:22 +0800
Subject: [PATCH] Fix some issues to make batch inference easier for predictor and decoder.
---
funasr/models/predictor/cif.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/funasr/models/predictor/cif.py b/funasr/models/predictor/cif.py
index 8199708..cf60eaf 100644
--- a/funasr/models/predictor/cif.py
+++ b/funasr/models/predictor/cif.py
@@ -147,7 +147,7 @@
b, t, d = hidden.size()
tail_threshold = self.tail_threshold
tail_threshold = torch.tensor([tail_threshold], dtype=alphas.dtype).to(alphas.device)
- tail_threshold = torch.reshape(tail_threshold, (1, 1))
+ tail_threshold = tail_threshold.unsqueeze(0).repeat(b, 1)
alphas = torch.cat([alphas, tail_threshold], dim=1)
zeros = torch.zeros((b, 1, d), dtype=hidden.dtype).to(hidden.device)
hidden = torch.cat([hidden, zeros], dim=1)
--
Gitblit v1.9.1