From 9aa3a4d0226943e5b3d0fee54f697164ec90cfa2 Mon Sep 17 00:00:00 2001
From: 北念 <lzr265946@alibaba-inc.com>
Date: Tue, 31 Oct 2023 19:52:55 +0800
Subject: [PATCH] add paraformer-16k-en finetune pipeline

---
 funasr/modules/streaming_utils/chunk_utilis.py |    4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/funasr/modules/streaming_utils/chunk_utilis.py b/funasr/modules/streaming_utils/chunk_utilis.py
index ea37c68..73dbaf4 100644
--- a/funasr/modules/streaming_utils/chunk_utilis.py
+++ b/funasr/modules/streaming_utils/chunk_utilis.py
@@ -11,7 +11,7 @@
 
 class overlap_chunk():
 	"""
-	author: Speech Lab, Alibaba Group, China
+	Author: Speech Lab of DAMO Academy, Alibaba Group
 	San-m: Memory equipped self-attention for end-to-end speech recognition
 	https://arxiv.org/abs/2006.01713
 
@@ -61,7 +61,7 @@
 		chunk_num = len(self.chunk_size)
 		ind = 0
 		if training and chunk_num > 1:
-			ind = torch.randint(0, chunk_num-1, ()).cpu().item()
+			ind = torch.randint(0, chunk_num, ()).cpu().item()
 		if not training and decoding_ind is not None:
 			ind = int(decoding_ind)
 

--
Gitblit v1.9.1
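
Note on the functional change above: torch.randint's `high` argument is
exclusive, so the old call `torch.randint(0, chunk_num-1, ())` could never
select the last entry of `self.chunk_size` during training; the patch widens
the range to cover all configured chunk sizes. The following is a minimal
standalone sketch (not part of the patch; the `chunk_size` values are
hypothetical) showing the effect of the bound change:

    # Sketch of the off-by-one fixed in overlap_chunk: torch.randint's
    # `high` bound is exclusive, so the upper bound must be chunk_num,
    # not chunk_num - 1, for every chunk size to be sampled.
    import torch

    chunk_size = [16, 24, 32]  # hypothetical multi-chunk-size config
    chunk_num = len(chunk_size)

    # Old (buggy): randint(0, chunk_num - 1) samples only {0, 1},
    # so chunk_size[2] is never used in training.
    # New (fixed): randint(0, chunk_num) samples {0, 1, 2}.
    torch.manual_seed(0)
    counts = [0] * chunk_num
    for _ in range(3000):
        ind = torch.randint(0, chunk_num, ()).cpu().item()
        counts[ind] += 1
    print(counts)  # all indices, including the last, now have nonzero counts

Decoding is unaffected: when `training` is false and `decoding_ind` is given,
`ind` is taken from `decoding_ind` and the random draw is never executed.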