From 2a66366be4c2715870e4859fd5a5db6e8a9dc00a Mon Sep 17 00:00:00 2001
From: chenmengzheAAA <123789350+chenmengzheAAA@users.noreply.github.com>
Date: Thu, 14 Sep 2023 19:00:17 +0800
Subject: [PATCH] Merge pull request #956 from alibaba-damo-academy/chenmengzheAAA-patch-4
---
funasr/models/encoder/rnn_encoder.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/funasr/models/encoder/rnn_encoder.py b/funasr/models/encoder/rnn_encoder.py
index 59730da..353beaf 100644
--- a/funasr/models/encoder/rnn_encoder.py
+++ b/funasr/models/encoder/rnn_encoder.py
@@ -5,7 +5,6 @@
import numpy as np
import torch
-from typeguard import check_argument_types
from funasr.modules.nets_utils import make_pad_mask
from funasr.modules.rnn.encoders import RNN
@@ -37,7 +36,6 @@
dropout: float = 0.0,
subsample: Optional[Sequence[int]] = (2, 2, 1, 1),
):
- assert check_argument_types()
super().__init__()
self._output_size = output_size
self.rnn_type = rnn_type
@@ -48,12 +46,12 @@
raise ValueError(f"Not supported rnn_type={rnn_type}")
if subsample is None:
- subsample = np.ones(num_layers + 1, dtype=np.int)
+ subsample = np.ones(num_layers + 1, dtype=np.int32)
else:
subsample = subsample[:num_layers]
# Append 1 at the beginning because the second or later is used
subsample = np.pad(
- np.array(subsample, dtype=np.int),
+ np.array(subsample, dtype=np.int32),
[1, num_layers - len(subsample)],
mode="constant",
constant_values=1,
--
Gitblit v1.9.1