From d550a62bead38c06ab1430ae63c30fa46243e3c3 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Thu, 29 Jun 2023 19:40:39 +0800
Subject: [PATCH] Merge branch 'main' of https://github.com/alibaba-damo-academy/FunASR into main
---
funasr/models/decoder/transformer_decoder.py | 10 ----------
 1 file changed, 0 insertions(+), 10 deletions(-)
diff --git a/funasr/models/decoder/transformer_decoder.py b/funasr/models/decoder/transformer_decoder.py
index 45fdda8..0a9c612 100644
--- a/funasr/models/decoder/transformer_decoder.py
+++ b/funasr/models/decoder/transformer_decoder.py
@@ -9,7 +9,6 @@
import torch
from torch import nn
-from typeguard import check_argument_types
from funasr.models.decoder.abs_decoder import AbsDecoder
from funasr.modules.attention import MultiHeadedAttention
@@ -184,7 +183,6 @@
pos_enc_class=PositionalEncoding,
normalize_before: bool = True,
):
- assert check_argument_types()
super().__init__()
attention_dim = encoder_output_size
@@ -373,7 +371,6 @@
normalize_before: bool = True,
concat_after: bool = False,
):
- assert check_argument_types()
super().__init__(
vocab_size=vocab_size,
encoder_output_size=encoder_output_size,
@@ -428,7 +425,6 @@
concat_after: bool = False,
embeds_id: int = -1,
):
- assert check_argument_types()
super().__init__(
vocab_size=vocab_size,
encoder_output_size=encoder_output_size,
@@ -540,7 +536,6 @@
conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
conv_usebias: int = False,
):
- assert check_argument_types()
if len(conv_kernel_length) != num_blocks:
raise ValueError(
"conv_kernel_length must have equal number of values to num_blocks: "
@@ -602,7 +597,6 @@
conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
conv_usebias: int = False,
):
- assert check_argument_types()
if len(conv_kernel_length) != num_blocks:
raise ValueError(
"conv_kernel_length must have equal number of values to num_blocks: "
@@ -664,7 +658,6 @@
conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
conv_usebias: int = False,
):
- assert check_argument_types()
if len(conv_kernel_length) != num_blocks:
raise ValueError(
"conv_kernel_length must have equal number of values to num_blocks: "
@@ -726,7 +719,6 @@
conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
conv_usebias: int = False,
):
- assert check_argument_types()
if len(conv_kernel_length) != num_blocks:
raise ValueError(
"conv_kernel_length must have equal number of values to num_blocks: "
@@ -781,7 +773,6 @@
pos_enc_class=PositionalEncoding,
normalize_before: bool = True,
):
- assert check_argument_types()
super().__init__()
attention_dim = encoder_output_size
@@ -955,7 +946,6 @@
normalize_before: bool = True,
concat_after: bool = False,
):
- assert check_argument_types()
super().__init__(
vocab_size=vocab_size,
encoder_output_size=encoder_output_size,
--
Gitblit v1.9.1