From bf4b3ef9cb95acaa2b92b98f236c4f3228cdbc2d Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Thu, 21 Sep 2023 16:30:43 +0800
Subject: [PATCH] Merge pull request #976 from alibaba-damo-academy/dev_lhn
---
funasr/tasks/sa_asr.py | 20 ++++++--------------
 1 file changed, 6 insertions(+), 14 deletions(-)
diff --git a/funasr/tasks/sa_asr.py b/funasr/tasks/sa_asr.py
index 738ec52..e7ee5a3 100644
--- a/funasr/tasks/sa_asr.py
+++ b/funasr/tasks/sa_asr.py
@@ -13,8 +13,6 @@
import numpy as np
import torch
import yaml
-from typeguard import check_argument_types
-from typeguard import check_return_type
from funasr.datasets.collate_fn import CommonCollateFn
from funasr.datasets.preprocessor import CommonPreprocessor
@@ -28,7 +26,7 @@
from funasr.models.decoder.transformer_decoder import (
DynamicConvolution2DTransformerDecoder, # noqa: H301
)
-from funasr.models.decoder.transformer_decoder_sa_asr import SAAsrTransformerDecoder
+from funasr.models.decoder.transformer_decoder import SAAsrTransformerDecoder
from funasr.models.decoder.transformer_decoder import DynamicConvolutionTransformerDecoder
from funasr.models.decoder.transformer_decoder import (
LightweightConvolution2DTransformerDecoder, # noqa: H301
@@ -39,7 +37,7 @@
from funasr.models.decoder.transformer_decoder import ParaformerDecoderSAN
from funasr.models.decoder.transformer_decoder import TransformerDecoder
from funasr.models.decoder.contextual_decoder import ContextualParaformerDecoder
-from funasr.models.e2e_sa_asr import ESPnetASRModel
+from funasr.models.e2e_sa_asr import SAASRModel
from funasr.models.e2e_asr_paraformer import Paraformer, ParaformerBert, BiCifParaformer, ContextualParaformer
from funasr.models.e2e_tp import TimestampPredictor
from funasr.models.e2e_asr_mfcca import MFCCA
@@ -70,11 +68,11 @@
from funasr.models.specaug.abs_specaug import AbsSpecAug
from funasr.models.specaug.specaug import SpecAug
from funasr.models.specaug.specaug import SpecAugLFR
+from funasr.models.base_model import FunASRModel
from funasr.modules.subsampling import Conv1dSubsampling
from funasr.tasks.abs_task import AbsTask
from funasr.text.phoneme_tokenizer import g2p_choices
from funasr.torch_utils.initialize import initialize
-from funasr.train.abs_espnet_model import AbsESPnetModel
from funasr.train.class_choices import ClassChoices
from funasr.train.trainer import Trainer
from funasr.utils.get_default_kwargs import get_default_kwargs
@@ -120,7 +118,7 @@
model_choices = ClassChoices(
"model",
classes=dict(
- asr=ESPnetASRModel,
+ asr=SAASRModel,
uniasr=UniASR,
paraformer=Paraformer,
paraformer_bert=ParaformerBert,
@@ -129,7 +127,7 @@
mfcca=MFCCA,
timestamp_prediction=TimestampPredictor,
),
- type_check=AbsESPnetModel,
+ type_check=FunASRModel,
default="asr",
)
preencoder_choices = ClassChoices(
@@ -445,7 +443,6 @@
[Collection[Tuple[str, Dict[str, np.ndarray]]]],
Tuple[List[str], Dict[str, torch.Tensor]],
]:
- assert check_argument_types()
# NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)
@@ -453,7 +450,6 @@
def build_preprocess_fn(
cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
- assert check_argument_types()
if args.use_preprocessor:
retval = CommonPreprocessor(
train=train,
@@ -483,7 +479,6 @@
)
else:
retval = None
- assert check_return_type(retval)
return retval
@classmethod
@@ -502,12 +497,10 @@
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
retval = ()
- assert check_return_type(retval)
return retval
@classmethod
def build_model(cls, args: argparse.Namespace):
- assert check_argument_types()
if isinstance(args.token_list, str):
with open(args.token_list, encoding="utf-8") as f:
token_list = [line.rstrip() for line in f]
@@ -619,5 +612,4 @@
if args.init is not None:
initialize(model, args.init)
- assert check_return_type(model)
- return model
+ return model
\ No newline at end of file
--
Gitblit v1.9.1