From 98abc0e5ac1a1da0fe1802d9ffb623802fbf0b2f Mon Sep 17 00:00:00 2001
From: jmwang66 <wangjiaming.wjm@alibaba-inc.com>
Date: Thu, 29 Jun 2023 16:30:39 +0800
Subject: [PATCH] update setup (#686)

---
 funasr/datasets/small_datasets/length_batch_sampler.py                                     |    2 
 funasr/models/e2e_asr.py                                                                   |    2 
 funasr/datasets/small_datasets/preprocessor.py                                             |   11 -
 funasr/text/token_id_converter.py                                                          |    2 
 funasr/bin/lm_inference_launch.py                                                          |    2 
 funasr/build_utils/build_trainer.py                                                        |    5 
 funasr/optimizers/sgd.py                                                                   |    2 
 funasr/tasks/vad.py                                                                        |    8 
 funasr/tasks/sa_asr.py                                                                     |    8 
 funasr/tasks/punctuation.py                                                                |    9 
 funasr/text/cleaner.py                                                                     |    2 
 funasr/runtime/python/onnxruntime/funasr_onnx/utils/utils.py                               |    3 
 funasr/modules/eend_ola/utils/report.py                                                    |    2 
 funasr/models/encoder/opennmt_encoders/conv_encoder.py                                     |    2 
 funasr/main_funcs/collect_stats.py                                                         |    2 
 funasr/bin/sv_inference_launch.py                                                          |    7 
 funasr/models/encoder/conformer_encoder.py                                                 |    3 
 funasr/models/encoder/sanm_encoder.py                                                      |    4 
 funasr/tasks/diar.py                                                                       |   16 -
 funasr/tasks/lm.py                                                                         |    8 
 funasr/torch_utils/initialize.py                                                           |    2 
 funasr/bin/punc_inference_launch.py                                                        |    3 
 funasr/layers/mask_along_axis.py                                                           |    4 
 funasr/tasks/abs_task.py                                                                   |   16 -
 funasr/text/sentencepiece_tokenizer.py                                                     |    2 
 funasr/bin/asr_infer.py                                                                    |   52 ----
 funasr/fileio/datadir_writer.py                                                            |    6 
 funasr/layers/label_aggregation.py                                                         |    2 
 funasr/models/encoder/data2vec_encoder.py                                                  |    2 
 funasr/models/decoder/rnnt_decoder.py                                                      |    2 
 funasr/models/e2e_diar_eend_ola.py                                                         |    2 
 funasr/datasets/collate_fn.py                                                              |    7 
 funasr/models/e2e_diar_sond.py                                                             |    2 
 funasr/models/e2e_tp.py                                                                    |    2 
 funasr/train/abs_model.py                                                                  |    3 
 funasr/samplers/num_elements_batch_sampler.py                                              |    2 
 funasr/models/frontend/windowing.py                                                        |    2 
 funasr/fileio/rand_gen_dataset.py                                                          |    3 
 funasr/text/build_tokenizer.py                                                             |    2 
 funasr/datasets/iterable_dataset.py                                                        |    2 
 funasr/models/preencoder/sinc.py                                                           |    3 
 funasr/build_utils/build_model_from_file.py                                                |    2 
 funasr/models/decoder/sanm_decoder.py                                                      |    3 
 funasr/text/phoneme_tokenizer.py                                                           |    2 
 funasr/bin/vad_infer.py                                                                    |    4 
 funasr/models/decoder/transformer_decoder.py                                               |   10 
 funasr/train/class_choices.py                                                              |    5 
 funasr/models/decoder/rnn_decoder.py                                                       |    2 
 funasr/utils/griffin_lim.py                                                                |    2 
 funasr/datasets/dataset.py                                                                 |    6 
 funasr/layers/utterance_mvn.py                                                             |    2 
 funasr/text/word_tokenizer.py                                                              |    2 
 funasr/models/e2e_asr_transducer.py                                                        |    9 
 funasr/models/encoder/rnn_encoder.py                                                       |    2 
 egs/alimeeting/sa_asr/local/format_wav_scp.py                                              |    2 
 funasr/train/reporter.py                                                                   |    9 
 funasr/bin/vad_inference_launch.py                                                         |    3 
 funasr/models/e2e_asr_paraformer.py                                                        |    6 
 funasr/bin/diar_inference_launch.py                                                        |    3 
 funasr/build_utils/build_sv_model.py                                                       |    2 
 funasr/models/postencoder/hugging_face_transformers_postencoder.py                         |    2 
 funasr/samplers/length_batch_sampler.py                                                    |    2 
 funasr/main_funcs/average_nbest_models.py                                                  |    2 
 funasr/samplers/sorted_batch_sampler.py                                                    |    2 
 funasr/models/e2e_asr_contextual_paraformer.py                                             |    2 
 funasr/schedulers/warmup_lr.py                                                             |    2 
 funasr/layers/global_mvn.py                                                                |    2 
 funasr/models/e2e_sv.py                                                                    |    2 
 funasr/models/preencoder/linear.py                                                         |    2 
 funasr/datasets/large_datasets/build_dataloader.py                                         |    2 
 funasr/train/trainer.py                                                                    |    5 
 setup.py                                                                                   |   54 +---
 funasr/datasets/small_datasets/dataset.py                                                  |    6 
 funasr/bin/diar_infer.py                                                                   |   65 -----
 funasr/export/export_model.py                                                              |    2 
 funasr/torch_utils/forward_adaptor.py                                                      |    2 
 funasr/bin/tp_inference_launch.py                                                          |    2 
 funasr/layers/stft.py                                                                      |    2 
 funasr/models/data2vec.py                                                                  |    2 
 funasr/models/encoder/transformer_encoder.py                                               |    2 
 funasr/text/char_tokenizer.py                                                              |    2 
 funasr/models/encoder/opennmt_encoders/self_attention_encoder.py                           |    2 
 funasr/schedulers/noam_lr.py                                                               |    2 
 funasr/bin/sv_infer.py                                                                     |   35 ---
 funasr/datasets/small_datasets/collate_fn.py                                               |    5 
 funasr/models/seq_rnn_lm.py                                                                |    2 
 funasr/fileio/read_text.py                                                                 |    3 
 funasr/iterators/chunk_iter_factory.py                                                     |    2 
 funasr/runtime/python/libtorch/funasr_torch/utils/utils.py                                 |    3 
 funasr/fileio/sound_scp.py                                                                 |    3 
 funasr/samplers/build_batch_sampler.py                                                     |    4 
 funasr/iterators/multiple_iter_factory.py                                                  |    2 
 funasr/models/e2e_asr_mfcca.py                                                             |    2 
 funasr/models/e2e_sa_asr.py                                                                |    2 
 funasr/samplers/folded_batch_sampler.py                                                    |    2 
 funasr/models/encoder/mfcca_encoder.py                                                     |    2 
 funasr/samplers/unsorted_batch_sampler.py                                                  |    2 
 funasr/build_utils/build_streaming_iterator.py                                             |    2 
 funasr/iterators/sequence_iter_factory.py                                                  |    2 
 funasr/models/frontend/fused.py                                                            |    2 
 funasr/datasets/preprocessor.py                                                            |   11 -
 funasr/models/frontend/s3prl.py                                                            |    2 
 funasr/fileio/npy_scp.py                                                                   |    3 
 funasr/models/e2e_uni_asr.py                                                               |    2 
 funasr/models/encoder/opennmt_encoders/fsmn_encoder.py                                     |    1 
 funasr/layers/sinc_conv.py                                                                 |    4 
 funasr/runtime/python/onnxruntime/funasr_onnx/utils/frontend.py                            |    2 
 funasr/tasks/sv.py                                                                         |    9 
 funasr/models/decoder/contextual_decoder.py                                                |    2 
 funasr/tasks/data2vec.py                                                                   |    8 
 funasr/tasks/asr.py                                                                        |   22 --
 funasr/models/frontend/wav_frontend.py                                                     |    4 
 funasr/runtime/triton_gpu/model_repo_paraformer_large_offline/feature_extractor/1/model.py |    1 
 funasr/schedulers/tri_stage_scheduler.py                                                   |    2 
 funasr/bin/tp_infer.py                                                                     |    3 
 funasr/models/frontend/default.py                                                          |    3 
 funasr/bin/tokenize_text.py                                                                |    2 
 funasr/models/ctc.py                                                                       |    7 
 funasr/bin/asr_inference_launch.py                                                         |    9 
 funasr/runtime/python/libtorch/funasr_torch/utils/frontend.py                              |    2 
 120 files changed, 18 insertions(+), 621 deletions(-)

diff --git a/egs/alimeeting/sa_asr/local/format_wav_scp.py b/egs/alimeeting/sa_asr/local/format_wav_scp.py
index 1fd63d6..cb0eac3 100755
--- a/egs/alimeeting/sa_asr/local/format_wav_scp.py
+++ b/egs/alimeeting/sa_asr/local/format_wav_scp.py
@@ -11,7 +11,6 @@
 import resampy
 import soundfile
 from tqdm import tqdm
-from typeguard import check_argument_types
 
 from funasr.utils.cli_utils import get_commandline_args
 from funasr.fileio.read_text import read_2column_text
@@ -31,7 +30,6 @@
     (3, 4, 5)
 
     """
-    assert check_argument_types()
     if integers.strip() in ("none", "None", "NONE", "null", "Null", "NULL"):
         return None
     return tuple(map(int, integers.strip().split(",")))
diff --git a/funasr/bin/asr_infer.py b/funasr/bin/asr_infer.py
index a537a73..259a286 100644
--- a/funasr/bin/asr_infer.py
+++ b/funasr/bin/asr_infer.py
@@ -22,9 +22,7 @@
 import requests
 import torch
 from packaging.version import parse as V
-from typeguard import check_argument_types
-from typeguard import check_return_type
-from  funasr.build_utils.build_model_from_file import build_model_from_file
+from funasr.build_utils.build_model_from_file import build_model_from_file
 from funasr.models.e2e_asr_contextual_paraformer import NeatContextualParaformer
 from funasr.models.e2e_asr_paraformer import BiCifParaformer, ContextualParaformer
 from funasr.models.frontend.wav_frontend import WavFrontend, WavFrontendOnline
@@ -78,7 +76,6 @@
             frontend_conf: dict = None,
             **kwargs,
     ):
-        assert check_argument_types()
 
         # 1. Build ASR model
         scorers = {}
@@ -192,7 +189,6 @@
             text, token, token_int, hyp
 
         """
-        assert check_argument_types()
 
         # Input as audio signal
         if isinstance(speech, np.ndarray):
@@ -248,7 +244,6 @@
                 text = None
             results.append((text, token, token_int, hyp))
 
-        assert check_return_type(results)
         return results
 
 
@@ -288,7 +283,6 @@
             decoding_ind: int = 0,
             **kwargs,
     ):
-        assert check_argument_types()
 
         # 1. Build ASR model
         scorers = {}
@@ -413,7 +407,6 @@
                 text, token, token_int, hyp
 
         """
-        assert check_argument_types()
 
         # Input as audio signal
         if isinstance(speech, np.ndarray):
@@ -516,7 +509,6 @@
                                                                vad_offset=begin_time)
                 results.append((text, token, token_int, hyp, timestamp, enc_len_batch_total, lfr_factor))
 
-        # assert check_return_type(results)
         return results
 
     def generate_hotwords_list(self, hotword_list_or_file):
@@ -656,7 +648,6 @@
             hotword_list_or_file: str = None,
             **kwargs,
     ):
-        assert check_argument_types()
 
         # 1. Build ASR model
         scorers = {}
@@ -776,7 +767,6 @@
                 text, token, token_int, hyp
 
         """
-        assert check_argument_types()
         results = []
         cache_en = cache["encoder"]
         if speech.shape[1] < 16 * 60 and cache_en["is_final"]:
@@ -871,7 +861,6 @@
 
                 results.append(postprocessed_result)
 
-        # assert check_return_type(results)
         return results
 
 
@@ -912,7 +901,6 @@
             frontend_conf: dict = None,
             **kwargs,
     ):
-        assert check_argument_types()
 
         # 1. Build ASR model
         scorers = {}
@@ -1036,7 +1024,6 @@
             text, token, token_int, hyp
 
         """
-        assert check_argument_types()
 
         # Input as audio signal
         if isinstance(speech, np.ndarray):
@@ -1104,7 +1091,6 @@
                 text = None
             results.append((text, token, token_int, hyp))
 
-        assert check_return_type(results)
         return results
 
 
@@ -1143,7 +1129,6 @@
             streaming: bool = False,
             **kwargs,
     ):
-        assert check_argument_types()
 
         # 1. Build ASR model
         scorers = {}
@@ -1248,7 +1233,6 @@
             text, token, token_int, hyp
 
         """
-        assert check_argument_types()
         # Input as audio signal
         if isinstance(speech, np.ndarray):
             speech = torch.tensor(speech)
@@ -1298,7 +1282,6 @@
                 text = None
             results.append((text, token, token_int, hyp))
 
-        assert check_return_type(results)
         return results
 
 
@@ -1355,7 +1338,6 @@
         """Construct a Speech2Text object."""
         super().__init__()
 
-        assert check_argument_types()
         asr_model, asr_train_args = build_model_from_file(
             asr_train_config, asr_model_file, cmvn_file, device
         )
@@ -1534,7 +1516,6 @@
         Returns:
             nbest_hypothesis: N-best hypothesis.
         """
-        assert check_argument_types()
 
         if isinstance(speech, np.ndarray):
             speech = torch.tensor(speech)
@@ -1566,7 +1547,6 @@
         Returns:
             nbest_hypothesis: N-best hypothesis.
         """
-        assert check_argument_types()
 
         if isinstance(speech, np.ndarray):
             speech = torch.tensor(speech)
@@ -1608,35 +1588,8 @@
                 text = None
             results.append((text, token, token_int, hyp))
 
-            assert check_return_type(results)
 
         return results
-
-    @staticmethod
-    def from_pretrained(
-            model_tag: Optional[str] = None,
-            **kwargs: Optional[Any],
-    ) -> Speech2Text:
-        """Build Speech2Text instance from the pretrained model.
-        Args:
-            model_tag: Model tag of the pretrained models.
-        Return:
-            : Speech2Text instance.
-        """
-        if model_tag is not None:
-            try:
-                from espnet_model_zoo.downloader import ModelDownloader
-
-            except ImportError:
-                logging.error(
-                    "`espnet_model_zoo` is not installed. "
-                    "Please install via `pip install -U espnet_model_zoo`."
-                )
-                raise
-            d = ModelDownloader()
-            kwargs.update(**d.download_and_unpack(model_tag))
-
-        return Speech2TextTransducer(**kwargs)
 
 
 class Speech2TextSAASR:
@@ -1675,7 +1628,6 @@
             frontend_conf: dict = None,
             **kwargs,
     ):
-        assert check_argument_types()
 
         # 1. Build ASR model
         scorers = {}
@@ -1793,7 +1745,6 @@
             text, text_id, token, token_int, hyp
 
         """
-        assert check_argument_types()
 
         # Input as audio signal
         if isinstance(speech, np.ndarray):
@@ -1886,5 +1837,4 @@
 
             results.append((text, text_id, token, token_int, hyp))
 
-        assert check_return_type(results)
         return results
diff --git a/funasr/bin/asr_inference_launch.py b/funasr/bin/asr_inference_launch.py
index 5d1b804..37a5fe4 100644
--- a/funasr/bin/asr_inference_launch.py
+++ b/funasr/bin/asr_inference_launch.py
@@ -21,7 +21,6 @@
 import torchaudio
 import soundfile
 import yaml
-from typeguard import check_argument_types
 
 from funasr.bin.asr_infer import Speech2Text
 from funasr.bin.asr_infer import Speech2TextMFCCA
@@ -80,7 +79,6 @@
         param_dict: dict = None,
         **kwargs,
 ):
-    assert check_argument_types()
     ncpu = kwargs.get("ncpu", 1)
     torch.set_num_threads(ncpu)
     if batch_size > 1:
@@ -240,7 +238,6 @@
         param_dict: dict = None,
         **kwargs,
 ):
-    assert check_argument_types()
     ncpu = kwargs.get("ncpu", 1)
     torch.set_num_threads(ncpu)
 
@@ -481,7 +478,6 @@
         param_dict: dict = None,
         **kwargs,
 ):
-    assert check_argument_types()
     ncpu = kwargs.get("ncpu", 1)
     torch.set_num_threads(ncpu)
 
@@ -749,7 +745,6 @@
         param_dict: dict = None,
         **kwargs,
 ):
-    assert check_argument_types()
 
     if word_lm_train_config is not None:
         raise NotImplementedError("Word LM is not implemented")
@@ -957,7 +952,6 @@
         param_dict: dict = None,
         **kwargs,
 ):
-    assert check_argument_types()
     ncpu = kwargs.get("ncpu", 1)
     torch.set_num_threads(ncpu)
     if batch_size > 1:
@@ -1126,7 +1120,6 @@
         param_dict: dict = None,
         **kwargs,
 ):
-    assert check_argument_types()
     ncpu = kwargs.get("ncpu", 1)
     torch.set_num_threads(ncpu)
     if batch_size > 1:
@@ -1314,7 +1307,6 @@
         right_context: Number of frames in right context AFTER subsampling.
         display_partial_hypotheses: Whether to display partial hypotheses.
     """
-    assert check_argument_types()
 
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
@@ -1464,7 +1456,6 @@
         param_dict: dict = None,
         **kwargs,
 ):
-    assert check_argument_types()
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
     if word_lm_train_config is not None:
diff --git a/funasr/bin/diar_infer.py b/funasr/bin/diar_infer.py
index 7c41b60..3efa641 100755
--- a/funasr/bin/diar_infer.py
+++ b/funasr/bin/diar_infer.py
@@ -15,7 +15,6 @@
 import torch
 from scipy.ndimage import median_filter
 from torch.nn import functional as F
-from typeguard import check_argument_types
 
 from funasr.models.frontend.wav_frontend import WavFrontendMel23
 from funasr.tasks.diar import DiarTask
@@ -45,7 +44,6 @@
             device: str = "cpu",
             dtype: str = "float32",
     ):
-        assert check_argument_types()
 
         # 1. Build Diarization model
         diar_model, diar_train_args = build_model_from_file(
@@ -88,7 +86,6 @@
             diarization results
 
         """
-        assert check_argument_types()
         # Input as audio signal
         if isinstance(speech, np.ndarray):
             speech = torch.tensor(speech)
@@ -106,36 +103,6 @@
         results = self.diar_model.estimate_sequential(**batch)
 
         return results
-
-    @staticmethod
-    def from_pretrained(
-            model_tag: Optional[str] = None,
-            **kwargs: Optional[Any],
-    ):
-        """Build Speech2Diarization instance from the pretrained model.
-
-        Args:
-            model_tag (Optional[str]): Model tag of the pretrained models.
-                Currently, the tags of espnet_model_zoo are supported.
-
-        Returns:
-            Speech2Diarization: Speech2Diarization instance.
-
-        """
-        if model_tag is not None:
-            try:
-                from espnet_model_zoo.downloader import ModelDownloader
-
-            except ImportError:
-                logging.error(
-                    "`espnet_model_zoo` is not installed. "
-                    "Please install via `pip install -U espnet_model_zoo`."
-                )
-                raise
-            d = ModelDownloader()
-            kwargs.update(**d.download_and_unpack(model_tag))
-
-        return Speech2DiarizationEEND(**kwargs)
 
 
 class Speech2DiarizationSOND:
@@ -163,7 +130,6 @@
             smooth_size: int = 83,
             dur_threshold: float = 10,
     ):
-        assert check_argument_types()
 
         # TODO: 1. Build Diarization model
         diar_model, diar_train_args = build_model_from_file(
@@ -283,7 +249,6 @@
             diarization results for each speaker
 
         """
-        assert check_argument_types()
         # Input as audio signal
         if isinstance(speech, np.ndarray):
             speech = torch.tensor(speech)
@@ -305,33 +270,3 @@
         results, pse_labels = self.post_processing(logits, profile.shape[1], output_format)
 
         return results, pse_labels
-
-    @staticmethod
-    def from_pretrained(
-            model_tag: Optional[str] = None,
-            **kwargs: Optional[Any],
-    ):
-        """Build Speech2Xvector instance from the pretrained model.
-
-        Args:
-            model_tag (Optional[str]): Model tag of the pretrained models.
-                Currently, the tags of espnet_model_zoo are supported.
-
-        Returns:
-            Speech2Xvector: Speech2Xvector instance.
-
-        """
-        if model_tag is not None:
-            try:
-                from espnet_model_zoo.downloader import ModelDownloader
-
-            except ImportError:
-                logging.error(
-                    "`espnet_model_zoo` is not installed. "
-                    "Please install via `pip install -U espnet_model_zoo`."
-                )
-                raise
-            d = ModelDownloader()
-            kwargs.update(**d.download_and_unpack(model_tag))
-
-        return Speech2DiarizationSOND(**kwargs)
diff --git a/funasr/bin/diar_inference_launch.py b/funasr/bin/diar_inference_launch.py
index 820217b..03c9659 100755
--- a/funasr/bin/diar_inference_launch.py
+++ b/funasr/bin/diar_inference_launch.py
@@ -18,7 +18,6 @@
 import soundfile
 import torch
 from scipy.signal import medfilt
-from typeguard import check_argument_types
 
 from funasr.bin.diar_infer import Speech2DiarizationSOND, Speech2DiarizationEEND
 from funasr.datasets.iterable_dataset import load_bytes
@@ -52,7 +51,6 @@
         mode: str = "sond",
         **kwargs,
 ):
-    assert check_argument_types()
     ncpu = kwargs.get("ncpu", 1)
     torch.set_num_threads(ncpu)
     if batch_size > 1:
@@ -233,7 +231,6 @@
         param_dict: Optional[dict] = None,
         **kwargs,
 ):
-    assert check_argument_types()
     ncpu = kwargs.get("ncpu", 1)
     torch.set_num_threads(ncpu)
     if batch_size > 1:
diff --git a/funasr/bin/lm_inference_launch.py b/funasr/bin/lm_inference_launch.py
index c8482b8..236a923 100644
--- a/funasr/bin/lm_inference_launch.py
+++ b/funasr/bin/lm_inference_launch.py
@@ -15,7 +15,6 @@
 import numpy as np
 import torch
 from torch.nn.parallel import data_parallel
-from typeguard import check_argument_types
 
 from funasr.build_utils.build_model_from_file import build_model_from_file
 from funasr.build_utils.build_streaming_iterator import build_streaming_iterator
@@ -50,7 +49,6 @@
         param_dict: dict = None,
         **kwargs,
 ):
-    assert check_argument_types()
     ncpu = kwargs.get("ncpu", 1)
     torch.set_num_threads(ncpu)
 
diff --git a/funasr/bin/punc_inference_launch.py b/funasr/bin/punc_inference_launch.py
index 8fc15f0..5d917f5 100755
--- a/funasr/bin/punc_inference_launch.py
+++ b/funasr/bin/punc_inference_launch.py
@@ -14,7 +14,6 @@
 from typing import Union
 
 import torch
-from typeguard import check_argument_types
 
 from funasr.bin.punc_infer import Text2Punc, Text2PuncVADRealtime
 from funasr.torch_utils.set_all_random_seed import set_all_random_seed
@@ -38,7 +37,6 @@
         param_dict: dict = None,
         **kwargs,
 ):
-    assert check_argument_types()
     logging.basicConfig(
         level=log_level,
         format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
@@ -118,7 +116,6 @@
         param_dict: dict = None,
         **kwargs,
 ):
-    assert check_argument_types()
     ncpu = kwargs.get("ncpu", 1)
     torch.set_num_threads(ncpu)
 
diff --git a/funasr/bin/sv_infer.py b/funasr/bin/sv_infer.py
index 6e861da..346440a 100755
--- a/funasr/bin/sv_infer.py
+++ b/funasr/bin/sv_infer.py
@@ -12,8 +12,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.build_utils.build_model_from_file import build_model_from_file
 from funasr.torch_utils.device_funcs import to_device
@@ -42,7 +40,6 @@
             streaming: bool = False,
             embedding_node: str = "resnet1_dense",
     ):
-        assert check_argument_types()
 
         # TODO: 1. Build SV model
         sv_model, sv_train_args = build_model_from_file(
@@ -108,7 +105,6 @@
             embedding, ref_embedding, similarity_score
 
         """
-        assert check_argument_types()
         self.sv_model.eval()
         embedding = self.calculate_embedding(speech)
         ref_emb, score = None, None
@@ -117,35 +113,4 @@
             score = torch.cosine_similarity(embedding, ref_emb)
 
         results = (embedding, ref_emb, score)
-        assert check_return_type(results)
         return results
-
-    @staticmethod
-    def from_pretrained(
-            model_tag: Optional[str] = None,
-            **kwargs: Optional[Any],
-    ):
-        """Build Speech2Xvector instance from the pretrained model.
-
-        Args:
-            model_tag (Optional[str]): Model tag of the pretrained models.
-                Currently, the tags of espnet_model_zoo are supported.
-
-        Returns:
-            Speech2Xvector: Speech2Xvector instance.
-
-        """
-        if model_tag is not None:
-            try:
-                from espnet_model_zoo.downloader import ModelDownloader
-
-            except ImportError:
-                logging.error(
-                    "`espnet_model_zoo` is not installed. "
-                    "Please install via `pip install -U espnet_model_zoo`."
-                )
-                raise
-            d = ModelDownloader()
-            kwargs.update(**d.download_and_unpack(model_tag))
-
-        return Speech2Xvector(**kwargs)
diff --git a/funasr/bin/sv_inference_launch.py b/funasr/bin/sv_inference_launch.py
index d165736..2f9e276 100755
--- a/funasr/bin/sv_inference_launch.py
+++ b/funasr/bin/sv_inference_launch.py
@@ -15,7 +15,6 @@
 import numpy as np
 import torch
 from kaldiio import WriteHelper
-from typeguard import check_argument_types
 
 from funasr.bin.sv_infer import Speech2Xvector
 from funasr.build_utils.build_streaming_iterator import build_streaming_iterator
@@ -46,7 +45,6 @@
         param_dict: Optional[dict] = None,
         **kwargs,
 ):
-    assert check_argument_types()
     ncpu = kwargs.get("ncpu", 1)
     torch.set_num_threads(ncpu)
 
@@ -79,10 +77,7 @@
         embedding_node=embedding_node
     )
     logging.info("speech2xvector_kwargs: {}".format(speech2xvector_kwargs))
-    speech2xvector = Speech2Xvector.from_pretrained(
-        model_tag=model_tag,
-        **speech2xvector_kwargs,
-    )
+    speech2xvector = Speech2Xvector(**speech2xvector_kwargs)
     speech2xvector.sv_model.eval()
 
     def _forward(
diff --git a/funasr/bin/tokenize_text.py b/funasr/bin/tokenize_text.py
index dc565d0..6ec83a8 100755
--- a/funasr/bin/tokenize_text.py
+++ b/funasr/bin/tokenize_text.py
@@ -7,7 +7,6 @@
 from typing import List
 from typing import Optional
 
-from typeguard import check_argument_types
 
 from funasr.utils.cli_utils import get_commandline_args
 from funasr.text.build_tokenizer import build_tokenizer
@@ -81,7 +80,6 @@
     cleaner: Optional[str],
     g2p: Optional[str],
 ):
-    assert check_argument_types()
 
     logging.basicConfig(
         level=log_level,
diff --git a/funasr/bin/tp_infer.py b/funasr/bin/tp_infer.py
index 213c018..ede579c 100644
--- a/funasr/bin/tp_infer.py
+++ b/funasr/bin/tp_infer.py
@@ -9,7 +9,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
 from funasr.build_utils.build_model_from_file import build_model_from_file
 from funasr.models.frontend.wav_frontend import WavFrontend
 from funasr.text.token_id_converter import TokenIDConverter
@@ -26,7 +25,6 @@
             dtype: str = "float32",
             **kwargs,
     ):
-        assert check_argument_types()
         # 1. Build ASR model
         tp_model, tp_train_args = build_model_from_file(
             timestamp_infer_config, timestamp_model_file, cmvn_file=None, device=device, task_name="asr", mode="tp"
@@ -64,7 +62,6 @@
             speech_lengths: Union[torch.Tensor, np.ndarray] = None,
             text_lengths: Union[torch.Tensor, np.ndarray] = None
     ):
-        assert check_argument_types()
 
         # Input as audio signal
         if isinstance(speech, np.ndarray):
diff --git a/funasr/bin/tp_inference_launch.py b/funasr/bin/tp_inference_launch.py
index 3f8df0c..6c10254 100644
--- a/funasr/bin/tp_inference_launch.py
+++ b/funasr/bin/tp_inference_launch.py
@@ -13,7 +13,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
 
 from funasr.bin.tp_infer import Speech2Timestamp
 from funasr.build_utils.build_streaming_iterator import build_streaming_iterator
@@ -47,7 +46,6 @@
         seg_dict_file: Optional[str] = None,
         **kwargs,
 ):
-    assert check_argument_types()
     ncpu = kwargs.get("ncpu", 1)
     torch.set_num_threads(ncpu)
 
diff --git a/funasr/bin/vad_infer.py b/funasr/bin/vad_infer.py
index f888bb4..c60a8f1 100644
--- a/funasr/bin/vad_infer.py
+++ b/funasr/bin/vad_infer.py
@@ -13,7 +13,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
 
 from funasr.build_utils.build_model_from_file import build_model_from_file
 from funasr.models.frontend.wav_frontend import WavFrontend, WavFrontendOnline
@@ -42,7 +41,6 @@
             dtype: str = "float32",
             **kwargs,
     ):
-        assert check_argument_types()
 
         # 1. Build vad model
         vad_model, vad_infer_args = build_model_from_file(
@@ -76,7 +74,6 @@
             text, token, token_int, hyp
 
         """
-        assert check_argument_types()
 
         # Input as audio signal
         if isinstance(speech, np.ndarray):
@@ -149,7 +146,6 @@
             text, token, token_int, hyp
 
         """
-        assert check_argument_types()
 
         # Input as audio signal
         if isinstance(speech, np.ndarray):
diff --git a/funasr/bin/vad_inference_launch.py b/funasr/bin/vad_inference_launch.py
index 829f157..47af011 100644
--- a/funasr/bin/vad_inference_launch.py
+++ b/funasr/bin/vad_inference_launch.py
@@ -18,7 +18,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
 from funasr.build_utils.build_streaming_iterator import build_streaming_iterator
 from funasr.fileio.datadir_writer import DatadirWriter
 from funasr.torch_utils.set_all_random_seed import set_all_random_seed
@@ -47,7 +46,6 @@
         num_workers: int = 1,
         **kwargs,
 ):
-    assert check_argument_types()
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
 
@@ -148,7 +146,6 @@
         num_workers: int = 1,
         **kwargs,
 ):
-    assert check_argument_types()
 
     logging.basicConfig(
         level=log_level,
diff --git a/funasr/build_utils/build_model_from_file.py b/funasr/build_utils/build_model_from_file.py
index 8fd4e46..26542cd 100644
--- a/funasr/build_utils/build_model_from_file.py
+++ b/funasr/build_utils/build_model_from_file.py
@@ -6,7 +6,6 @@
 
 import torch
 import yaml
-from typeguard import check_argument_types
 
 from funasr.build_utils.build_model import build_model
 from funasr.models.base_model import FunASRModel
@@ -30,7 +29,6 @@
         device: Device type, "cpu", "cuda", or "cuda:N".
 
     """
-    assert check_argument_types()
     if config_file is None:
         assert model_file is not None, (
             "The argument 'model_file' must be provided "
diff --git a/funasr/build_utils/build_streaming_iterator.py b/funasr/build_utils/build_streaming_iterator.py
index 1b16cf4..02fc263 100644
--- a/funasr/build_utils/build_streaming_iterator.py
+++ b/funasr/build_utils/build_streaming_iterator.py
@@ -1,6 +1,5 @@
 import numpy as np
 from torch.utils.data import DataLoader
-from typeguard import check_argument_types
 
 from funasr.datasets.iterable_dataset import IterableESPnetDataset
 from funasr.datasets.small_datasets.collate_fn import CommonCollateFn
@@ -23,7 +22,6 @@
         train: bool = False,
 ) -> DataLoader:
     """Build DataLoader using iterable dataset"""
-    assert check_argument_types()
 
     # preprocess
     if preprocess_fn is not None:
diff --git a/funasr/build_utils/build_sv_model.py b/funasr/build_utils/build_sv_model.py
index c0f1ae8..55df75a 100644
--- a/funasr/build_utils/build_sv_model.py
+++ b/funasr/build_utils/build_sv_model.py
@@ -1,7 +1,6 @@
 import logging
 
 import torch
-from typeguard import check_return_type
 
 from funasr.layers.abs_normalize import AbsNormalize
 from funasr.layers.global_mvn import GlobalMVN
@@ -254,5 +253,4 @@
     if args.init is not None:
         initialize(model, args.init)
 
-    assert check_return_type(model)
     return model
diff --git a/funasr/build_utils/build_trainer.py b/funasr/build_utils/build_trainer.py
index aff99b5..03aa780 100644
--- a/funasr/build_utils/build_trainer.py
+++ b/funasr/build_utils/build_trainer.py
@@ -25,7 +25,6 @@
 import torch
 import torch.nn
 import torch.optim
-from typeguard import check_argument_types
 
 from funasr.iterators.abs_iter_factory import AbsIterFactory
 from funasr.main_funcs.average_nbest_models import average_nbest_models
@@ -118,7 +117,6 @@
 
     def build_options(self, args: argparse.Namespace) -> TrainerOptions:
         """Build options consumed by train(), eval()"""
-        assert check_argument_types()
         return build_dataclass(TrainerOptions, args)
 
     @classmethod
@@ -156,7 +154,6 @@
 
     def run(self) -> None:
         """Perform training. This method performs the main process of training."""
-        assert check_argument_types()
         # NOTE(kamo): Don't check the type more strictly as far trainer_options
         model = self.model
         optimizers = self.optimizers
@@ -522,7 +519,6 @@
             options: TrainerOptions,
             distributed_option: DistributedOption,
     ) -> Tuple[bool, bool]:
-        assert check_argument_types()
 
         grad_noise = options.grad_noise
         accum_grad = options.accum_grad
@@ -758,7 +754,6 @@
             options: TrainerOptions,
             distributed_option: DistributedOption,
     ) -> None:
-        assert check_argument_types()
         ngpu = options.ngpu
         # no_forward_run = options.no_forward_run
         distributed = distributed_option.distributed
diff --git a/funasr/datasets/collate_fn.py b/funasr/datasets/collate_fn.py
index d34d610..cbc1f0b 100644
--- a/funasr/datasets/collate_fn.py
+++ b/funasr/datasets/collate_fn.py
@@ -6,8 +6,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.modules.nets_utils import pad_list
 
@@ -22,7 +20,6 @@
             not_sequence: Collection[str] = (),
             max_sample_size=None
     ):
-        assert check_argument_types()
         self.float_pad_value = float_pad_value
         self.int_pad_value = int_pad_value
         self.not_sequence = set(not_sequence)
@@ -53,7 +50,6 @@
 ) -> Tuple[List[str], Dict[str, torch.Tensor]]:
     """Concatenate ndarray-list to an array and convert to torch.Tensor.
     """
-    assert check_argument_types()
     uttids = [u for u, _ in data]
     data = [d for _, d in data]
 
@@ -79,7 +75,6 @@
             output[key + "_lengths"] = lens
 
     output = (uttids, output)
-    assert check_return_type(output)
     return output
 
 def crop_to_max_size(feature, target_size):
@@ -99,7 +94,6 @@
         not_sequence: Collection[str] = (),
 ) -> Tuple[List[str], Dict[str, torch.Tensor]]:
     # mainly for pre-training
-    assert check_argument_types()
     uttids = [u for u, _ in data]
     data = [d for _, d in data]
 
@@ -131,5 +125,4 @@
             output[key + "_lengths"] = lens
 
     output = (uttids, output)
-    assert check_return_type(output)
     return output
\ No newline at end of file
diff --git a/funasr/datasets/dataset.py b/funasr/datasets/dataset.py
index 979479c..407f6aa 100644
--- a/funasr/datasets/dataset.py
+++ b/funasr/datasets/dataset.py
@@ -23,8 +23,6 @@
 import numpy as np
 import torch
 from torch.utils.data.dataset import Dataset
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.fileio.npy_scp import NpyScpReader
 from funasr.fileio.rand_gen_dataset import FloatRandomGenerateDataset
@@ -37,7 +35,6 @@
 
 class AdapterForSoundScpReader(collections.abc.Mapping):
     def __init__(self, loader, dtype=None):
-        assert check_argument_types()
         self.loader = loader
         self.dtype = dtype
         self.rate = None
@@ -284,7 +281,6 @@
         max_cache_fd: int = 0,
         dest_sample_rate: int = 16000,
     ):
-        assert check_argument_types()
         if len(path_name_type_list) == 0:
             raise ValueError(
                 '1 or more elements are required for "path_name_type_list"'
@@ -379,7 +375,6 @@
         return _mes
 
     def __getitem__(self, uid: Union[str, int]) -> Tuple[str, Dict[str, np.ndarray]]:
-        assert check_argument_types()
 
         # Change integer-id to string-id
         if isinstance(uid, int):
@@ -444,5 +439,4 @@
             self.cache[uid] = data
 
         retval = uid, data
-        assert check_return_type(retval)
         return retval
diff --git a/funasr/datasets/iterable_dataset.py b/funasr/datasets/iterable_dataset.py
index d240d93..6398e0c 100644
--- a/funasr/datasets/iterable_dataset.py
+++ b/funasr/datasets/iterable_dataset.py
@@ -16,7 +16,6 @@
 import torchaudio
 import soundfile
 from torch.utils.data.dataset import IterableDataset
-from typeguard import check_argument_types
 import os.path
 
 from funasr.datasets.dataset import ESPnetDataset
@@ -121,7 +120,6 @@
             int_dtype: str = "long",
             key_file: str = None,
     ):
-        assert check_argument_types()
         if len(path_name_type_list) == 0:
             raise ValueError(
                 '1 or more elements are required for "path_name_type_list"'
diff --git a/funasr/datasets/large_datasets/build_dataloader.py b/funasr/datasets/large_datasets/build_dataloader.py
index aa5d9be..7a1a906 100644
--- a/funasr/datasets/large_datasets/build_dataloader.py
+++ b/funasr/datasets/large_datasets/build_dataloader.py
@@ -6,7 +6,6 @@
 
 import sentencepiece as spm
 from torch.utils.data import DataLoader
-from typeguard import check_argument_types
 
 from funasr.datasets.large_datasets.dataset import Dataset
 from funasr.iterators.abs_iter_factory import AbsIterFactory
@@ -43,7 +42,6 @@
 
 class SentencepiecesTokenizer(AbsTokenizer):
     def __init__(self, model: Union[Path, str]):
-        assert check_argument_types()
         self.model = str(model)
         self.sp = None
 
diff --git a/funasr/datasets/preprocessor.py b/funasr/datasets/preprocessor.py
index 758c750..cb4288c 100644
--- a/funasr/datasets/preprocessor.py
+++ b/funasr/datasets/preprocessor.py
@@ -11,8 +11,6 @@
 import numpy as np
 import scipy.signal
 import soundfile
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.text.build_tokenizer import build_tokenizer
 from funasr.text.cleaner import TextCleaner
@@ -268,7 +266,6 @@
     def _speech_process(
             self, data: Dict[str, Union[str, np.ndarray]]
     ) -> Dict[str, Union[str, np.ndarray]]:
-        assert check_argument_types()
         if self.speech_name in data:
             if self.train and (self.rirs is not None or self.noises is not None):
                 speech = data[self.speech_name]
@@ -355,7 +352,6 @@
                 speech = data[self.speech_name]
                 ma = np.max(np.abs(speech))
                 data[self.speech_name] = speech * self.speech_volume_normalize / ma
-        assert check_return_type(data)
         return data
 
     def _text_process(
@@ -372,13 +368,11 @@
                 tokens = self.tokenizer.text2tokens(text)
             text_ints = self.token_id_converter.tokens2ids(tokens)
             data[self.text_name] = np.array(text_ints, dtype=np.int64)
-        assert check_return_type(data)
         return data
 
     def __call__(
             self, uid: str, data: Dict[str, Union[str, np.ndarray]]
     ) -> Dict[str, np.ndarray]:
-        assert check_argument_types()
 
         data = self._speech_process(data)
         data = self._text_process(data)
@@ -445,7 +439,6 @@
                 tokens = self.tokenizer.text2tokens(text)
             text_ints = self.token_id_converter.tokens2ids(tokens)
             data[self.text_name] = np.array(text_ints, dtype=np.int64)
-        assert check_return_type(data)
         return data
 
 
@@ -502,13 +495,11 @@
                 tokens = self.tokenizer.text2tokens(text)
                 text_ints = self.token_id_converter.tokens2ids(tokens)
                 data[text_n] = np.array(text_ints, dtype=np.int64)
-        assert check_return_type(data)
         return data
 
     def __call__(
             self, uid: str, data: Dict[str, Union[str, np.ndarray]]
     ) -> Dict[str, np.ndarray]:
-        assert check_argument_types()
 
         if self.speech_name in data:
             # Nothing now: candidates:
@@ -612,7 +603,6 @@
                 tokens = self.tokenizer[i].text2tokens(text)
                 text_ints = self.token_id_converter[i].tokens2ids(tokens)
                 data[text_name] = np.array(text_ints, dtype=np.int64)
-        assert check_return_type(data)
         return data
 
 class CodeMixTokenizerCommonPreprocessor(CommonPreprocessor):
@@ -690,7 +680,6 @@
     def __call__(
             self, uid: str, data: Dict[str, Union[list, str, np.ndarray]]
     ) -> Dict[str, Union[list, np.ndarray]]:
-        assert check_argument_types()
         # Split words.
         if isinstance(data[self.text_name], str):
             split_text = self.split_words(data[self.text_name])
diff --git a/funasr/datasets/small_datasets/collate_fn.py b/funasr/datasets/small_datasets/collate_fn.py
index 573f581..5fd4162 100644
--- a/funasr/datasets/small_datasets/collate_fn.py
+++ b/funasr/datasets/small_datasets/collate_fn.py
@@ -6,8 +6,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.modules.nets_utils import pad_list
 
@@ -22,7 +20,6 @@
             not_sequence: Collection[str] = (),
             max_sample_size=None
     ):
-        assert check_argument_types()
         self.float_pad_value = float_pad_value
         self.int_pad_value = int_pad_value
         self.not_sequence = set(not_sequence)
@@ -53,7 +50,6 @@
 ) -> Tuple[List[str], Dict[str, torch.Tensor]]:
     """Concatenate ndarray-list to an array and convert to torch.Tensor.
     """
-    assert check_argument_types()
     uttids = [u for u, _ in data]
     data = [d for _, d in data]
 
@@ -79,7 +75,6 @@
             output[key + "_lengths"] = lens
 
     output = (uttids, output)
-    assert check_return_type(output)
     return output
 
 def crop_to_max_size(feature, target_size):
diff --git a/funasr/datasets/small_datasets/dataset.py b/funasr/datasets/small_datasets/dataset.py
index 123f109..bee9f50 100644
--- a/funasr/datasets/small_datasets/dataset.py
+++ b/funasr/datasets/small_datasets/dataset.py
@@ -15,8 +15,6 @@
 import numpy as np
 import torch
 from torch.utils.data.dataset import Dataset
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.fileio.npy_scp import NpyScpReader
 from funasr.fileio.sound_scp import SoundScpReader
@@ -24,7 +22,6 @@
 
 class AdapterForSoundScpReader(collections.abc.Mapping):
     def __init__(self, loader, dtype=None):
-        assert check_argument_types()
         self.loader = loader
         self.dtype = dtype
         self.rate = None
@@ -112,7 +109,6 @@
             speed_perturb: Union[list, tuple] = None,
             mode: str = "train",
     ):
-        assert check_argument_types()
         if len(path_name_type_list) == 0:
             raise ValueError(
                 '1 or more elements are required for "path_name_type_list"'
@@ -207,7 +203,6 @@
         return _mes
 
     def __getitem__(self, uid: Union[str, int]) -> Tuple[str, Dict[str, np.ndarray]]:
-        assert check_argument_types()
 
         # Change integer-id to string-id
         if isinstance(uid, int):
@@ -265,5 +260,4 @@
             data[name] = value
 
         retval = uid, data
-        assert check_return_type(retval)
         return retval
diff --git a/funasr/datasets/small_datasets/length_batch_sampler.py b/funasr/datasets/small_datasets/length_batch_sampler.py
index 8ee8bdc..28404e3 100644
--- a/funasr/datasets/small_datasets/length_batch_sampler.py
+++ b/funasr/datasets/small_datasets/length_batch_sampler.py
@@ -4,7 +4,6 @@
 from typing import Tuple
 from typing import Union
 
-from typeguard import check_argument_types
 
 from funasr.fileio.read_text import load_num_sequence_text
 from funasr.samplers.abs_sampler import AbsSampler
@@ -21,7 +20,6 @@
         drop_last: bool = False,
         padding: bool = True,
     ):
-        assert check_argument_types()
         assert batch_bins > 0
         if sort_batch != "ascending" and sort_batch != "descending":
             raise ValueError(
diff --git a/funasr/datasets/small_datasets/preprocessor.py b/funasr/datasets/small_datasets/preprocessor.py
index d80f48a..0ebf325 100644
--- a/funasr/datasets/small_datasets/preprocessor.py
+++ b/funasr/datasets/small_datasets/preprocessor.py
@@ -10,8 +10,6 @@
 import numpy as np
 import scipy.signal
 import soundfile
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.text.build_tokenizer import build_tokenizer
 from funasr.text.cleaner import TextCleaner
@@ -260,7 +258,6 @@
     def _speech_process(
             self, data: Dict[str, Union[str, np.ndarray]]
     ) -> Dict[str, Union[str, np.ndarray]]:
-        assert check_argument_types()
         if self.speech_name in data:
             if self.train and (self.rirs is not None or self.noises is not None):
                 speech = data[self.speech_name]
@@ -347,7 +344,6 @@
                 speech = data[self.speech_name]
                 ma = np.max(np.abs(speech))
                 data[self.speech_name] = speech * self.speech_volume_normalize / ma
-        assert check_return_type(data)
         return data
 
     def _text_process(
@@ -365,13 +361,11 @@
                 tokens = self.tokenizer.text2tokens(text)
             text_ints = self.token_id_converter.tokens2ids(tokens)
             data[self.text_name] = np.array(text_ints, dtype=np.int64)
-        assert check_return_type(data)
         return data
 
     def __call__(
             self, uid: str, data: Dict[str, Union[str, np.ndarray]]
     ) -> Dict[str, np.ndarray]:
-        assert check_argument_types()
 
         data = self._speech_process(data)
         data = self._text_process(data)
@@ -439,7 +433,6 @@
                 tokens = self.tokenizer.text2tokens(text)
             text_ints = self.token_id_converter.tokens2ids(tokens)
             data[self.text_name] = np.array(text_ints, dtype=np.int64)
-        assert check_return_type(data)
         return data
 
 
@@ -496,13 +489,11 @@
                 tokens = self.tokenizer.text2tokens(text)
                 text_ints = self.token_id_converter.tokens2ids(tokens)
                 data[text_n] = np.array(text_ints, dtype=np.int64)
-        assert check_return_type(data)
         return data
 
     def __call__(
             self, uid: str, data: Dict[str, Union[str, np.ndarray]]
     ) -> Dict[str, np.ndarray]:
-        assert check_argument_types()
 
         if self.speech_name in data:
             # Nothing now: candidates:
@@ -606,7 +597,6 @@
                 tokens = self.tokenizer[i].text2tokens(text)
                 text_ints = self.token_id_converter[i].tokens2ids(tokens)
                 data[text_name] = np.array(text_ints, dtype=np.int64)
-        assert check_return_type(data)
         return data
 
 
@@ -685,7 +675,6 @@
     def __call__(
             self, uid: str, data: Dict[str, Union[list, str, np.ndarray]]
     ) -> Dict[str, Union[list, np.ndarray]]:
-        assert check_argument_types()
         # Split words.
         if isinstance(data[self.text_name], str):
             split_text = self.split_words(data[self.text_name])
diff --git a/funasr/export/export_model.py b/funasr/export/export_model.py
index b141532..a68724a 100644
--- a/funasr/export/export_model.py
+++ b/funasr/export/export_model.py
@@ -1,7 +1,6 @@
 import json
 from typing import Union, Dict
 from pathlib import Path
-from typeguard import check_argument_types
 
 import os
 import logging
@@ -26,7 +25,6 @@
         calib_num: int = 200,
         model_revision: str = None,
     ):
-        assert check_argument_types()
         self.set_all_random_seed(0)
 
         self.cache_dir = cache_dir
diff --git a/funasr/fileio/datadir_writer.py b/funasr/fileio/datadir_writer.py
index 67ec61c..e555ebc 100644
--- a/funasr/fileio/datadir_writer.py
+++ b/funasr/fileio/datadir_writer.py
@@ -2,8 +2,6 @@
 from typing import Union
 import warnings
 
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 
 class DatadirWriter:
@@ -20,7 +18,6 @@
     """
 
     def __init__(self, p: Union[Path, str]):
-        assert check_argument_types()
         self.path = Path(p)
         self.chilidren = {}
         self.fd = None
@@ -31,7 +28,6 @@
         return self
 
     def __getitem__(self, key: str) -> "DatadirWriter":
-        assert check_argument_types()
         if self.fd is not None:
             raise RuntimeError("This writer points out a file")
 
@@ -41,11 +37,9 @@
             self.has_children = True
 
         retval = self.chilidren[key]
-        assert check_return_type(retval)
         return retval
 
     def __setitem__(self, key: str, value: str):
-        assert check_argument_types()
         if self.has_children:
             raise RuntimeError("This writer points out a directory")
         if key in self.keys:
diff --git a/funasr/fileio/npy_scp.py b/funasr/fileio/npy_scp.py
index 26666b6..2bd5b58 100644
--- a/funasr/fileio/npy_scp.py
+++ b/funasr/fileio/npy_scp.py
@@ -3,7 +3,6 @@
 from typing import Union
 
 import numpy as np
-from typeguard import check_argument_types
 
 from funasr.fileio.read_text import read_2column_text
 
@@ -25,7 +24,6 @@
     """
 
     def __init__(self, outdir: Union[Path, str], scpfile: Union[Path, str]):
-        assert check_argument_types()
         self.dir = Path(outdir)
         self.dir.mkdir(parents=True, exist_ok=True)
         scpfile = Path(scpfile)
@@ -73,7 +71,6 @@
     """
 
     def __init__(self, fname: Union[Path, str]):
-        assert check_argument_types()
         self.fname = Path(fname)
         self.data = read_2column_text(fname)
 
diff --git a/funasr/fileio/rand_gen_dataset.py b/funasr/fileio/rand_gen_dataset.py
index 2faef3a..699e67a 100644
--- a/funasr/fileio/rand_gen_dataset.py
+++ b/funasr/fileio/rand_gen_dataset.py
@@ -3,7 +3,6 @@
 from typing import Union
 
 import numpy as np
-from typeguard import check_argument_types
 
 from funasr.fileio.read_text import load_num_sequence_text
 
@@ -29,7 +28,6 @@
         dtype: Union[str, np.dtype] = "float32",
         loader_type: str = "csv_int",
     ):
-        assert check_argument_types()
         shape_file = Path(shape_file)
         self.utt2shape = load_num_sequence_text(shape_file, loader_type)
         self.dtype = np.dtype(dtype)
@@ -68,7 +66,6 @@
         dtype: Union[str, np.dtype] = "int64",
         loader_type: str = "csv_int",
     ):
-        assert check_argument_types()
         shape_file = Path(shape_file)
         self.utt2shape = load_num_sequence_text(shape_file, loader_type)
         self.dtype = np.dtype(dtype)
diff --git a/funasr/fileio/read_text.py b/funasr/fileio/read_text.py
index e26e7a1..f140c31 100644
--- a/funasr/fileio/read_text.py
+++ b/funasr/fileio/read_text.py
@@ -4,7 +4,6 @@
 from typing import List
 from typing import Union
 
-from typeguard import check_argument_types
 
 
 def read_2column_text(path: Union[Path, str]) -> Dict[str, str]:
@@ -19,7 +18,6 @@
         {'key1': '/some/path/a.wav', 'key2': '/some/path/b.wav'}
 
     """
-    assert check_argument_types()
 
     data = {}
     with Path(path).open("r", encoding="utf-8") as f:
@@ -47,7 +45,6 @@
         >>> d = load_num_sequence_text('text')
         >>> np.testing.assert_array_equal(d["key1"], np.array([1, 2, 3]))
     """
-    assert check_argument_types()
     if loader_type == "text_int":
         delimiter = " "
         dtype = int
diff --git a/funasr/fileio/sound_scp.py b/funasr/fileio/sound_scp.py
index 9b25fe5..b912f1e 100644
--- a/funasr/fileio/sound_scp.py
+++ b/funasr/fileio/sound_scp.py
@@ -6,7 +6,6 @@
 import numpy as np
 import soundfile
 import librosa
-from typeguard import check_argument_types
 
 import torch
 import torchaudio
@@ -106,7 +105,6 @@
         dest_sample_rate: int = 16000,
         speed_perturb: Union[list, tuple] = None,
     ):
-        assert check_argument_types()
         self.fname = fname
         self.dtype = dtype
         self.always_2d = always_2d
@@ -179,7 +177,6 @@
         format="wav",
         dtype=None,
     ):
-        assert check_argument_types()
         self.dir = Path(outdir)
         self.dir.mkdir(parents=True, exist_ok=True)
         scpfile = Path(scpfile)
diff --git a/funasr/iterators/chunk_iter_factory.py b/funasr/iterators/chunk_iter_factory.py
index cec6370..5a54632 100644
--- a/funasr/iterators/chunk_iter_factory.py
+++ b/funasr/iterators/chunk_iter_factory.py
@@ -9,7 +9,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
 
 from funasr.iterators.abs_iter_factory import AbsIterFactory
 from funasr.iterators.sequence_iter_factory import SequenceIterFactory
@@ -51,7 +50,6 @@
         collate_fn=None,
         pin_memory: bool = False,
     ):
-        assert check_argument_types()
         assert all(len(x) == 1 for x in batches), "batch-size must be 1"
 
         self.per_sample_iter_factory = SequenceIterFactory(
diff --git a/funasr/iterators/multiple_iter_factory.py b/funasr/iterators/multiple_iter_factory.py
index 088016c..3587a2a 100644
--- a/funasr/iterators/multiple_iter_factory.py
+++ b/funasr/iterators/multiple_iter_factory.py
@@ -4,7 +4,6 @@
 from typing import Iterator
 
 import numpy as np
-from typeguard import check_argument_types
 
 from funasr.iterators.abs_iter_factory import AbsIterFactory
 
@@ -16,7 +15,6 @@
         seed: int = 0,
         shuffle: bool = False,
     ):
-        assert check_argument_types()
         self.build_funcs = list(build_funcs)
         self.seed = seed
         self.shuffle = shuffle
diff --git a/funasr/iterators/sequence_iter_factory.py b/funasr/iterators/sequence_iter_factory.py
index 39d0834..41de37c 100644
--- a/funasr/iterators/sequence_iter_factory.py
+++ b/funasr/iterators/sequence_iter_factory.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 from torch.utils.data import DataLoader
-from typeguard import check_argument_types
 
 from funasr.iterators.abs_iter_factory import AbsIterFactory
 from funasr.samplers.abs_sampler import AbsSampler
@@ -46,7 +45,6 @@
         collate_fn=None,
         pin_memory: bool = False,
     ):
-        assert check_argument_types()
 
         if not isinstance(batches, AbsSampler):
             self.sampler = RawSampler(batches)
diff --git a/funasr/layers/global_mvn.py b/funasr/layers/global_mvn.py
index 8e43582..b94e9ca 100644
--- a/funasr/layers/global_mvn.py
+++ b/funasr/layers/global_mvn.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
 
 from funasr.modules.nets_utils import make_pad_mask
 from funasr.layers.abs_normalize import AbsNormalize
@@ -28,7 +27,6 @@
         norm_vars: bool = True,
         eps: float = 1.0e-20,
     ):
-        assert check_argument_types()
         super().__init__()
         self.norm_means = norm_means
         self.norm_vars = norm_vars
diff --git a/funasr/layers/label_aggregation.py b/funasr/layers/label_aggregation.py
index 29a08a9..8366a79 100644
--- a/funasr/layers/label_aggregation.py
+++ b/funasr/layers/label_aggregation.py
@@ -1,5 +1,4 @@
 import torch
-from typeguard import check_argument_types
 from typing import Optional
 from typing import Tuple
 
@@ -13,7 +12,6 @@
         hop_length: int = 128,
         center: bool = True,
     ):
-        assert check_argument_types()
         super().__init__()
 
         self.win_length = win_length
diff --git a/funasr/layers/mask_along_axis.py b/funasr/layers/mask_along_axis.py
index e49e621..416c4ea 100644
--- a/funasr/layers/mask_along_axis.py
+++ b/funasr/layers/mask_along_axis.py
@@ -1,6 +1,5 @@
 import math
 import torch
-from typeguard import check_argument_types
 from typing import Sequence
 from typing import Union
 
@@ -147,7 +146,6 @@
         dim: Union[int, str] = "time",
         replace_with_zero: bool = True,
     ):
-        assert check_argument_types()
         if isinstance(mask_width_range, int):
             mask_width_range = (0, mask_width_range)
         if len(mask_width_range) != 2:
@@ -214,7 +212,6 @@
         dim: Union[int, str] = "time",
         replace_with_zero: bool = True,
     ):
-        assert check_argument_types()
         if isinstance(mask_width_ratio_range, float):
             mask_width_ratio_range = (0.0, mask_width_ratio_range)
         if len(mask_width_ratio_range) != 2:
@@ -283,7 +280,6 @@
         replace_with_zero: bool = True,
         lfr_rate: int = 1,
     ):
-        assert check_argument_types()
         if isinstance(mask_width_range, int):
             mask_width_range = (0, mask_width_range)
         if len(mask_width_range) != 2:
diff --git a/funasr/layers/sinc_conv.py b/funasr/layers/sinc_conv.py
index 33df97f..ab16a73 100644
--- a/funasr/layers/sinc_conv.py
+++ b/funasr/layers/sinc_conv.py
@@ -5,7 +5,6 @@
 """Sinc convolutions."""
 import math
 import torch
-from typeguard import check_argument_types
 from typing import Union
 
 
@@ -71,7 +70,6 @@
             window_func: Window function on the filter, one of ["hamming", "none"].
             fs (str, int, float): Sample rate of the input data
         """
-        assert check_argument_types()
         super().__init__()
         window_funcs = {
             "none": self.none_window,
@@ -208,7 +206,6 @@
             torch.Tensor: Filter start frequenc铆es.
             torch.Tensor: Filter stop frequencies.
         """
-        assert check_argument_types()
         # min and max bandpass edge frequencies
         min_frequency = torch.tensor(30.0)
         max_frequency = torch.tensor(fs * 0.5)
@@ -257,7 +254,6 @@
             torch.Tensor: Filter start frequenc铆es.
             torch.Tensor: Filter stop frequenc铆es.
         """
-        assert check_argument_types()
         # min and max BARK center frequencies by approximation
         min_center_frequency = torch.tensor(70.0)
         max_center_frequency = torch.tensor(fs * 0.45)
diff --git a/funasr/layers/stft.py b/funasr/layers/stft.py
index 376b5a3..dfb6919 100644
--- a/funasr/layers/stft.py
+++ b/funasr/layers/stft.py
@@ -5,7 +5,6 @@
 
 import torch
 from torch_complex.tensor import ComplexTensor
-from typeguard import check_argument_types
 
 from funasr.modules.nets_utils import make_pad_mask
 from funasr.layers.complex_utils import is_complex
@@ -30,7 +29,6 @@
         normalized: bool = False,
         onesided: bool = True,
     ):
-        assert check_argument_types()
         super().__init__()
         self.n_fft = n_fft
         if win_length is None:
diff --git a/funasr/layers/utterance_mvn.py b/funasr/layers/utterance_mvn.py
index 50f27cd..7722974 100644
--- a/funasr/layers/utterance_mvn.py
+++ b/funasr/layers/utterance_mvn.py
@@ -1,7 +1,6 @@
 from typing import Tuple
 
 import torch
-from typeguard import check_argument_types
 
 from funasr.modules.nets_utils import make_pad_mask
 from funasr.layers.abs_normalize import AbsNormalize
@@ -14,7 +13,6 @@
         norm_vars: bool = False,
         eps: float = 1.0e-20,
     ):
-        assert check_argument_types()
         super().__init__()
         self.norm_means = norm_means
         self.norm_vars = norm_vars
diff --git a/funasr/main_funcs/average_nbest_models.py b/funasr/main_funcs/average_nbest_models.py
index d8df949..96e1384 100644
--- a/funasr/main_funcs/average_nbest_models.py
+++ b/funasr/main_funcs/average_nbest_models.py
@@ -8,7 +8,6 @@
 from io import BytesIO
 
 import torch
-from typeguard import check_argument_types
 from typing import Collection
 
 from funasr.train.reporter import Reporter
@@ -34,7 +33,6 @@
         nbest: Number of best model files to be averaged
         suffix: A suffix added to the averaged model file name
     """
-    assert check_argument_types()
     if isinstance(nbest, int):
         nbests = [nbest]
     else:
diff --git a/funasr/main_funcs/collect_stats.py b/funasr/main_funcs/collect_stats.py
index 584b85a..ee2182c 100644
--- a/funasr/main_funcs/collect_stats.py
+++ b/funasr/main_funcs/collect_stats.py
@@ -11,7 +11,6 @@
 import torch
 from torch.nn.parallel import data_parallel
 from torch.utils.data import DataLoader
-from typeguard import check_argument_types
 
 from funasr.fileio.datadir_writer import DatadirWriter
 from funasr.fileio.npy_scp import NpyScpWriter
@@ -37,7 +36,6 @@
     This method is used before executing train().
 
     """
-    assert check_argument_types()
 
     npy_scp_writers = {}
     for itr, mode in zip([train_iter, valid_iter], ["train", "valid"]):
diff --git a/funasr/models/ctc.py b/funasr/models/ctc.py
index 64b8710..d3c10fa 100644
--- a/funasr/models/ctc.py
+++ b/funasr/models/ctc.py
@@ -2,7 +2,6 @@
 
 import torch
 import torch.nn.functional as F
-from typeguard import check_argument_types
 
 
 class CTC(torch.nn.Module):
@@ -25,7 +24,6 @@
         reduce: bool = True,
         ignore_nan_grad: bool = True,
     ):
-        assert check_argument_types()
         super().__init__()
         eprojs = encoder_output_size
         self.dropout_rate = dropout_rate
@@ -41,11 +39,6 @@
             if ignore_nan_grad:
                 logging.warning("ignore_nan_grad option is not supported for warp_ctc")
             self.ctc_loss = warp_ctc.CTCLoss(size_average=True, reduce=reduce)
-
-        elif self.ctc_type == "gtnctc":
-            from espnet.nets.pytorch_backend.gtn_ctc import GTNCTCLossFunction
-
-            self.ctc_loss = GTNCTCLossFunction.apply
         else:
             raise ValueError(
                 f'ctc_type must be "builtin" or "warpctc": {self.ctc_type}'
diff --git a/funasr/models/data2vec.py b/funasr/models/data2vec.py
index e5bd640..92c95cc 100644
--- a/funasr/models/data2vec.py
+++ b/funasr/models/data2vec.py
@@ -10,7 +10,6 @@
 from typing import Tuple
 
 import torch
-from typeguard import check_argument_types
 
 from funasr.layers.abs_normalize import AbsNormalize
 from funasr.models.encoder.abs_encoder import AbsEncoder
@@ -40,7 +39,6 @@
             preencoder: Optional[AbsPreEncoder],
             encoder: AbsEncoder,
     ):
-        assert check_argument_types()
 
         super().__init__()
 
diff --git a/funasr/models/decoder/contextual_decoder.py b/funasr/models/decoder/contextual_decoder.py
index 78105ab..0e69c44 100644
--- a/funasr/models/decoder/contextual_decoder.py
+++ b/funasr/models/decoder/contextual_decoder.py
@@ -7,7 +7,6 @@
 
 from funasr.modules.streaming_utils import utils as myutils
 from funasr.models.decoder.transformer_decoder import BaseTransformerDecoder
-from typeguard import check_argument_types
 
 from funasr.modules.attention import MultiHeadedAttentionSANMDecoder, MultiHeadedAttentionCrossAtt
 from funasr.modules.embedding import PositionalEncoding
@@ -126,7 +125,6 @@
         kernel_size: int = 21,
         sanm_shfit: int = 0,
     ):
-        assert check_argument_types()
         super().__init__(
             vocab_size=vocab_size,
             encoder_output_size=encoder_output_size,
diff --git a/funasr/models/decoder/rnn_decoder.py b/funasr/models/decoder/rnn_decoder.py
index 80709c9..cb119e1 100644
--- a/funasr/models/decoder/rnn_decoder.py
+++ b/funasr/models/decoder/rnn_decoder.py
@@ -3,7 +3,6 @@
 import numpy as np
 import torch
 import torch.nn.functional as F
-from typeguard import check_argument_types
 
 from funasr.modules.nets_utils import make_pad_mask
 from funasr.modules.nets_utils import to_device
@@ -97,7 +96,6 @@
         att_conf: dict = get_default_kwargs(build_attention_list),
     ):
         # FIXME(kamo): The parts of num_spk should be refactored more more more
-        assert check_argument_types()
         if rnn_type not in {"lstm", "gru"}:
             raise ValueError(f"Not supported: rnn_type={rnn_type}")
 
diff --git a/funasr/models/decoder/rnnt_decoder.py b/funasr/models/decoder/rnnt_decoder.py
index a0fe9ea..0109cc5 100644
--- a/funasr/models/decoder/rnnt_decoder.py
+++ b/funasr/models/decoder/rnnt_decoder.py
@@ -3,7 +3,6 @@
 from typing import List, Optional, Tuple
 
 import torch
-from typeguard import check_argument_types
 
 from funasr.modules.beam_search.beam_search_transducer import Hypothesis
 from funasr.models.specaug.specaug import SpecAug
@@ -38,7 +37,6 @@
         """Construct a RNNDecoder object."""
         super().__init__()
 
-        assert check_argument_types()
 
         if rnn_type not in ("lstm", "gru"):
             raise ValueError(f"Not supported: rnn_type={rnn_type}")
diff --git a/funasr/models/decoder/sanm_decoder.py b/funasr/models/decoder/sanm_decoder.py
index ed920bf..d83f89f 100644
--- a/funasr/models/decoder/sanm_decoder.py
+++ b/funasr/models/decoder/sanm_decoder.py
@@ -7,7 +7,6 @@
 
 from funasr.modules.streaming_utils import utils as myutils
 from funasr.models.decoder.transformer_decoder import BaseTransformerDecoder
-from typeguard import check_argument_types
 
 from funasr.modules.attention import MultiHeadedAttentionSANMDecoder, MultiHeadedAttentionCrossAtt
 from funasr.modules.embedding import PositionalEncoding
@@ -181,7 +180,6 @@
             tf2torch_tensor_name_prefix_tf: str = "seq2seq/decoder",
             embed_tensor_name_prefix_tf: str = None,
     ):
-        assert check_argument_types()
         super().__init__(
             vocab_size=vocab_size,
             encoder_output_size=encoder_output_size,
@@ -838,7 +836,6 @@
         tf2torch_tensor_name_prefix_torch: str = "decoder",
         tf2torch_tensor_name_prefix_tf: str = "seq2seq/decoder",
     ):
-        assert check_argument_types()
         super().__init__(
             vocab_size=vocab_size,
             encoder_output_size=encoder_output_size,
diff --git a/funasr/models/decoder/transformer_decoder.py b/funasr/models/decoder/transformer_decoder.py
index 45fdda8..0a9c612 100644
--- a/funasr/models/decoder/transformer_decoder.py
+++ b/funasr/models/decoder/transformer_decoder.py
@@ -9,7 +9,6 @@
 
 import torch
 from torch import nn
-from typeguard import check_argument_types
 
 from funasr.models.decoder.abs_decoder import AbsDecoder
 from funasr.modules.attention import MultiHeadedAttention
@@ -184,7 +183,6 @@
             pos_enc_class=PositionalEncoding,
             normalize_before: bool = True,
     ):
-        assert check_argument_types()
         super().__init__()
         attention_dim = encoder_output_size
 
@@ -373,7 +371,6 @@
             normalize_before: bool = True,
             concat_after: bool = False,
     ):
-        assert check_argument_types()
         super().__init__(
             vocab_size=vocab_size,
             encoder_output_size=encoder_output_size,
@@ -428,7 +425,6 @@
             concat_after: bool = False,
             embeds_id: int = -1,
     ):
-        assert check_argument_types()
         super().__init__(
             vocab_size=vocab_size,
             encoder_output_size=encoder_output_size,
@@ -540,7 +536,6 @@
             conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
             conv_usebias: int = False,
     ):
-        assert check_argument_types()
         if len(conv_kernel_length) != num_blocks:
             raise ValueError(
                 "conv_kernel_length must have equal number of values to num_blocks: "
@@ -602,7 +597,6 @@
             conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
             conv_usebias: int = False,
     ):
-        assert check_argument_types()
         if len(conv_kernel_length) != num_blocks:
             raise ValueError(
                 "conv_kernel_length must have equal number of values to num_blocks: "
@@ -664,7 +658,6 @@
             conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
             conv_usebias: int = False,
     ):
-        assert check_argument_types()
         if len(conv_kernel_length) != num_blocks:
             raise ValueError(
                 "conv_kernel_length must have equal number of values to num_blocks: "
@@ -726,7 +719,6 @@
             conv_kernel_length: Sequence[int] = (11, 11, 11, 11, 11, 11),
             conv_usebias: int = False,
     ):
-        assert check_argument_types()
         if len(conv_kernel_length) != num_blocks:
             raise ValueError(
                 "conv_kernel_length must have equal number of values to num_blocks: "
@@ -781,7 +773,6 @@
         pos_enc_class=PositionalEncoding,
         normalize_before: bool = True,
     ):
-        assert check_argument_types()
         super().__init__()
         attention_dim = encoder_output_size
 
@@ -955,7 +946,6 @@
         normalize_before: bool = True,
         concat_after: bool = False,
     ):
-        assert check_argument_types()
         super().__init__(
             vocab_size=vocab_size,
             encoder_output_size=encoder_output_size,
diff --git a/funasr/models/e2e_asr.py b/funasr/models/e2e_asr.py
index e6e6a52..79c5387 100644
--- a/funasr/models/e2e_asr.py
+++ b/funasr/models/e2e_asr.py
@@ -11,7 +11,6 @@
 from typing import Union
 
 import torch
-from typeguard import check_argument_types
 
 from funasr.layers.abs_normalize import AbsNormalize
 from funasr.losses.label_smoothing_loss import (
@@ -65,7 +64,6 @@
             preencoder: Optional[AbsPreEncoder] = None,
             postencoder: Optional[AbsPostEncoder] = None,
     ):
-        assert check_argument_types()
         assert 0.0 <= ctc_weight <= 1.0, ctc_weight
         assert 0.0 <= interctc_weight < 1.0, interctc_weight
 
diff --git a/funasr/models/e2e_asr_contextual_paraformer.py b/funasr/models/e2e_asr_contextual_paraformer.py
index cfb5008..4836663 100644
--- a/funasr/models/e2e_asr_contextual_paraformer.py
+++ b/funasr/models/e2e_asr_contextual_paraformer.py
@@ -9,7 +9,6 @@
 import numpy as np
 
 import torch
-from typeguard import check_argument_types
 
 from funasr.layers.abs_normalize import AbsNormalize
 from funasr.models.ctc import CTC
@@ -73,7 +72,6 @@
         preencoder: Optional[AbsPreEncoder] = None,
         postencoder: Optional[AbsPostEncoder] = None,
     ):
-        assert check_argument_types()
         assert 0.0 <= ctc_weight <= 1.0, ctc_weight
         assert 0.0 <= interctc_weight < 1.0, interctc_weight
 
diff --git a/funasr/models/e2e_asr_mfcca.py b/funasr/models/e2e_asr_mfcca.py
index 3927e38..7dd3b8d 100644
--- a/funasr/models/e2e_asr_mfcca.py
+++ b/funasr/models/e2e_asr_mfcca.py
@@ -7,7 +7,6 @@
 from typing import Union
 import logging
 import torch
-from typeguard import check_argument_types
 
 from funasr.modules.e2e_asr_common import ErrorCalculator
 from funasr.modules.nets_utils import th_accuracy
@@ -65,7 +64,6 @@
             sym_blank: str = "<blank>",
             preencoder: Optional[AbsPreEncoder] = None,
     ):
-        assert check_argument_types()
         assert 0.0 <= ctc_weight <= 1.0, ctc_weight
         assert rnnt_decoder is None, "Not implemented"
 
diff --git a/funasr/models/e2e_asr_paraformer.py b/funasr/models/e2e_asr_paraformer.py
index 686038e..5a1a29b 100644
--- a/funasr/models/e2e_asr_paraformer.py
+++ b/funasr/models/e2e_asr_paraformer.py
@@ -10,7 +10,6 @@
 import torch
 import random
 import numpy as np
-from typeguard import check_argument_types
 
 from funasr.layers.abs_normalize import AbsNormalize
 from funasr.losses.label_smoothing_loss import (
@@ -80,7 +79,6 @@
             postencoder: Optional[AbsPostEncoder] = None,
             use_1st_decoder_loss: bool = False,
     ):
-        assert check_argument_types()
         assert 0.0 <= ctc_weight <= 1.0, ctc_weight
         assert 0.0 <= interctc_weight < 1.0, interctc_weight
 
@@ -645,7 +643,6 @@
             postencoder: Optional[AbsPostEncoder] = None,
             use_1st_decoder_loss: bool = False,
     ):
-        assert check_argument_types()
         assert 0.0 <= ctc_weight <= 1.0, ctc_weight
         assert 0.0 <= interctc_weight < 1.0, interctc_weight
 
@@ -1255,7 +1252,6 @@
             preencoder: Optional[AbsPreEncoder] = None,
             postencoder: Optional[AbsPostEncoder] = None,
     ):
-        assert check_argument_types()
         assert 0.0 <= ctc_weight <= 1.0, ctc_weight
         assert 0.0 <= interctc_weight < 1.0, interctc_weight
 
@@ -1528,7 +1524,6 @@
             preencoder: Optional[AbsPreEncoder] = None,
             postencoder: Optional[AbsPostEncoder] = None,
     ):
-        assert check_argument_types()
         assert 0.0 <= ctc_weight <= 1.0, ctc_weight
         assert 0.0 <= interctc_weight < 1.0, interctc_weight
 
@@ -1806,7 +1801,6 @@
             preencoder: Optional[AbsPreEncoder] = None,
             postencoder: Optional[AbsPostEncoder] = None,
     ):
-        assert check_argument_types()
         assert 0.0 <= ctc_weight <= 1.0, ctc_weight
         assert 0.0 <= interctc_weight < 1.0, interctc_weight
 
diff --git a/funasr/models/e2e_asr_transducer.py b/funasr/models/e2e_asr_transducer.py
index 4e33bd6..80914b1 100644
--- a/funasr/models/e2e_asr_transducer.py
+++ b/funasr/models/e2e_asr_transducer.py
@@ -6,7 +6,6 @@
 
 import torch
 from packaging.version import parse as V
-from typeguard import check_argument_types
 from funasr.losses.label_smoothing_loss import (
     LabelSmoothingLoss,  # noqa: H301
 )
@@ -85,8 +84,6 @@
     ) -> None:
         """Construct an ESPnetASRTransducerModel object."""
         super().__init__()
-
-        assert check_argument_types()
 
         # The following labels ID are reserved: 0 (blank) and vocab_size - 1 (sos/eos)
         self.blank_id = 0
@@ -546,8 +543,6 @@
         """Construct an ESPnetASRTransducerModel object."""
         super().__init__()
 
-        assert check_argument_types()
-
         # The following labels ID are reserved: 0 (blank) and vocab_size - 1 (sos/eos)
         self.blank_id = 0
 
@@ -713,7 +708,7 @@
             loss_lm = self._calc_lm_loss(decoder_out, target)
 
         loss_trans = loss_trans_utt + loss_trans_chunk
-        loss_ctc = loss_ctc + loss_ctc_chunk 
+        loss_ctc = loss_ctc + loss_ctc_chunk
         loss_ctc = loss_att + loss_att_chunk
 
         loss = (
@@ -1018,4 +1013,4 @@
             ignore_label=self.ignore_id,
         )
 
-        return loss_att, acc_att
+        return loss_att, acc_att
\ No newline at end of file
diff --git a/funasr/models/e2e_diar_eend_ola.py b/funasr/models/e2e_diar_eend_ola.py
index da7c674..ae3a436 100644
--- a/funasr/models/e2e_diar_eend_ola.py
+++ b/funasr/models/e2e_diar_eend_ola.py
@@ -9,7 +9,6 @@
 import numpy as np
 import torch
 import torch.nn as  nn
-from typeguard import check_argument_types
 
 from funasr.models.frontend.wav_frontend import WavFrontendMel23
 from funasr.modules.eend_ola.encoder import EENDOLATransformerEncoder
@@ -48,7 +47,6 @@
             mapping_dict=None,
             **kwargs,
     ):
-        assert check_argument_types()
 
         super().__init__()
         self.frontend = frontend
diff --git a/funasr/models/e2e_diar_sond.py b/funasr/models/e2e_diar_sond.py
index 9c3fb92..bc93b9d 100644
--- a/funasr/models/e2e_diar_sond.py
+++ b/funasr/models/e2e_diar_sond.py
@@ -12,7 +12,6 @@
 import numpy as np
 import torch
 from torch.nn import functional as F
-from typeguard import check_argument_types
 
 from funasr.modules.nets_utils import to_device
 from funasr.modules.nets_utils import make_pad_mask
@@ -66,7 +65,6 @@
         inter_score_loss_weight: float = 0.0,
         inputs_type: str = "raw",
     ):
-        assert check_argument_types()
 
         super().__init__()
 
diff --git a/funasr/models/e2e_sa_asr.py b/funasr/models/e2e_sa_asr.py
index e209d51..cf1587d 100644
--- a/funasr/models/e2e_sa_asr.py
+++ b/funasr/models/e2e_sa_asr.py
@@ -12,7 +12,6 @@
 
 import torch
 import torch.nn.functional as F
-from typeguard import check_argument_types
 
 from funasr.layers.abs_normalize import AbsNormalize
 from funasr.losses.label_smoothing_loss import (
@@ -67,7 +66,6 @@
             sym_blank: str = "<blank>",
             extract_feats_in_collect_stats: bool = True,
     ):
-        assert check_argument_types()
         assert 0.0 <= ctc_weight <= 1.0, ctc_weight
         assert 0.0 <= interctc_weight < 1.0, interctc_weight
 
diff --git a/funasr/models/e2e_sv.py b/funasr/models/e2e_sv.py
index bd5178e..8be63d4 100644
--- a/funasr/models/e2e_sv.py
+++ b/funasr/models/e2e_sv.py
@@ -12,7 +12,6 @@
 from typing import Union
 
 import torch
-from typeguard import check_argument_types
 
 from funasr.layers.abs_normalize import AbsNormalize
 from funasr.losses.label_smoothing_loss import (
@@ -56,7 +55,6 @@
             pooling_layer: torch.nn.Module,
             decoder: AbsDecoder,
     ):
-        assert check_argument_types()
 
         super().__init__()
         # note that eos is the same as sos (equivalent ID)
diff --git a/funasr/models/e2e_tp.py b/funasr/models/e2e_tp.py
index 33948f9..567dc70 100644
--- a/funasr/models/e2e_tp.py
+++ b/funasr/models/e2e_tp.py
@@ -9,7 +9,6 @@
 
 import torch
 import numpy as np
-from typeguard import check_argument_types
 
 from funasr.models.encoder.abs_encoder import AbsEncoder
 from funasr.models.frontend.abs_frontend import AbsFrontend
@@ -42,7 +41,6 @@
             predictor_bias: int = 0,
             token_list=None,
     ):
-        assert check_argument_types()
 
         super().__init__()
         # note that eos is the same as sos (equivalent ID)
diff --git a/funasr/models/e2e_uni_asr.py b/funasr/models/e2e_uni_asr.py
index 9ec3a39..8bc3b42 100644
--- a/funasr/models/e2e_uni_asr.py
+++ b/funasr/models/e2e_uni_asr.py
@@ -8,7 +8,6 @@
 from typing import Union
 
 import torch
-from typeguard import check_argument_types
 
 from funasr.models.e2e_asr_common import ErrorCalculator
 from funasr.modules.nets_utils import th_accuracy
@@ -82,7 +81,6 @@
         postencoder: Optional[AbsPostEncoder] = None,
         encoder1_encoder2_joint_training: bool = True,
     ):
-        assert check_argument_types()
         assert 0.0 <= ctc_weight <= 1.0, ctc_weight
         assert 0.0 <= interctc_weight < 1.0, interctc_weight
 
diff --git a/funasr/models/encoder/conformer_encoder.py b/funasr/models/encoder/conformer_encoder.py
index 994607f..e5fac62 100644
--- a/funasr/models/encoder/conformer_encoder.py
+++ b/funasr/models/encoder/conformer_encoder.py
@@ -12,7 +12,6 @@
 
 import torch
 from torch import nn
-from typeguard import check_argument_types
 
 from funasr.models.ctc import CTC
 from funasr.modules.attention import (
@@ -533,7 +532,6 @@
             interctc_use_conditioning: bool = False,
             stochastic_depth_rate: Union[float, List[float]] = 0.0,
     ):
-        assert check_argument_types()
         super().__init__()
         self._output_size = output_size
 
@@ -943,7 +941,6 @@
         """Construct an Encoder object."""
         super().__init__()
 
-        assert check_argument_types()
 
         self.embed = StreamingConvInput(
             input_size,
diff --git a/funasr/models/encoder/data2vec_encoder.py b/funasr/models/encoder/data2vec_encoder.py
index 64c2144..8885f02 100644
--- a/funasr/models/encoder/data2vec_encoder.py
+++ b/funasr/models/encoder/data2vec_encoder.py
@@ -10,7 +10,6 @@
 import torch.distributed as dist
 import torch.nn as nn
 import torch.nn.functional as F
-from typeguard import check_argument_types
 
 from funasr.models.encoder.abs_encoder import AbsEncoder
 from funasr.modules.data2vec.data_utils import compute_mask_indices
@@ -97,7 +96,6 @@
             # FP16 optimization
             required_seq_len_multiple: int = 2,
     ):
-        assert check_argument_types()
         super().__init__()
 
         # ConvFeatureExtractionModel
diff --git a/funasr/models/encoder/mfcca_encoder.py b/funasr/models/encoder/mfcca_encoder.py
index 95ccf07..87bb19d 100644
--- a/funasr/models/encoder/mfcca_encoder.py
+++ b/funasr/models/encoder/mfcca_encoder.py
@@ -5,7 +5,6 @@
 import torch
 from torch import nn
 
-from typeguard import check_argument_types
 
 from funasr.models.encoder.encoder_layer_mfcca import EncoderLayer
 from funasr.modules.nets_utils import get_activation
@@ -161,7 +160,6 @@
             cnn_module_kernel: int = 31,
             padding_idx: int = -1,
     ):
-        assert check_argument_types()
         super().__init__()
         self._output_size = output_size
 
diff --git a/funasr/models/encoder/opennmt_encoders/conv_encoder.py b/funasr/models/encoder/opennmt_encoders/conv_encoder.py
index eec854f..9ab5e6b 100644
--- a/funasr/models/encoder/opennmt_encoders/conv_encoder.py
+++ b/funasr/models/encoder/opennmt_encoders/conv_encoder.py
@@ -7,7 +7,6 @@
 import torch
 import torch.nn as nn
 from torch.nn import functional as F
-from typeguard import check_argument_types
 import numpy as np
 from funasr.modules.nets_utils import make_pad_mask
 from funasr.modules.layer_norm import LayerNorm
@@ -90,7 +89,6 @@
             tf2torch_tensor_name_prefix_torch: str = "speaker_encoder",
             tf2torch_tensor_name_prefix_tf: str = "EAND/speaker_encoder",
     ):
-        assert check_argument_types()
         super().__init__()
         self._output_size = num_units
 
diff --git a/funasr/models/encoder/opennmt_encoders/fsmn_encoder.py b/funasr/models/encoder/opennmt_encoders/fsmn_encoder.py
index e41b2aa..5f62e67 100644
--- a/funasr/models/encoder/opennmt_encoders/fsmn_encoder.py
+++ b/funasr/models/encoder/opennmt_encoders/fsmn_encoder.py
@@ -7,7 +7,6 @@
 import torch
 import torch.nn as nn
 from torch.nn import functional as F
-from typeguard import check_argument_types
 import numpy as np
 from funasr.modules.nets_utils import make_pad_mask
 from funasr.modules.layer_norm import LayerNorm
diff --git a/funasr/models/encoder/opennmt_encoders/self_attention_encoder.py b/funasr/models/encoder/opennmt_encoders/self_attention_encoder.py
index db30f08..7c83cbd 100644
--- a/funasr/models/encoder/opennmt_encoders/self_attention_encoder.py
+++ b/funasr/models/encoder/opennmt_encoders/self_attention_encoder.py
@@ -7,7 +7,6 @@
 import torch
 import torch.nn as nn
 from funasr.modules.streaming_utils.chunk_utilis import overlap_chunk
-from typeguard import check_argument_types
 import numpy as np
 from funasr.modules.nets_utils import make_pad_mask
 from funasr.modules.attention import MultiHeadSelfAttention, MultiHeadedAttentionSANM
@@ -144,7 +143,6 @@
         tf2torch_tensor_name_prefix_tf: str = "seq2seq/encoder",
         out_units=None,
     ):
-        assert check_argument_types()
         super().__init__()
         self._output_size = output_size
 
diff --git a/funasr/models/encoder/rnn_encoder.py b/funasr/models/encoder/rnn_encoder.py
index 59730da..434af09 100644
--- a/funasr/models/encoder/rnn_encoder.py
+++ b/funasr/models/encoder/rnn_encoder.py
@@ -5,7 +5,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
 
 from funasr.modules.nets_utils import make_pad_mask
 from funasr.modules.rnn.encoders import RNN
@@ -37,7 +36,6 @@
         dropout: float = 0.0,
         subsample: Optional[Sequence[int]] = (2, 2, 1, 1),
     ):
-        assert check_argument_types()
         super().__init__()
         self._output_size = output_size
         self.rnn_type = rnn_type
diff --git a/funasr/models/encoder/sanm_encoder.py b/funasr/models/encoder/sanm_encoder.py
index 46eabd1..45163df 100644
--- a/funasr/models/encoder/sanm_encoder.py
+++ b/funasr/models/encoder/sanm_encoder.py
@@ -8,7 +8,6 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from funasr.modules.streaming_utils.chunk_utilis import overlap_chunk
-from typeguard import check_argument_types
 import numpy as np
 from funasr.torch_utils.device_funcs import to_device
 from funasr.modules.nets_utils import make_pad_mask
@@ -151,7 +150,6 @@
         tf2torch_tensor_name_prefix_torch: str = "encoder",
         tf2torch_tensor_name_prefix_tf: str = "seq2seq/encoder",
     ):
-        assert check_argument_types()
         super().__init__()
         self._output_size = output_size
 
@@ -601,7 +599,6 @@
             tf2torch_tensor_name_prefix_torch: str = "encoder",
             tf2torch_tensor_name_prefix_tf: str = "seq2seq/encoder",
     ):
-        assert check_argument_types()
         super().__init__()
         self._output_size = output_size
 
@@ -1060,7 +1057,6 @@
         sanm_shfit : int = 0,
         selfattention_layer_type: str = "sanm",
     ):
-        assert check_argument_types()
         super().__init__()
         self._output_size = output_size
 
diff --git a/funasr/models/encoder/transformer_encoder.py b/funasr/models/encoder/transformer_encoder.py
index ff9c3db..4f2bef5 100644
--- a/funasr/models/encoder/transformer_encoder.py
+++ b/funasr/models/encoder/transformer_encoder.py
@@ -9,7 +9,6 @@
 
 import torch
 from torch import nn
-from typeguard import check_argument_types
 import logging
 
 from funasr.models.ctc import CTC
@@ -189,7 +188,6 @@
             interctc_layer_idx: List[int] = [],
             interctc_use_conditioning: bool = False,
     ):
-        assert check_argument_types()
         super().__init__()
         self._output_size = output_size
 
diff --git a/funasr/models/frontend/default.py b/funasr/models/frontend/default.py
index abbcd1b..b41af80 100644
--- a/funasr/models/frontend/default.py
+++ b/funasr/models/frontend/default.py
@@ -7,7 +7,6 @@
 import numpy as np
 import torch
 from torch_complex.tensor import ComplexTensor
-from typeguard import check_argument_types
 
 from funasr.layers.log_mel import LogMel
 from funasr.layers.stft import Stft
@@ -40,7 +39,6 @@
             apply_stft: bool = True,
             use_channel: int = None,
     ):
-        assert check_argument_types()
         super().__init__()
         if isinstance(fs, str):
             fs = humanfriendly.parse_size(fs)
@@ -167,7 +165,6 @@
             cmvn_file: str = None,
             mc: bool = True
     ):
-        assert check_argument_types()
         super().__init__()
         if isinstance(fs, str):
             fs = humanfriendly.parse_size(fs)
diff --git a/funasr/models/frontend/fused.py b/funasr/models/frontend/fused.py
index 857486d..ff95871 100644
--- a/funasr/models/frontend/fused.py
+++ b/funasr/models/frontend/fused.py
@@ -3,7 +3,6 @@
 from funasr.models.frontend.s3prl import S3prlFrontend
 import numpy as np
 import torch
-from typeguard import check_argument_types
 from typing import Tuple
 
 
@@ -12,7 +11,6 @@
         self, frontends=None, align_method="linear_projection", proj_dim=100, fs=16000
     ):
 
-        assert check_argument_types()
         super().__init__()
         self.align_method = (
             align_method  # fusing method : linear_projection only for now
diff --git a/funasr/models/frontend/s3prl.py b/funasr/models/frontend/s3prl.py
index b03d2c9..fdeb1c5 100644
--- a/funasr/models/frontend/s3prl.py
+++ b/funasr/models/frontend/s3prl.py
@@ -8,7 +8,6 @@
 
 import humanfriendly
 import torch
-from typeguard import check_argument_types
 
 from funasr.models.frontend.abs_frontend import AbsFrontend
 from funasr.modules.frontends.frontend import Frontend
@@ -37,7 +36,6 @@
             download_dir: str = None,
             multilayer_feature: bool = False,
     ):
-        assert check_argument_types()
         super().__init__()
         if isinstance(fs, str):
             fs = humanfriendly.parse_size(fs)
diff --git a/funasr/models/frontend/wav_frontend.py b/funasr/models/frontend/wav_frontend.py
index f16bdd9..acab13b 100644
--- a/funasr/models/frontend/wav_frontend.py
+++ b/funasr/models/frontend/wav_frontend.py
@@ -6,7 +6,6 @@
 import torch
 import torchaudio.compliance.kaldi as kaldi
 from torch.nn.utils.rnn import pad_sequence
-from typeguard import check_argument_types
 
 import funasr.models.frontend.eend_ola_feature as eend_ola_feature
 from funasr.models.frontend.abs_frontend import AbsFrontend
@@ -95,7 +94,6 @@
             snip_edges: bool = True,
             upsacle_samples: bool = True,
     ):
-        assert check_argument_types()
         super().__init__()
         self.fs = fs
         self.window = window
@@ -227,7 +225,6 @@
             snip_edges: bool = True,
             upsacle_samples: bool = True,
     ):
-        assert check_argument_types()
         super().__init__()
         self.fs = fs
         self.window = window
@@ -466,7 +463,6 @@
             lfr_m: int = 1,
             lfr_n: int = 1,
     ):
-        assert check_argument_types()
         super().__init__()
         self.fs = fs
         self.frame_length = frame_length
diff --git a/funasr/models/frontend/windowing.py b/funasr/models/frontend/windowing.py
index a526758..94c9d27 100644
--- a/funasr/models/frontend/windowing.py
+++ b/funasr/models/frontend/windowing.py
@@ -6,7 +6,6 @@
 
 from funasr.models.frontend.abs_frontend import AbsFrontend
 import torch
-from typeguard import check_argument_types
 from typing import Tuple
 
 
@@ -38,7 +37,6 @@
             padding: Padding (placeholder, currently not implemented).
             fs:  Sampling rate (placeholder for compatibility, not used).
         """
-        assert check_argument_types()
         super().__init__()
         self.fs = fs
         self.win_length = win_length
diff --git a/funasr/models/postencoder/hugging_face_transformers_postencoder.py b/funasr/models/postencoder/hugging_face_transformers_postencoder.py
index 1aad15d..c59e2b7 100644
--- a/funasr/models/postencoder/hugging_face_transformers_postencoder.py
+++ b/funasr/models/postencoder/hugging_face_transformers_postencoder.py
@@ -6,7 +6,6 @@
 
 from funasr.modules.nets_utils import make_pad_mask
 from funasr.models.postencoder.abs_postencoder import AbsPostEncoder
-from typeguard import check_argument_types
 from typing import Tuple
 
 import copy
@@ -30,7 +29,6 @@
         model_name_or_path: str,
     ):
         """Initialize the module."""
-        assert check_argument_types()
         super().__init__()
 
         if not is_transformers_available:
diff --git a/funasr/models/preencoder/linear.py b/funasr/models/preencoder/linear.py
index c69b6ce..25f6720 100644
--- a/funasr/models/preencoder/linear.py
+++ b/funasr/models/preencoder/linear.py
@@ -5,7 +5,6 @@
 """Linear Projection."""
 
 from funasr.models.preencoder.abs_preencoder import AbsPreEncoder
-from typeguard import check_argument_types
 from typing import Tuple
 
 import torch
@@ -20,7 +19,6 @@
         output_size: int,
     ):
         """Initialize the module."""
-        assert check_argument_types()
         super().__init__()
 
         self.output_dim = output_size
diff --git a/funasr/models/preencoder/sinc.py b/funasr/models/preencoder/sinc.py
index fe6d2af..3baa4a8 100644
--- a/funasr/models/preencoder/sinc.py
+++ b/funasr/models/preencoder/sinc.py
@@ -10,7 +10,6 @@
 from funasr.layers.sinc_conv import SincConv
 import humanfriendly
 import torch
-from typeguard import check_argument_types
 from typing import Optional
 from typing import Tuple
 from typing import Union
@@ -60,7 +59,6 @@
             windowing_type: Choice of windowing function.
             scale_type:  Choice of filter-bank initialization scale.
         """
-        assert check_argument_types()
         super().__init__()
         if isinstance(fs, str):
             fs = humanfriendly.parse_size(fs)
@@ -268,7 +266,6 @@
             dropout_probability: Dropout probability.
             shape (tuple, list): Shape of input tensors.
         """
-        assert check_argument_types()
         super().__init__()
         if shape is None:
             shape = (0, 2, 1)
diff --git a/funasr/models/seq_rnn_lm.py b/funasr/models/seq_rnn_lm.py
index f7ddcae..bef4974 100644
--- a/funasr/models/seq_rnn_lm.py
+++ b/funasr/models/seq_rnn_lm.py
@@ -4,7 +4,6 @@
 
 import torch
 import torch.nn as nn
-from typeguard import check_argument_types
 from funasr.train.abs_model import AbsLM
 
 
@@ -27,7 +26,6 @@
         rnn_type: str = "lstm",
         ignore_id: int = 0,
     ):
-        assert check_argument_types()
         super().__init__()
 
         ninp = unit
diff --git a/funasr/modules/eend_ola/utils/report.py b/funasr/modules/eend_ola/utils/report.py
index bfccedf..f4a044b 100644
--- a/funasr/modules/eend_ola/utils/report.py
+++ b/funasr/modules/eend_ola/utils/report.py
@@ -2,7 +2,7 @@
 import numpy as np
 import time
 import torch
-from eend.utils.power import create_powerlabel
+from funasr.modules.eend_ola.utils.power import create_powerlabel
 from itertools import combinations
 
 metrics = [
diff --git a/funasr/optimizers/sgd.py b/funasr/optimizers/sgd.py
index 3f0d3d1..fb7a3df 100644
--- a/funasr/optimizers/sgd.py
+++ b/funasr/optimizers/sgd.py
@@ -1,5 +1,4 @@
 import torch
-from typeguard import check_argument_types
 
 
 class SGD(torch.optim.SGD):
@@ -21,7 +20,6 @@
         weight_decay: float = 0.0,
         nesterov: bool = False,
     ):
-        assert check_argument_types()
         super().__init__(
             params,
             lr=lr,
diff --git a/funasr/runtime/python/libtorch/funasr_torch/utils/frontend.py b/funasr/runtime/python/libtorch/funasr_torch/utils/frontend.py
index 11a8644..fe39955 100644
--- a/funasr/runtime/python/libtorch/funasr_torch/utils/frontend.py
+++ b/funasr/runtime/python/libtorch/funasr_torch/utils/frontend.py
@@ -3,7 +3,6 @@
 from typing import Any, Dict, Iterable, List, NamedTuple, Set, Tuple, Union
 
 import numpy as np
-from typeguard import check_argument_types
 import kaldi_native_fbank as knf
 
 root_dir = Path(__file__).resolve().parent
@@ -28,7 +27,6 @@
             dither: float = 1.0,
             **kwargs,
     ) -> None:
-        check_argument_types()
 
         opts = knf.FbankOptions()
         opts.frame_opts.samp_freq = fs
diff --git a/funasr/runtime/python/libtorch/funasr_torch/utils/utils.py b/funasr/runtime/python/libtorch/funasr_torch/utils/utils.py
index 86e78bc..913ddc1 100644
--- a/funasr/runtime/python/libtorch/funasr_torch/utils/utils.py
+++ b/funasr/runtime/python/libtorch/funasr_torch/utils/utils.py
@@ -9,7 +9,6 @@
 import numpy as np
 import yaml
 
-from typeguard import check_argument_types
 
 import warnings
 
@@ -21,7 +20,6 @@
 class TokenIDConverter():
     def __init__(self, token_list: Union[List, str],
                  ):
-        check_argument_types()
 
         self.token_list = token_list
         self.unk_symbol = token_list[-1]
@@ -51,7 +49,6 @@
         space_symbol: str = "<space>",
         remove_non_linguistic_symbols: bool = False,
     ):
-        check_argument_types()
 
         self.space_symbol = space_symbol
         self.non_linguistic_symbols = self.load_symbols(symbol_value)
diff --git a/funasr/runtime/python/onnxruntime/funasr_onnx/utils/frontend.py b/funasr/runtime/python/onnxruntime/funasr_onnx/utils/frontend.py
index 5478236..ded04b6 100644
--- a/funasr/runtime/python/onnxruntime/funasr_onnx/utils/frontend.py
+++ b/funasr/runtime/python/onnxruntime/funasr_onnx/utils/frontend.py
@@ -4,7 +4,6 @@
 import copy
 
 import numpy as np
-from typeguard import check_argument_types
 import kaldi_native_fbank as knf
 
 root_dir = Path(__file__).resolve().parent
@@ -29,7 +28,6 @@
             dither: float = 1.0,
             **kwargs,
     ) -> None:
-        check_argument_types()
 
         opts = knf.FbankOptions()
         opts.frame_opts.samp_freq = fs
diff --git a/funasr/runtime/python/onnxruntime/funasr_onnx/utils/utils.py b/funasr/runtime/python/onnxruntime/funasr_onnx/utils/utils.py
index dcee425..9284943 100644
--- a/funasr/runtime/python/onnxruntime/funasr_onnx/utils/utils.py
+++ b/funasr/runtime/python/onnxruntime/funasr_onnx/utils/utils.py
@@ -10,7 +10,6 @@
 import yaml
 from onnxruntime import (GraphOptimizationLevel, InferenceSession,
                          SessionOptions, get_available_providers, get_device)
-from typeguard import check_argument_types
 
 import warnings
 
@@ -22,7 +21,6 @@
 class TokenIDConverter():
     def __init__(self, token_list: Union[List, str],
                  ):
-        check_argument_types()
 
         self.token_list = token_list
         self.unk_symbol = token_list[-1]
@@ -52,7 +50,6 @@
         space_symbol: str = "<space>",
         remove_non_linguistic_symbols: bool = False,
     ):
-        check_argument_types()
 
         self.space_symbol = space_symbol
         self.non_linguistic_symbols = self.load_symbols(symbol_value)
diff --git a/funasr/runtime/triton_gpu/model_repo_paraformer_large_offline/feature_extractor/1/model.py b/funasr/runtime/triton_gpu/model_repo_paraformer_large_offline/feature_extractor/1/model.py
index 2f84bb8..c556daf 100644
--- a/funasr/runtime/triton_gpu/model_repo_paraformer_large_offline/feature_extractor/1/model.py
+++ b/funasr/runtime/triton_gpu/model_repo_paraformer_large_offline/feature_extractor/1/model.py
@@ -109,7 +109,6 @@
             lfr_n: int = 6,
             dither: float = 1.0
     ) -> None:
-        # check_argument_types()
 
         self.fs = fs
         self.window = window
diff --git a/funasr/samplers/build_batch_sampler.py b/funasr/samplers/build_batch_sampler.py
index 074b446..9266fea 100644
--- a/funasr/samplers/build_batch_sampler.py
+++ b/funasr/samplers/build_batch_sampler.py
@@ -4,8 +4,6 @@
 from typing import Tuple
 from typing import Union
 
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.samplers.abs_sampler import AbsSampler
 from funasr.samplers.folded_batch_sampler import FoldedBatchSampler
@@ -104,7 +102,6 @@
         padding: Whether sequences are input as a padded tensor or not.
             used for "numel" mode
     """
-    assert check_argument_types()
     if len(shape_files) == 0:
         raise ValueError("No shape file are given")
 
@@ -164,5 +161,4 @@
 
     else:
         raise ValueError(f"Not supported: {type}")
-    assert check_return_type(retval)
     return retval
diff --git a/funasr/samplers/folded_batch_sampler.py b/funasr/samplers/folded_batch_sampler.py
index 48e9604..f48d744 100644
--- a/funasr/samplers/folded_batch_sampler.py
+++ b/funasr/samplers/folded_batch_sampler.py
@@ -4,7 +4,6 @@
 from typing import Tuple
 from typing import Union
 
-from typeguard import check_argument_types
 
 from funasr.fileio.read_text import load_num_sequence_text
 from funasr.fileio.read_text import read_2column_text
@@ -23,7 +22,6 @@
         drop_last: bool = False,
         utt2category_file: str = None,
     ):
-        assert check_argument_types()
         assert batch_size > 0
         if sort_batch != "ascending" and sort_batch != "descending":
             raise ValueError(
diff --git a/funasr/samplers/length_batch_sampler.py b/funasr/samplers/length_batch_sampler.py
index 8ee8bdc..28404e3 100644
--- a/funasr/samplers/length_batch_sampler.py
+++ b/funasr/samplers/length_batch_sampler.py
@@ -4,7 +4,6 @@
 from typing import Tuple
 from typing import Union
 
-from typeguard import check_argument_types
 
 from funasr.fileio.read_text import load_num_sequence_text
 from funasr.samplers.abs_sampler import AbsSampler
@@ -21,7 +20,6 @@
         drop_last: bool = False,
         padding: bool = True,
     ):
-        assert check_argument_types()
         assert batch_bins > 0
         if sort_batch != "ascending" and sort_batch != "descending":
             raise ValueError(
diff --git a/funasr/samplers/num_elements_batch_sampler.py b/funasr/samplers/num_elements_batch_sampler.py
index 0ffad92..ebed0e4 100644
--- a/funasr/samplers/num_elements_batch_sampler.py
+++ b/funasr/samplers/num_elements_batch_sampler.py
@@ -4,7 +4,6 @@
 from typing import Union
 
 import numpy as np
-from typeguard import check_argument_types
 
 from funasr.fileio.read_text import load_num_sequence_text
 from funasr.samplers.abs_sampler import AbsSampler
@@ -21,7 +20,6 @@
         drop_last: bool = False,
         padding: bool = True,
     ):
-        assert check_argument_types()
         assert batch_bins > 0
         if sort_batch != "ascending" and sort_batch != "descending":
             raise ValueError(
diff --git a/funasr/samplers/sorted_batch_sampler.py b/funasr/samplers/sorted_batch_sampler.py
index d6c3b41..b31e93e 100644
--- a/funasr/samplers/sorted_batch_sampler.py
+++ b/funasr/samplers/sorted_batch_sampler.py
@@ -2,7 +2,6 @@
 from typing import Iterator
 from typing import Tuple
 
-from typeguard import check_argument_types
 
 from funasr.fileio.read_text import load_num_sequence_text
 from funasr.samplers.abs_sampler import AbsSampler
@@ -26,7 +25,6 @@
         sort_batch: str = "ascending",
         drop_last: bool = False,
     ):
-        assert check_argument_types()
         assert batch_size > 0
         self.batch_size = batch_size
         self.shape_file = shape_file
diff --git a/funasr/samplers/unsorted_batch_sampler.py b/funasr/samplers/unsorted_batch_sampler.py
index 349e526..e5ed05b 100644
--- a/funasr/samplers/unsorted_batch_sampler.py
+++ b/funasr/samplers/unsorted_batch_sampler.py
@@ -2,7 +2,6 @@
 from typing import Iterator
 from typing import Tuple
 
-from typeguard import check_argument_types
 
 from funasr.fileio.read_text import read_2column_text
 from funasr.samplers.abs_sampler import AbsSampler
@@ -28,7 +27,6 @@
         drop_last: bool = False,
         utt2category_file: str = None,
     ):
-        assert check_argument_types()
         assert batch_size > 0
         self.batch_size = batch_size
         self.key_file = key_file
diff --git a/funasr/schedulers/noam_lr.py b/funasr/schedulers/noam_lr.py
index 80df019..e08fb63 100644
--- a/funasr/schedulers/noam_lr.py
+++ b/funasr/schedulers/noam_lr.py
@@ -4,7 +4,6 @@
 
 import torch
 from torch.optim.lr_scheduler import _LRScheduler
-from typeguard import check_argument_types
 
 from funasr.schedulers.abs_scheduler import AbsBatchStepScheduler
 
@@ -31,7 +30,6 @@
         warmup_steps: Union[int, float] = 25000,
         last_epoch: int = -1,
     ):
-        assert check_argument_types()
         self.model_size = model_size
         self.warmup_steps = warmup_steps
 
diff --git a/funasr/schedulers/tri_stage_scheduler.py b/funasr/schedulers/tri_stage_scheduler.py
index 8dc71b4..c442260 100644
--- a/funasr/schedulers/tri_stage_scheduler.py
+++ b/funasr/schedulers/tri_stage_scheduler.py
@@ -8,7 +8,6 @@
 
 import torch
 from torch.optim.lr_scheduler import _LRScheduler
-from typeguard import check_argument_types
 
 from funasr.schedulers.abs_scheduler import AbsBatchStepScheduler
 
@@ -22,7 +21,6 @@
             init_lr_scale: float = 0.01,
             final_lr_scale: float = 0.01,
     ):
-        assert check_argument_types()
         self.optimizer = optimizer
         self.last_epoch = last_epoch
         self.phase_ratio = phase_ratio
diff --git a/funasr/schedulers/warmup_lr.py b/funasr/schedulers/warmup_lr.py
index dbf3aca..95ebaca 100644
--- a/funasr/schedulers/warmup_lr.py
+++ b/funasr/schedulers/warmup_lr.py
@@ -3,7 +3,6 @@
 
 import torch
 from torch.optim.lr_scheduler import _LRScheduler
-from typeguard import check_argument_types
 
 from funasr.schedulers.abs_scheduler import AbsBatchStepScheduler
 
@@ -30,7 +29,6 @@
         warmup_steps: Union[int, float] = 25000,
         last_epoch: int = -1,
     ):
-        assert check_argument_types()
         self.warmup_steps = warmup_steps
 
         # __init__() must be invoked before setting field
diff --git a/funasr/tasks/abs_task.py b/funasr/tasks/abs_task.py
index 0fb77a9..91d33c5 100644
--- a/funasr/tasks/abs_task.py
+++ b/funasr/tasks/abs_task.py
@@ -32,8 +32,6 @@
 import yaml
 from funasr.models.base_model import FunASRModel
 from torch.utils.data import DataLoader
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr import __version__
 from funasr.datasets.dataset import AbsDataset
@@ -269,7 +267,6 @@
 
     @classmethod
     def get_parser(cls) -> config_argparse.ArgumentParser:
-        assert check_argument_types()
 
         class ArgumentDefaultsRawTextHelpFormatter(
             argparse.RawTextHelpFormatter,
@@ -959,7 +956,6 @@
         cls.trainer.add_arguments(parser)
         cls.add_task_arguments(parser)
 
-        assert check_return_type(parser)
         return parser
 
     @classmethod
@@ -1007,7 +1003,6 @@
             return _cls
 
         # This method is used only for --print_config
-        assert check_argument_types()
         parser = cls.get_parser()
         args, _ = parser.parse_known_args()
         config = vars(args)
@@ -1047,7 +1042,6 @@
 
     @classmethod
     def check_required_command_args(cls, args: argparse.Namespace):
-        assert check_argument_types()
         if hasattr(args, "required"):
             for k in vars(args):
                 if "-" in k:
@@ -1077,7 +1071,6 @@
             inference: bool = False,
     ) -> None:
         """Check if the dataset satisfy the requirement of current Task"""
-        assert check_argument_types()
         mes = (
             f"If you intend to use an additional input, modify "
             f'"{cls.__name__}.required_data_names()" or '
@@ -1104,14 +1097,12 @@
 
     @classmethod
     def print_config(cls, file=sys.stdout) -> None:
-        assert check_argument_types()
         # Shows the config: e.g. python train.py asr --print_config
         config = cls.get_default_config()
         file.write(yaml_no_alias_safe_dump(config, indent=4, sort_keys=False))
 
     @classmethod
     def main(cls, args: argparse.Namespace = None, cmd: Sequence[str] = None):
-        assert check_argument_types()
         print(get_commandline_args(), file=sys.stderr)
         if args is None:
             parser = cls.get_parser()
@@ -1148,7 +1139,6 @@
 
     @classmethod
     def main_worker(cls, args: argparse.Namespace):
-        assert check_argument_types()
 
         # 0. Init distributed process
         distributed_option = build_dataclass(DistributedOption, args)
@@ -1556,7 +1546,6 @@
         - 4 epoch with "--num_iters_per_epoch" == 4
 
         """
-        assert check_argument_types()
         iter_options = cls.build_iter_options(args, distributed_option, mode)
 
         # Overwrite iter_options if any kwargs is given
@@ -1589,7 +1578,6 @@
     def build_sequence_iter_factory(
             cls, args: argparse.Namespace, iter_options: IteratorOptions, mode: str
     ) -> AbsIterFactory:
-        assert check_argument_types()
 
         if hasattr(args, "frontend_conf"):
             if args.frontend_conf is not None and "fs" in args.frontend_conf:
@@ -1683,7 +1671,6 @@
             iter_options: IteratorOptions,
             mode: str,
     ) -> AbsIterFactory:
-        assert check_argument_types()
 
         dataset = ESPnetDataset(
             iter_options.data_path_and_name_and_type,
@@ -1788,7 +1775,6 @@
     def build_multiple_iter_factory(
             cls, args: argparse.Namespace, distributed_option: DistributedOption, mode: str
     ):
-        assert check_argument_types()
         iter_options = cls.build_iter_options(args, distributed_option, mode)
         assert len(iter_options.data_path_and_name_and_type) > 0, len(
             iter_options.data_path_and_name_and_type
@@ -1885,7 +1871,6 @@
             inference: bool = False,
     ) -> DataLoader:
         """Build DataLoader using iterable dataset"""
-        assert check_argument_types()
         # For backward compatibility for pytorch DataLoader
         if collate_fn is not None:
             kwargs = dict(collate_fn=collate_fn)
@@ -1935,7 +1920,6 @@
             device: Device type, "cpu", "cuda", or "cuda:N".
 
         """
-        assert check_argument_types()
         if config_file is None:
             assert model_file is not None, (
                 "The argument 'model_file' must be provided "
diff --git a/funasr/tasks/asr.py b/funasr/tasks/asr.py
index 7338513..4b94aeb 100644
--- a/funasr/tasks/asr.py
+++ b/funasr/tasks/asr.py
@@ -13,8 +13,6 @@
 import numpy as np
 import torch
 import yaml
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.datasets.collate_fn import CommonCollateFn
 from funasr.datasets.preprocessor import CommonPreprocessor
@@ -491,7 +489,6 @@
         [Collection[Tuple[str, Dict[str, np.ndarray]]]],
         Tuple[List[str], Dict[str, torch.Tensor]],
     ]:
-        assert check_argument_types()
         # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
         return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)
 
@@ -499,7 +496,6 @@
     def build_preprocess_fn(
             cls, args: argparse.Namespace, train: bool
     ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
-        assert check_argument_types()
         if args.use_preprocessor:
             retval = CommonPreprocessor(
                 train=train,
@@ -529,7 +525,6 @@
             )
         else:
             retval = None
-        assert check_return_type(retval)
         return retval
 
     @classmethod
@@ -548,12 +543,10 @@
             cls, train: bool = True, inference: bool = False
     ) -> Tuple[str, ...]:
         retval = ()
-        assert check_return_type(retval)
         return retval
 
     @classmethod
     def build_model(cls, args: argparse.Namespace):
-        assert check_argument_types()
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
                 token_list = [line.rstrip() for line in f]
@@ -658,7 +651,6 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
 
 
@@ -701,7 +693,6 @@
 
     @classmethod
     def build_model(cls, args: argparse.Namespace):
-        assert check_argument_types()
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
                 token_list = [line.rstrip() for line in f]
@@ -838,7 +829,6 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
 
     # ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~
@@ -860,7 +850,6 @@
             device: Device type, "cpu", "cuda", or "cuda:N".
 
         """
-        assert check_argument_types()
         if config_file is None:
             assert model_file is not None, (
                 "The argument 'model_file' must be provided "
@@ -975,7 +964,6 @@
 
     @classmethod
     def build_model(cls, args: argparse.Namespace):
-        assert check_argument_types()
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
                 token_list = [line.rstrip() for line in f]
@@ -1085,7 +1073,6 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
 
     # ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~
@@ -1107,7 +1094,6 @@
             device: Device type, "cpu", "cuda", or "cuda:N".
 
         """
-        assert check_argument_types()
         if config_file is None:
             assert model_file is not None, (
                 "The argument 'model_file' must be provided "
@@ -1210,7 +1196,6 @@
 
     @classmethod
     def build_model(cls, args: argparse.Namespace):
-        assert check_argument_types()
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
                 token_list = [line.rstrip() for line in f]
@@ -1308,7 +1293,6 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
 
 
@@ -1333,7 +1317,6 @@
 
     @classmethod
     def build_model(cls, args: argparse.Namespace):
-        assert check_argument_types()
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
                 token_list = [line.rstrip() for line in f]
@@ -1388,7 +1371,6 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
 
     @classmethod
@@ -1425,7 +1407,6 @@
         Return:
             model: ASR Transducer model.
         """
-        assert check_argument_types()
 
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
@@ -1524,7 +1505,6 @@
                 "Initialization part will be reworked in a short future.",
             )
 
-        #assert check_return_type(model)
 
         return model
 
@@ -1559,7 +1539,6 @@
 
     @classmethod
     def build_model(cls, args: argparse.Namespace):
-        assert check_argument_types()
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
                 token_list = [line.rstrip() for line in f]
@@ -1645,5 +1624,4 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
diff --git a/funasr/tasks/data2vec.py b/funasr/tasks/data2vec.py
index 9a64e1f..b11d7de 100644
--- a/funasr/tasks/data2vec.py
+++ b/funasr/tasks/data2vec.py
@@ -8,8 +8,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.datasets.collate_fn import CommonCollateFn
 from funasr.datasets.preprocessor import CommonPreprocessor
@@ -256,14 +254,12 @@
         [Collection[Tuple[str, Dict[str, np.ndarray]]]],
         Tuple[List[str], Dict[str, torch.Tensor]],
     ]:
-        assert check_argument_types()
         return CommonCollateFn(clipping=True)
 
     @classmethod
     def build_preprocess_fn(
             cls, args: argparse.Namespace, train: bool
     ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
-        assert check_argument_types()
         if args.use_preprocessor:
             retval = CommonPreprocessor(
                 train=train,
@@ -289,7 +285,6 @@
             )
         else:
             retval = None
-        assert check_return_type(retval)
         return retval
 
     @classmethod
@@ -305,12 +300,10 @@
             cls, train: bool = True, inference: bool = False
     ) -> Tuple[str, ...]:
         retval = ()
-        assert check_return_type(retval)
         return retval
 
     @classmethod
     def build_model(cls, args: argparse.Namespace):
-        assert check_argument_types()
 
         # 1. frontend
         if args.input_size is None:
@@ -372,5 +365,4 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
diff --git a/funasr/tasks/diar.py b/funasr/tasks/diar.py
index 2625fec..a486a46 100644
--- a/funasr/tasks/diar.py
+++ b/funasr/tasks/diar.py
@@ -21,8 +21,6 @@
 import numpy as np
 import torch
 import yaml
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.datasets.collate_fn import CommonCollateFn
 from funasr.datasets.preprocessor import CommonPreprocessor
@@ -344,7 +342,6 @@
         [Collection[Tuple[str, Dict[str, np.ndarray]]]],
         Tuple[List[str], Dict[str, torch.Tensor]],
     ]:
-        assert check_argument_types()
         # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
         return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)
 
@@ -352,7 +349,6 @@
     def build_preprocess_fn(
             cls, args: argparse.Namespace, train: bool
     ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
-        assert check_argument_types()
         if args.use_preprocessor:
             retval = CommonPreprocessor(
                 train=train,
@@ -382,7 +378,6 @@
             )
         else:
             retval = None
-        assert check_return_type(retval)
         return retval
 
     @classmethod
@@ -401,12 +396,10 @@
             cls, train: bool = True, inference: bool = False
     ) -> Tuple[str, ...]:
         retval = ()
-        assert check_return_type(retval)
         return retval
 
     @classmethod
     def build_model(cls, args: argparse.Namespace):
-        assert check_argument_types()
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
                 token_list = [line.rstrip() for line in f]
@@ -505,7 +498,6 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
 
     # ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~
@@ -528,7 +520,6 @@
             device: Device type, "cpu", "cuda", or "cuda:N".
 
         """
-        assert check_argument_types()
         if config_file is None:
             assert model_file is not None, (
                 "The argument 'model_file' must be provided "
@@ -764,7 +755,6 @@
         [Collection[Tuple[str, Dict[str, np.ndarray]]]],
         Tuple[List[str], Dict[str, torch.Tensor]],
     ]:
-        assert check_argument_types()
         # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
         return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)
 
@@ -772,7 +762,6 @@
     def build_preprocess_fn(
             cls, args: argparse.Namespace, train: bool
     ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
-        assert check_argument_types()
         # if args.use_preprocessor:
         #     retval = CommonPreprocessor(
         #         train=train,
@@ -802,7 +791,6 @@
         #     )
         # else:
         #     retval = None
-        # assert check_return_type(retval)
         return None
 
     @classmethod
@@ -821,12 +809,10 @@
             cls, train: bool = True, inference: bool = False
     ) -> Tuple[str, ...]:
         retval = ()
-        assert check_return_type(retval)
         return retval
 
     @classmethod
     def build_model(cls, args: argparse.Namespace):
-        assert check_argument_types()
 
         # 1. frontend
         if args.input_size is None or args.frontend == "wav_frontend_mel23":
@@ -865,7 +851,6 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
 
     # ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~
@@ -888,7 +873,6 @@
             device: Device type, "cpu", "cuda", or "cuda:N".
 
         """
-        assert check_argument_types()
         if config_file is None:
             assert model_file is not None, (
                 "The argument 'model_file' must be provided "
diff --git a/funasr/tasks/lm.py b/funasr/tasks/lm.py
index 44fdf8e..c0259a8 100644
--- a/funasr/tasks/lm.py
+++ b/funasr/tasks/lm.py
@@ -9,8 +9,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.datasets.collate_fn import CommonCollateFn
 from funasr.datasets.preprocessor import CommonPreprocessor
@@ -52,7 +50,6 @@
     @classmethod
     def add_task_arguments(cls, parser: argparse.ArgumentParser):
         # NOTE(kamo): Use '_' instead of '-' to avoid confusion
-        assert check_argument_types()
         group = parser.add_argument_group(description="Task related")
 
         # NOTE(kamo): add_arguments(..., required=True) can't be used
@@ -130,7 +127,6 @@
         for class_choices in cls.class_choices_list:
             class_choices.add_arguments(group)
 
-        assert check_return_type(parser)
         return parser
 
     @classmethod
@@ -140,14 +136,12 @@
         [Collection[Tuple[str, Dict[str, np.ndarray]]]],
         Tuple[List[str], Dict[str, torch.Tensor]],
     ]:
-        assert check_argument_types()
         return CommonCollateFn(int_pad_value=0)
 
     @classmethod
     def build_preprocess_fn(
             cls, args: argparse.Namespace, train: bool
     ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
-        assert check_argument_types()
         if args.use_preprocessor:
             retval = CommonPreprocessor(
                 train=train,
@@ -160,7 +154,6 @@
             )
         else:
             retval = None
-        assert check_return_type(retval)
         return retval
 
     @classmethod
@@ -179,7 +172,6 @@
 
     @classmethod
     def build_model(cls, args: argparse.Namespace) -> LanguageModel:
-        assert check_argument_types()
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
                 token_list = [line.rstrip() for line in f]
diff --git a/funasr/tasks/punctuation.py b/funasr/tasks/punctuation.py
index a63bbe4..de5c897 100644
--- a/funasr/tasks/punctuation.py
+++ b/funasr/tasks/punctuation.py
@@ -9,8 +9,6 @@
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.datasets.collate_fn import CommonCollateFn
 from funasr.datasets.preprocessor import PuncTrainTokenizerCommonPreprocessor
@@ -47,7 +45,6 @@
     @classmethod
     def add_task_arguments(cls, parser: argparse.ArgumentParser):
         # NOTE(kamo): Use '_' instead of '-' to avoid confusion
-        assert check_argument_types()
         group = parser.add_argument_group(description="Task related")
 
         # NOTE(kamo): add_arguments(..., required=True) can't be used
@@ -126,7 +123,6 @@
             # e.g. --encoder and --encoder_conf
             class_choices.add_arguments(group)
 
-        assert check_return_type(parser)
         return parser
 
     @classmethod
@@ -136,14 +132,12 @@
         [Collection[Tuple[str, Dict[str, np.ndarray]]]],
         Tuple[List[str], Dict[str, torch.Tensor]],
     ]:
-        assert check_argument_types()
         return CommonCollateFn(int_pad_value=0)
 
     @classmethod
     def build_preprocess_fn(
             cls, args: argparse.Namespace, train: bool
     ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
-        assert check_argument_types()
         token_types = [args.token_type, args.token_type]
         token_lists = [args.token_list, args.punc_list]
         bpemodels = [args.bpemodel, args.bpemodel]
@@ -161,7 +155,6 @@
             )
         else:
             retval = None
-        assert check_return_type(retval)
         return retval
 
     @classmethod
@@ -182,7 +175,6 @@
 
     @classmethod
     def build_model(cls, args: argparse.Namespace) -> PunctuationModel:
-        assert check_argument_types()
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
                 token_list = [line.rstrip() for line in f]
@@ -223,5 +215,4 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
diff --git a/funasr/tasks/sa_asr.py b/funasr/tasks/sa_asr.py
index 9579483..e7ee5a3 100644
--- a/funasr/tasks/sa_asr.py
+++ b/funasr/tasks/sa_asr.py
@@ -13,8 +13,6 @@
 import numpy as np
 import torch
 import yaml
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.datasets.collate_fn import CommonCollateFn
 from funasr.datasets.preprocessor import CommonPreprocessor
@@ -445,7 +443,6 @@
         [Collection[Tuple[str, Dict[str, np.ndarray]]]],
         Tuple[List[str], Dict[str, torch.Tensor]],
     ]:
-        assert check_argument_types()
         # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
         return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)
 
@@ -453,7 +450,6 @@
     def build_preprocess_fn(
             cls, args: argparse.Namespace, train: bool
     ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
-        assert check_argument_types()
         if args.use_preprocessor:
             retval = CommonPreprocessor(
                 train=train,
@@ -483,7 +479,6 @@
             )
         else:
             retval = None
-        assert check_return_type(retval)
         return retval
 
     @classmethod
@@ -502,12 +497,10 @@
             cls, train: bool = True, inference: bool = False
     ) -> Tuple[str, ...]:
         retval = ()
-        assert check_return_type(retval)
         return retval
 
     @classmethod
     def build_model(cls, args: argparse.Namespace):
-        assert check_argument_types()
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
                 token_list = [line.rstrip() for line in f]
@@ -619,5 +612,4 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
\ No newline at end of file
diff --git a/funasr/tasks/sv.py b/funasr/tasks/sv.py
index e4815da..e698522 100644
--- a/funasr/tasks/sv.py
+++ b/funasr/tasks/sv.py
@@ -17,8 +17,6 @@
 import numpy as np
 import torch
 import yaml
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.datasets.collate_fn import CommonCollateFn
 from funasr.datasets.preprocessor import CommonPreprocessor
@@ -273,7 +271,6 @@
         [Collection[Tuple[str, Dict[str, np.ndarray]]]],
         Tuple[List[str], Dict[str, torch.Tensor]],
     ]:
-        assert check_argument_types()
         # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
         return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)
 
@@ -281,7 +278,6 @@
     def build_preprocess_fn(
             cls, args: argparse.Namespace, train: bool
     ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
-        assert check_argument_types()
         if args.use_preprocessor:
             retval = CommonPreprocessor(
                 train=train,
@@ -309,7 +305,6 @@
             )
         else:
             retval = None
-        assert check_return_type(retval)
         return retval
 
     @classmethod
@@ -330,12 +325,10 @@
         retval = ()
         if inference:
             retval = ("ref_speech",)
-        assert check_return_type(retval)
         return retval
 
     @classmethod
     def build_model(cls, args: argparse.Namespace) -> ESPnetSVModel:
-        assert check_argument_types()
         if isinstance(args.token_list, str):
             with open(args.token_list, encoding="utf-8") as f:
                 token_list = [line.rstrip() for line in f]
@@ -449,7 +442,6 @@
         if args.init is not None:
             initialize(model, args.init)
 
-        assert check_return_type(model)
         return model
 
     # ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~
@@ -472,7 +464,6 @@
             device: Device type, "cpu", "cuda", or "cuda:N".
 
         """
-        assert check_argument_types()
         if config_file is None:
             assert model_file is not None, (
                 "The argument 'model_file' must be provided "
diff --git a/funasr/tasks/vad.py b/funasr/tasks/vad.py
index ec95596..822be22 100644
--- a/funasr/tasks/vad.py
+++ b/funasr/tasks/vad.py
@@ -13,8 +13,6 @@
 import numpy as np
 import torch
 import yaml
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.datasets.collate_fn import CommonCollateFn
 from funasr.layers.abs_normalize import AbsNormalize
@@ -192,7 +190,6 @@
         [Collection[Tuple[str, Dict[str, np.ndarray]]]],
         Tuple[List[str], Dict[str, torch.Tensor]],
     ]:
-        assert check_argument_types()
         # NOTE(kamo): int value = 0 is reserved by CTC-blank symbol
         return CommonCollateFn(float_pad_value=0.0, int_pad_value=-1)
 
@@ -200,7 +197,6 @@
     def build_preprocess_fn(
             cls, args: argparse.Namespace, train: bool
     ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
-        assert check_argument_types()
         # if args.use_preprocessor:
         #    retval = CommonPreprocessor(
         #        train=train,
@@ -223,7 +219,6 @@
         # else:
         #    retval = None
         retval = None
-        assert check_return_type(retval)
         return retval
 
     @classmethod
@@ -242,12 +237,10 @@
             cls, train: bool = True, inference: bool = False
     ) -> Tuple[str, ...]:
         retval = ()
-        assert check_return_type(retval)
         return retval
 
     @classmethod
     def build_model(cls, args: argparse.Namespace):
-        assert check_argument_types()
         # 4. Encoder
         encoder_class = encoder_choices.get_class(args.encoder)
         encoder = encoder_class(**args.encoder_conf)
@@ -297,7 +290,6 @@
             device: Device type, "cpu", "cuda", or "cuda:N".
 
         """
-        assert check_argument_types()
         if config_file is None:
             assert model_file is not None, (
                 "The argument 'model_file' must be provided "
diff --git a/funasr/text/build_tokenizer.py b/funasr/text/build_tokenizer.py
index 8e29d3e..c60a335 100644
--- a/funasr/text/build_tokenizer.py
+++ b/funasr/text/build_tokenizer.py
@@ -2,7 +2,6 @@
 from typing import Iterable
 from typing import Union
 
-from typeguard import check_argument_types
 
 from funasr.text.abs_tokenizer import AbsTokenizer
 from funasr.text.char_tokenizer import CharTokenizer
@@ -21,7 +20,6 @@
     g2p_type: str = None,
 ) -> AbsTokenizer:
     """A helper function to instantiate Tokenizer"""
-    assert check_argument_types()
     if token_type == "bpe":
         if bpemodel is None:
             raise ValueError('bpemodel is required if token_type = "bpe"')
diff --git a/funasr/text/char_tokenizer.py b/funasr/text/char_tokenizer.py
index 00ae427..8d1daf4 100644
--- a/funasr/text/char_tokenizer.py
+++ b/funasr/text/char_tokenizer.py
@@ -4,7 +4,6 @@
 from typing import Union
 import warnings
 
-from typeguard import check_argument_types
 
 from funasr.text.abs_tokenizer import AbsTokenizer
 
@@ -16,7 +15,6 @@
         space_symbol: str = "<space>",
         remove_non_linguistic_symbols: bool = False,
     ):
-        assert check_argument_types()
         self.space_symbol = space_symbol
         if non_linguistic_symbols is None:
             self.non_linguistic_symbols = set()
diff --git a/funasr/text/cleaner.py b/funasr/text/cleaner.py
index be26940..6322672 100644
--- a/funasr/text/cleaner.py
+++ b/funasr/text/cleaner.py
@@ -2,7 +2,6 @@
 
 from jaconv import jaconv
 import tacotron_cleaner.cleaners
-from typeguard import check_argument_types
 
 try:
     from vietnamese_cleaner import vietnamese_cleaners
@@ -21,7 +20,6 @@
     """
 
     def __init__(self, cleaner_types: Collection[str] = None):
-        assert check_argument_types()
 
         if cleaner_types is None:
             self.cleaner_types = []
diff --git a/funasr/text/phoneme_tokenizer.py b/funasr/text/phoneme_tokenizer.py
index d424b40..ad3d81c 100644
--- a/funasr/text/phoneme_tokenizer.py
+++ b/funasr/text/phoneme_tokenizer.py
@@ -9,7 +9,6 @@
 
 # import g2p_en
 import jamo
-from typeguard import check_argument_types
 
 from funasr.text.abs_tokenizer import AbsTokenizer
 
@@ -365,7 +364,6 @@
         space_symbol: str = "<space>",
         remove_non_linguistic_symbols: bool = False,
     ):
-        assert check_argument_types()
         if g2p_type is None:
             self.g2p = split_by_space
         elif g2p_type == "g2p_en":
diff --git a/funasr/text/sentencepiece_tokenizer.py b/funasr/text/sentencepiece_tokenizer.py
index e4cc152..e393cee 100644
--- a/funasr/text/sentencepiece_tokenizer.py
+++ b/funasr/text/sentencepiece_tokenizer.py
@@ -4,14 +4,12 @@
 from typing import Union
 
 import sentencepiece as spm
-from typeguard import check_argument_types
 
 from funasr.text.abs_tokenizer import AbsTokenizer
 
 
 class SentencepiecesTokenizer(AbsTokenizer):
     def __init__(self, model: Union[Path, str]):
-        assert check_argument_types()
         self.model = str(model)
         # NOTE(kamo):
         # Don't build SentencePieceProcessor in __init__()
diff --git a/funasr/text/token_id_converter.py b/funasr/text/token_id_converter.py
index c9a6b28..1888d75 100644
--- a/funasr/text/token_id_converter.py
+++ b/funasr/text/token_id_converter.py
@@ -5,7 +5,6 @@
 from typing import Union
 
 import numpy as np
-from typeguard import check_argument_types
 
 
 class TokenIDConverter:
@@ -14,7 +13,6 @@
         token_list: Union[Path, str, Iterable[str]],
         unk_symbol: str = "<unk>",
     ):
-        assert check_argument_types()
 
         if isinstance(token_list, (Path, str)):
             token_list = Path(token_list)
diff --git a/funasr/text/word_tokenizer.py b/funasr/text/word_tokenizer.py
index 842734e..f4d33d5 100644
--- a/funasr/text/word_tokenizer.py
+++ b/funasr/text/word_tokenizer.py
@@ -4,7 +4,6 @@
 from typing import Union
 import warnings
 
-from typeguard import check_argument_types
 
 from funasr.text.abs_tokenizer import AbsTokenizer
 
@@ -16,7 +15,6 @@
         non_linguistic_symbols: Union[Path, str, Iterable[str]] = None,
         remove_non_linguistic_symbols: bool = False,
     ):
-        assert check_argument_types()
         self.delimiter = delimiter
 
         if not remove_non_linguistic_symbols and non_linguistic_symbols is not None:
diff --git a/funasr/torch_utils/forward_adaptor.py b/funasr/torch_utils/forward_adaptor.py
index 114af78..eb6da2b 100644
--- a/funasr/torch_utils/forward_adaptor.py
+++ b/funasr/torch_utils/forward_adaptor.py
@@ -1,5 +1,4 @@
 import torch
-from typeguard import check_argument_types
 
 
 class ForwardAdaptor(torch.nn.Module):
@@ -21,7 +20,6 @@
     """
 
     def __init__(self, module: torch.nn.Module, name: str):
-        assert check_argument_types()
         super().__init__()
         self.module = module
         self.name = name
diff --git a/funasr/torch_utils/initialize.py b/funasr/torch_utils/initialize.py
index 2c0e7a4..e4ec534 100644
--- a/funasr/torch_utils/initialize.py
+++ b/funasr/torch_utils/initialize.py
@@ -4,7 +4,6 @@
 
 import math
 import torch
-from typeguard import check_argument_types
 
 
 def initialize(model: torch.nn.Module, init: str):
@@ -19,7 +18,6 @@
         model: Target.
         init: Method of initialization.
     """
-    assert check_argument_types()
 
     if init == "chainer":
         # 1. lecun_normal_init_parameters
diff --git a/funasr/train/abs_model.py b/funasr/train/abs_model.py
index 8d684be..9687376 100644
--- a/funasr/train/abs_model.py
+++ b/funasr/train/abs_model.py
@@ -8,7 +8,6 @@
 
 import torch
 import torch.nn.functional as F
-from typeguard import check_argument_types
 
 from funasr.modules.nets_utils import make_pad_mask
 from funasr.torch_utils.device_funcs import force_gatherable
@@ -34,7 +33,6 @@
 
 class LanguageModel(FunASRModel):
     def __init__(self, lm: AbsLM, vocab_size: int, ignore_id: int = 0):
-        assert check_argument_types()
         super().__init__()
         self.lm = lm
         self.sos = 1
@@ -154,7 +152,6 @@
 class PunctuationModel(FunASRModel):
     
     def __init__(self, punc_model: torch.nn.Module, vocab_size: int, ignore_id: int = 0, punc_weight: list = None):
-        assert check_argument_types()
         super().__init__()
         self.punc_model = punc_model
         self.punc_weight = torch.Tensor(punc_weight)
diff --git a/funasr/train/class_choices.py b/funasr/train/class_choices.py
index 658d291..1ffb97a 100644
--- a/funasr/train/class_choices.py
+++ b/funasr/train/class_choices.py
@@ -2,8 +2,6 @@
 from typing import Optional
 from typing import Tuple
 
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.utils.nested_dict_action import NestedDictAction
 from funasr.utils.types import str_or_none
@@ -40,7 +38,6 @@
         default: str = None,
         optional: bool = False,
     ):
-        assert check_argument_types()
         self.name = name
         self.base_type = type_check
         self.classes = {k.lower(): v for k, v in classes.items()}
@@ -64,12 +61,10 @@
             return retval
 
     def get_class(self, name: Optional[str]) -> Optional[type]:
-        assert check_argument_types()
         if name is None or (self.optional and name.lower() == ("none", "null", "nil")):
             retval = None
         elif name.lower() in self.classes:
             class_obj = self.classes[name]
-            assert check_return_type(class_obj)
             retval = class_obj
         else:
             raise ValueError(
diff --git a/funasr/train/reporter.py b/funasr/train/reporter.py
index 2921fef..cfe31f5 100644
--- a/funasr/train/reporter.py
+++ b/funasr/train/reporter.py
@@ -18,8 +18,6 @@
 import humanfriendly
 import numpy as np
 import torch
-from typeguard import check_argument_types
-from typeguard import check_return_type
 
 Num = Union[float, int, complex, torch.Tensor, np.ndarray]
 
@@ -27,7 +25,6 @@
 
 
 def to_reported_value(v: Num, weight: Num = None) -> "ReportedValue":
-    assert check_argument_types()
     if isinstance(v, (torch.Tensor, np.ndarray)):
         if np.prod(v.shape) != 1:
             raise ValueError(f"v must be 0 or 1 dimension: {len(v.shape)}")
@@ -42,12 +39,10 @@
         retval = WeightedAverage(v, weight)
     else:
         retval = Average(v)
-    assert check_return_type(retval)
     return retval
 
 
 def aggregate(values: Sequence["ReportedValue"]) -> Num:
-    assert check_argument_types()
 
     for v in values:
         if not isinstance(v, type(values[0])):
@@ -86,7 +81,6 @@
 
     else:
         raise NotImplementedError(f"type={type(values[0])}")
-    assert check_return_type(retval)
     return retval
 
 
@@ -122,7 +116,6 @@
     """
 
     def __init__(self, key: str, epoch: int, total_count: int):
-        assert check_argument_types()
         self.key = key
         self.epoch = epoch
         self.start_time = time.perf_counter()
@@ -160,7 +153,6 @@
             stats: Dict[str, Optional[Union[Num, Dict[str, Num]]]],
             weight: Num = None,
     ) -> None:
-        assert check_argument_types()
         if self._finished:
             raise RuntimeError("Already finished")
         if len(self._seen_keys_in_the_step) == 0:
@@ -293,7 +285,6 @@
     """
 
     def __init__(self, epoch: int = 0):
-        assert check_argument_types()
         if epoch < 0:
             raise ValueError(f"epoch must be 0 or more: {epoch}")
         self.epoch = epoch
diff --git a/funasr/train/trainer.py b/funasr/train/trainer.py
index f066909..a25f39a 100644
--- a/funasr/train/trainer.py
+++ b/funasr/train/trainer.py
@@ -26,7 +26,6 @@
 import torch
 import torch.nn
 import torch.optim
-from typeguard import check_argument_types
 
 from funasr.iterators.abs_iter_factory import AbsIterFactory
 from funasr.main_funcs.average_nbest_models import average_nbest_models
@@ -127,7 +126,6 @@
     @classmethod
     def build_options(cls, args: argparse.Namespace) -> TrainerOptions:
         """Build options consumed by train(), eval()"""
-        assert check_argument_types()
         return build_dataclass(TrainerOptions, args)
 
     @classmethod
@@ -188,7 +186,6 @@
         distributed_option: DistributedOption,
     ) -> None:
         """Perform training. This method performs the main process of training."""
-        assert check_argument_types()
         # NOTE(kamo): Don't check the type more strictly as far trainer_options
         assert is_dataclass(trainer_options), type(trainer_options)
         assert len(optimizers) == len(schedulers), (len(optimizers), len(schedulers))
@@ -551,7 +548,6 @@
         options: TrainerOptions,
         distributed_option: DistributedOption,
     ) -> Tuple[bool, bool]:
-        assert check_argument_types()
 
         grad_noise = options.grad_noise
         accum_grad = options.accum_grad
@@ -845,7 +841,6 @@
         options: TrainerOptions,
         distributed_option: DistributedOption,
     ) -> None:
-        assert check_argument_types()
         ngpu = options.ngpu
         no_forward_run = options.no_forward_run
         distributed = distributed_option.distributed
diff --git a/funasr/utils/griffin_lim.py b/funasr/utils/griffin_lim.py
index c1536d5..9e98ab8 100644
--- a/funasr/utils/griffin_lim.py
+++ b/funasr/utils/griffin_lim.py
@@ -9,7 +9,6 @@
 
 from distutils.version import LooseVersion
 from functools import partial
-from typeguard import check_argument_types
 from typing import Optional
 
 import librosa
@@ -138,7 +137,6 @@
             griffin_lim_iters: The number of iterations.
 
         """
-        assert check_argument_types()
         self.fs = fs
         self.logmel2linear = (
             partial(
diff --git a/setup.py b/setup.py
index f13a2c2..30e1f1a 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,6 @@
 
 import os
 
-from distutils.version import LooseVersion
 from setuptools import find_packages
 from setuptools import setup
 
@@ -12,73 +11,45 @@
 requirements = {
     "install": [
         "setuptools>=38.5.1",
-        # "configargparse>=1.2.1",
-        "typeguard==2.13.3",
+        "typeguard>=3.0.1",
         "humanfriendly",
         "scipy>=1.4.1",
-        # "filelock",
         "librosa",
-        "jamo==0.4.1",  # For kss
+        "jamo",  # For kss
         "PyYAML>=5.1.2",
-        "soundfile>=0.11.0",
+        "soundfile>=0.10.2",
         "h5py>=2.10.0",
         "kaldiio>=2.17.0",
         "torch_complex",
         "nltk>=3.4.5",
         # ASR
         "sentencepiece",
-        # "ctc-segmentation<1.8,>=1.6.6",
         # TTS
-        # "pyworld>=0.2.10",
-        "pypinyin<=0.44.0",
+        "pypinyin>=0.44.0",
         "espnet_tts_frontend",
         # ENH
-        # "ci_sdr",
         "pytorch_wpe",
         "editdistance>=0.5.2",
-        "tensorboard==1.15",
+        "tensorboard",
         "g2p",
         # PAI
         "oss2",
-        # "kaldi-native-fbank",
-        # timestamp
         "edit-distance",
-        # textgrid
         "textgrid",
-        "protobuf==3.20.0",
+        "protobuf",
     ],
     # train: The modules invoked when training only.
     "train": [
-        # "pillow>=6.1.0",
-        "editdistance==0.5.2",
+        "editdistance",
         "wandb",
-    ],
-    # recipe: The modules actually are not invoked in the main module of funasr,
-    #         but are invoked for the python scripts in each recipe
-    "recipe": [
-        "espnet_model_zoo",
-        # "gdown",
-        # "resampy",
-        # "pysptk>=0.1.17",
-        # "morfessor",  # for zeroth-korean
-        # "youtube_dl",  # for laborotv
-        # "nnmnkwii",
-        # "museval>=0.2.1",
-        # "pystoi>=0.2.2",
-        # "mir-eval>=0.6",
-        # "fastdtw",
-        # "nara_wpe>=0.0.5",
-        # "sacrebleu>=1.5.1",
     ],
     # all: The modules should be optionally installled due to some reason.
     #      Please consider moving them to "install" occasionally
-    # NOTE(kamo): The modules in "train" and "recipe" are appended into "all"
     "all": [
         # NOTE(kamo): Append modules requiring specific pytorch version or torch>1.3.0
         "torch_optimizer",
         "fairscale",
         "transformers",
-        # "gtn==0.0.0",
     ],
     "setup": [
         "numpy",
@@ -98,17 +69,18 @@
         "black",
     ],
     "doc": [
-        "Jinja2<3.1",
-        "Sphinx==2.1.2",
+        "Jinja2",
+        "Sphinx",
         "sphinx-rtd-theme>=0.2.4",
         "sphinx-argparse>=0.2.5",
-        "commonmark==0.8.1",
+        "commonmark",
         "recommonmark>=0.4.0",
         "nbsphinx>=0.4.2",
         "sphinx-markdown-tables>=0.0.12",
+        "configargparse>=1.2.1"
     ],
 }
-requirements["all"].extend(requirements["train"] + requirements["recipe"])
+requirements["all"].extend(requirements["train"])
 requirements["test"].extend(requirements["train"])
 
 install_requires = requirements["install"]
@@ -151,4 +123,4 @@
         "License :: OSI Approved :: Apache Software License",
         "Topic :: Software Development :: Libraries :: Python Modules",
     ],
-)
+)
\ No newline at end of file

--
Gitblit v1.9.1