嘉渊
2023-06-14 520bbc3d5cd9e8039b3287a5a5eea28d2976f26f
funasr/build_utils/build_asr_model.py
@@ -6,6 +6,7 @@
 from funasr.models.decoder.abs_decoder import AbsDecoder
 from funasr.models.decoder.contextual_decoder import ContextualParaformerDecoder
 from funasr.models.decoder.rnn_decoder import RNNDecoder
+from funasr.models.decoder.rnnt_decoder import RNNTDecoder
 from funasr.models.decoder.sanm_decoder import ParaformerSANMDecoder, FsmnDecoderSCAMAOpt
 from funasr.models.decoder.transformer_decoder import (
     DynamicConvolution2DTransformerDecoder,  # noqa: H301
@@ -21,10 +22,12 @@
 from funasr.models.decoder.transformer_decoder import TransformerDecoder
 from funasr.models.e2e_asr import ASRModel
 from funasr.models.e2e_asr_mfcca import MFCCA
-from funasr.models.e2e_asr_paraformer import Paraformer, ParaformerBert, BiCifParaformer, ContextualParaformer
+from funasr.models.e2e_asr_paraformer import Paraformer, ParaformerOnline, ParaformerBert, BiCifParaformer, \
+    ContextualParaformer
+from funasr.models.e2e_asr_transducer import TransducerModel, UnifiedTransducerModel
 from funasr.models.e2e_tp import TimestampPredictor
 from funasr.models.e2e_uni_asr import UniASR
-from funasr.models.encoder.conformer_encoder import ConformerEncoder
+from funasr.models.encoder.conformer_encoder import ConformerEncoder, ConformerChunkEncoder
 from funasr.models.encoder.data2vec_encoder import Data2VecEncoder
 from funasr.models.encoder.mfcca_encoder import MFCCAEncoder
 from funasr.models.encoder.rnn_encoder import RNNEncoder
@@ -36,6 +39,7 @@
 from funasr.models.frontend.s3prl import S3prlFrontend
 from funasr.models.frontend.wav_frontend import WavFrontend
 from funasr.models.frontend.windowing import SlidingWindow
+from funasr.models.joint_net.joint_network import JointNetwork
 from funasr.models.predictor.cif import CifPredictor, CifPredictorV2, CifPredictorV3
 from funasr.models.specaug.specaug import SpecAug
 from funasr.models.specaug.specaug import SpecAugLFR
@@ -79,11 +83,14 @@
         asr=ASRModel,
         uniasr=UniASR,
         paraformer=Paraformer,
+        paraformer_online=ParaformerOnline,
         paraformer_bert=ParaformerBert,
         bicif_paraformer=BiCifParaformer,
         contextual_paraformer=ContextualParaformer,
         mfcca=MFCCA,
         timestamp_prediction=TimestampPredictor,
+        rnnt=TransducerModel,
+        rnnt_unified=UnifiedTransducerModel,
     ),
     default="asr",
 )
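Note: a minimal usage sketch (not part of the commit) of how the new registry keys resolve, assuming the enclosing model_choices = ClassChoices(...) definition this hunk edits and the get_class() lookup used later in this file:

    from funasr.build_utils.build_asr_model import model_choices
    from funasr.models.e2e_asr_transducer import TransducerModel

    # "rnnt" now maps to the transducer model imported at the top of this diff.
    assert model_choices.get_class("rnnt") is TransducerModel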
@@ -97,6 +104,7 @@
         sanm_chunk_opt=SANMEncoderChunkOpt,
         data2vec_encoder=Data2VecEncoder,
         mfcca_enc=MFCCAEncoder,
+        chunk_conformer=ConformerChunkEncoder,
     ),
     default="rnn",
 )
@@ -171,6 +179,23 @@
    default="stride_conv1d",
    optional=True,
)
rnnt_decoder_choices = ClassChoices(
    name="rnnt_decoder",
    classes=dict(
        rnnt=RNNTDecoder,
    ),
    default="rnnt",
    optional=True,
)
joint_network_choices = ClassChoices(
    name="joint_network",
    classes=dict(
        joint_network=JointNetwork,
    ),
    default="joint_network",
    optional=True,
)
class_choices_list = [
    # --frontend and --frontend_conf
    frontend_choices,
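Note: the two new ClassChoices above follow the same name-to-class pattern as the existing ones; a hedged sketch of the lookups build_asr_model performs further down (lookup only, since any constructor kwargs beyond vocab_size come from the matching *_conf dicts):

    from funasr.build_utils.build_asr_model import (
        joint_network_choices,
        rnnt_decoder_choices,
    )

    # Both choices are declared optional=True, so they may also be left unset.
    rnnt_decoder_class = rnnt_decoder_choices.get_class("rnnt")              # -> RNNTDecoder
    joint_network_class = joint_network_choices.get_class("joint_network")  # -> JointNetwork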
@@ -194,22 +219,30 @@
     predictor_choices2,
     # --stride_conv and --stride_conv_conf
     stride_conv_choices,
+    # --rnnt_decoder and --rnnt_decoder_conf
+    rnnt_decoder_choices,
+    # --joint_network and --joint_network_conf
+    joint_network_choices,
 ]
 
 
 def build_asr_model(args):
     # token_list
-    if args.token_list is not None:
-        with open(args.token_list) as f:
+    if isinstance(args.token_list, str):
+        with open(args.token_list, encoding="utf-8") as f:
             token_list = [line.rstrip() for line in f]
         args.token_list = list(token_list)
         vocab_size = len(token_list)
         logging.info(f"Vocabulary size: {vocab_size}")
+    elif isinstance(args.token_list, (tuple, list)):
+        token_list = list(args.token_list)
+        vocab_size = len(token_list)
+        logging.info(f"Vocabulary size: {vocab_size}")
     else:
         vocab_size = None
 
     # frontend
-    if args.input_size is None:
+    if hasattr(args, "input_size") and args.input_size is None:
         frontend_class = frontend_choices.get_class(args.frontend)
         if args.frontend == 'wav_frontend':
             frontend = frontend_class(cmvn_file=args.cmvn_file, **args.frontend_conf)
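Note: a small stdlib-only sketch (not from the commit) of what the broadened token_list handling above accepts; a str is still read as a vocabulary file, while a tuple or list is now used in memory as-is:

    from argparse import Namespace

    args = Namespace(token_list=["<blank>", "<unk>", "a", "b", "<sos/eos>"])
    # New elif branch: no vocabulary file on disk is needed for a list input.
    token_list = list(args.token_list)
    vocab_size = len(token_list)  # -> 5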
@@ -220,7 +253,7 @@
         args.frontend = None
         args.frontend_conf = {}
         frontend = None
-        input_size = args.input_size
+        input_size = args.input_size if hasattr(args, "input_size") else None
 
     # data augmentation for spectrogram
     if args.specaug is not None:
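Note: a tiny stdlib-only sketch of why the hasattr guards above matter; an args namespace built without an input_size attribute would previously raise AttributeError here:

    from argparse import Namespace

    args = Namespace(frontend=None)  # no input_size attribute at all
    input_size = args.input_size if hasattr(args, "input_size") else None  # -> None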
@@ -266,7 +299,8 @@
             token_list=token_list,
             **args.model_conf,
         )
-    elif args.model in ["paraformer", "paraformer_bert", "bicif_paraformer", "contextual_paraformer"]:
+    elif args.model in ["paraformer", "paraformer_online", "paraformer_bert", "bicif_paraformer",
+                        "contextual_paraformer"]:
         # predictor
         predictor_class = predictor_choices.get_class(args.predictor)
         predictor = predictor_class(**args.predictor_conf)
@@ -342,6 +376,50 @@
             token_list=token_list,
             **args.model_conf,
         )
+    elif args.model == "rnnt" or args.model == "rnnt_unified":
+        # 5. Decoder
+        encoder_output_size = encoder.output_size()
+
+        rnnt_decoder_class = rnnt_decoder_choices.get_class(args.rnnt_decoder)
+        decoder = rnnt_decoder_class(
+            vocab_size,
+            **args.rnnt_decoder_conf,
+        )
+        decoder_output_size = decoder.output_size
+
+        if getattr(args, "decoder", None) is not None:
+            att_decoder_class = decoder_choices.get_class(args.decoder)
+            att_decoder = att_decoder_class(
+                vocab_size=vocab_size,
+                encoder_output_size=encoder_output_size,
+                **args.decoder_conf,
+            )
+        else:
+            att_decoder = None
+
+        # 6. Joint Network
+        joint_network = JointNetwork(
+            vocab_size,
+            encoder_output_size,
+            decoder_output_size,
+            **args.joint_network_conf,
+        )
+
+        model_class = model_choices.get_class(args.model)
+
+        # 7. Build model
+        model = model_class(
+            vocab_size=vocab_size,
+            token_list=token_list,
+            frontend=frontend,
+            specaug=specaug,
+            normalize=normalize,
+            encoder=encoder,
+            decoder=decoder,
+            att_decoder=att_decoder,
+            joint_network=joint_network,
+            **args.model_conf,
+        )
     else:
         raise NotImplementedError("Not supported model: {}".format(args.model))
 
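Note: a hedged sketch (illustrative attribute values, not from the commit) of the argument surface the new transducer branch reads when it is selected:

    from argparse import Namespace

    args = Namespace(
        model="rnnt",            # or "rnnt_unified" for UnifiedTransducerModel
        rnnt_decoder="rnnt",     # resolved via rnnt_decoder_choices -> RNNTDecoder
        rnnt_decoder_conf={},    # extra kwargs for RNNTDecoder(vocab_size, ...)
        joint_network_conf={},   # extra kwargs for JointNetwork(...)
        decoder=None,            # optional attention decoder; None -> att_decoder = None
        model_conf={},
    )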
@@ -349,4 +427,4 @@
 
     if args.init is not None:
         initialize(model, args.init)
-    return model
\ No newline at end of file
+    return model