From 68852c3072581a98ec9d114f3d330ec3fdbb2ea2 Mon Sep 17 00:00:00 2001
From: speech_asr <wangjiaming.wjm@alibaba-inc.com>
Date: Thu, 20 Apr 2023 15:35:25 +0800
Subject: [PATCH] update
---
funasr/models/e2e_asr_mfcca.py | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/funasr/models/e2e_asr_mfcca.py b/funasr/models/e2e_asr_mfcca.py
index 0336133..efdd90d 100644
--- a/funasr/models/e2e_asr_mfcca.py
+++ b/funasr/models/e2e_asr_mfcca.py
@@ -17,13 +17,10 @@
)
from funasr.models.ctc import CTC
from funasr.models.decoder.abs_decoder import AbsDecoder
-from funasr.models.encoder.abs_encoder import AbsEncoder
-from funasr.models.frontend.abs_frontend import AbsFrontend
from funasr.models.preencoder.abs_preencoder import AbsPreEncoder
-from funasr.models.specaug.abs_specaug import AbsSpecAug
-from funasr.layers.abs_normalize import AbsNormalize
+from funasr.models.base_model import FunASRModel
from funasr.torch_utils.device_funcs import force_gatherable
-from funasr.train.abs_espnet_model import AbsESPnetModel
+
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
@@ -35,18 +32,18 @@
import pdb
import random
import math
-class MFCCA(AbsESPnetModel):
+class MFCCA(FunASRModel):
"""CTC-attention hybrid Encoder-Decoder model"""
def __init__(
self,
vocab_size: int,
token_list: Union[Tuple[str, ...], List[str]],
- frontend: Optional[AbsFrontend],
- specaug: Optional[AbsSpecAug],
- normalize: Optional[AbsNormalize],
+ frontend: Optional[torch.nn.Module],
+ specaug: Optional[torch.nn.Module],
+ normalize: Optional[torch.nn.Module],
preencoder: Optional[AbsPreEncoder],
- encoder: AbsEncoder,
+ encoder: torch.nn.Module,
decoder: AbsDecoder,
ctc: CTC,
rnnt_decoder: None,
--
Gitblit v1.9.1