From 94de39dde2e616a01683c518023d0fab72b4e103 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 19 Feb 2024 22:21:50 +0800
Subject: [PATCH] aishell example

---
 funasr/models/sanm/decoder.py |   14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/funasr/models/sanm/decoder.py b/funasr/models/sanm/decoder.py
index 64033ad..3575282 100644
--- a/funasr/models/sanm/decoder.py
+++ b/funasr/models/sanm/decoder.py
@@ -1,3 +1,8 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
 from typing import List
 from typing import Tuple
 import logging
@@ -14,7 +19,7 @@
 from funasr.models.sanm.positionwise_feed_forward import PositionwiseFeedForwardDecoderSANM
 from funasr.models.transformer.utils.repeat import repeat
 
-from funasr.utils.register import register_class, registry_tables
+from funasr.register import tables
 
 class DecoderLayerSANM(nn.Module):
     """Single decoder layer module.
@@ -190,13 +195,12 @@
         return x, memory, fsmn_cache, opt_cache
 
 
-@register_class("decoder_classes", "FsmnDecoder")
+@tables.register("decoder_classes", "FsmnDecoder")
 class FsmnDecoder(BaseTransformerDecoder):
     """
-    Author: Speech Lab of DAMO Academy, Alibaba Group
-    SCAMA: Streaming chunk-aware multihead attention for online end-to-end speech recognition
+    Author: Zhifu Gao, Shiliang Zhang, Ming Lei, Ian McLoughlin
+    San-m: Memory equipped self-attention for end-to-end speech recognition
     https://arxiv.org/abs/2006.01713
-
     """
     
     def __init__(

--
Gitblit v1.9.1