From c3442d9566f5a2011c95b0d2998958a1b5348564 Mon Sep 17 00:00:00 2001
From: shixian.shi <shixian.shi@alibaba-inc.com>
Date: Fri, 12 Jan 2024 18:04:42 +0800
Subject: [PATCH] update device

---
 funasr/models/paraformer/decoder.py |    8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/funasr/models/paraformer/decoder.py b/funasr/models/paraformer/decoder.py
index f59ce4d..1df27e8 100644
--- a/funasr/models/paraformer/decoder.py
+++ b/funasr/models/paraformer/decoder.py
@@ -17,7 +17,7 @@
 from funasr.models.transformer.embedding import PositionalEncoding
 from funasr.models.transformer.utils.nets_utils import make_pad_mask
 from funasr.models.transformer.positionwise_feed_forward import PositionwiseFeedForward
-from funasr.utils.register import register_class, registry_tables
+from funasr.register import tables
 
 class DecoderLayerSANM(nn.Module):
     """Single decoder layer module.
@@ -200,7 +200,7 @@
         return x, memory, fsmn_cache, opt_cache
 
 
-@register_class("decoder_classes", "ParaformerSANMDecoder")
+@tables.register("decoder_classes", "ParaformerSANMDecoder")
 class ParaformerSANMDecoder(BaseTransformerDecoder):
     """
     Author: Speech Lab of DAMO Academy, Alibaba Group
@@ -525,8 +525,8 @@
         return y, new_cache
 
 
-@register_class("decoder_classes", "ParaformerDecoderSAN")
-class ParaformerDecoderSAN(BaseTransformerDecoder):
+@tables.register("decoder_classes", "ParaformerSANDecoder")
+class ParaformerSANDecoder(BaseTransformerDecoder):
     """
     Author: Speech Lab of DAMO Academy, Alibaba Group
     Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition

--
Gitblit v1.9.1