From a7ab8bd688d21e45f194dd9d87cb060d2cbc21bd Mon Sep 17 00:00:00 2001
From: Lizerui9926 <110582652+Lizerui9926@users.noreply.github.com>
Date: Tue, 14 Mar 2023 16:45:30 +0800
Subject: [PATCH] Merge pull request #230 from alibaba-damo-academy/dev_wjm

---
 funasr/modules/eend_ola/encoder.py |   16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/funasr/modules/eend_ola/encoder.py b/funasr/modules/eend_ola/encoder.py
index 17d11ac..4999031 100644
--- a/funasr/modules/eend_ola/encoder.py
+++ b/funasr/modules/eend_ola/encoder.py
@@ -1,5 +1,5 @@
 import math
-import numpy as np
+
 import torch
 import torch.nn.functional as F
 from torch import nn
@@ -81,10 +81,16 @@
         return self.dropout(x)
 
 
-class TransformerEncoder(nn.Module):
-    def __init__(self, idim, n_layers, n_units,
-                 e_units=2048, h=8, dropout_rate=0.1, use_pos_emb=False):
-        super(TransformerEncoder, self).__init__()
+class EENDOLATransformerEncoder(nn.Module):
+    def __init__(self,
+                 idim: int,
+                 n_layers: int,
+                 n_units: int,
+                 e_units: int = 2048,
+                 h: int = 8,
+                 dropout_rate: float = 0.1,
+                 use_pos_emb: bool = False):
+        super(EENDOLATransformerEncoder, self).__init__()
         self.lnorm_in = nn.LayerNorm(n_units)
         self.n_layers = n_layers
         self.dropout = nn.Dropout(dropout_rate)

--
Gitblit v1.9.1