From 24f73665e2d8ea8e4de2fe4f900bc539d7f7b989 Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Mon, 17 Apr 2023 15:49:45 +0800
Subject: [PATCH] Merge pull request #367 from alibaba-damo-academy/dev_lhn2

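Add StreamSinusoidalPositionEncoder, a sinusoidal position encoder for
streaming (chunk-wise) inference. It offsets the positions of each incoming
chunk by cache["start_idx"] so the encoding continues across chunks, and it
pads the output along the time axis by cache["left"] / cache["right"].

A minimal usage sketch; the chunk source and the caller-side advancing of
"start_idx" are illustrative assumptions, not part of this patch:

    import torch
    from funasr.modules.embedding import StreamSinusoidalPositionEncoder

    encoder = StreamSinusoidalPositionEncoder()
    cache = {"start_idx": 0, "left": 0, "right": 0}
    for chunk in feature_chunks:             # each chunk: (batch, time, dim)
        out = encoder(chunk, cache=cache)    # same shape, plus any padding
        cache["start_idx"] += chunk.size(1)  # continue positions next chunk
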
---
 funasr/modules/embedding.py |   34 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 31 insertions(+), 3 deletions(-)

diff --git a/funasr/modules/embedding.py b/funasr/modules/embedding.py
index e4f9bff..4b292a7 100644
--- a/funasr/modules/embedding.py
+++ b/funasr/modules/embedding.py
@@ -8,7 +8,7 @@
 
 import math
 import torch
-
+import torch.nn.functional as F
 
 def _pre_hook(
     state_dict,
@@ -407,11 +407,39 @@
 
         return x + position_encoding
 
-    def forward_chunk(self, x, cache=None):
+class StreamSinusoidalPositionEncoder(torch.nn.Module):
+    '''Sinusoidal position encoder for streaming (chunk-wise) inference:
+    positions continue from cache["start_idx"] across successive chunks.
+    '''
+    def __init__(self, d_model=80, dropout_rate=0.1):
+        super().__init__()
+
+    def encode(self, positions: torch.Tensor = None, depth: int = None, dtype: torch.dtype = torch.float32):
+        batch_size = positions.size(0)
+        positions = positions.type(dtype)
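+        # depth/2 geometrically spaced timescales, as in the standard sinusoidal encoding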
+        log_timescale_increment = torch.log(torch.tensor([10000], dtype=dtype)) / (depth / 2 - 1)
+        inv_timescales = torch.exp(torch.arange(depth / 2).type(dtype) * (-log_timescale_increment))
+        inv_timescales = torch.reshape(inv_timescales, [batch_size, -1])
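+        # broadcast (1, T, 1) * (1, 1, depth/2) -> (1, T, depth/2) phase matrix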
+        scaled_time = torch.reshape(positions, [1, -1, 1]) * torch.reshape(inv_timescales, [1, 1, -1])
+        encoding = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=2)
+        return encoding.type(dtype)
+
+    def forward(self, x, cache=None):
         start_idx = 0
+        pad_left = 0
+        pad_right = 0
         batch_size, timesteps, input_dim = x.size()
         if cache is not None:
             start_idx = cache["start_idx"]
+            pad_left = cache["left"]
+            pad_right = cache["right"]
         positions = torch.arange(1, timesteps+start_idx+1)[None, :]
         position_encoding = self.encode(positions, input_dim, x.dtype).to(x.device)
-        return x + position_encoding[:, start_idx: start_idx + timesteps]
+        outputs = x + position_encoding[:, start_idx: start_idx + timesteps]
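+        # pad the time axis (dim 1) on the left/right as requested by the cache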
+        outputs = outputs.transpose(1, 2)
+        outputs = F.pad(outputs, (pad_left, pad_right))
+        outputs = outputs.transpose(1, 2)
+        return outputs

--
Gitblit v1.9.1