From c7c97cb0fe91b06dcbc391b5a7dee92d496dc894 Mon Sep 17 00:00:00 2001
From: aky15 <ankeyuthu@gmail.com>
Date: Thu, 11 Jan 2024 17:52:27 +0800
Subject: [PATCH] Update rwkv_encoder.py

---
 funasr/models/encoder/rwkv_encoder.py |   17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/funasr/models/encoder/rwkv_encoder.py b/funasr/models/encoder/rwkv_encoder.py
index 8a33520..dc1f207 100644
--- a/funasr/models/encoder/rwkv_encoder.py
+++ b/funasr/models/encoder/rwkv_encoder.py
@@ -113,11 +113,12 @@
         x = self.embed_norm(x)
         olens = mask.eq(0).sum(1)
 
-        for block in self.rwkv_blocks:
-            x, _ = block(x)
-        # for streaming inference
-        # xs_pad = self.rwkv_infer(xs_pad)
-
+        if self.training:
+            for block in self.rwkv_blocks:
+                x, _ = block(x)
+        else:
+            x = self.rwkv_infer(x)
+            
         x = self.final_norm(x)
 
         if self.time_reduction_factor > 1:
@@ -136,9 +137,9 @@
 
         state = [
             torch.zeros(
-                (batch_size, 1, hidden_sizes[i], self.num_rwkv_blocks),
+                (batch_size, 1, hidden_sizes[i], self.num_blocks),
                 dtype=torch.float32,
-                device=self.device,
+                device=xs_pad.device,
             )
             for i in range(5)
         ]
@@ -151,5 +152,5 @@
             for idx, block in enumerate(self.rwkv_blocks):
                 x_t, state = block(x_t, state=state)
             xs_out.append(x_t)
-        xs_out = torch.stack(xs_out, dim=1)
+        xs_out = torch.cat(xs_out, dim=1)
         return xs_out

--
Gitblit v1.9.1