From 476dc3f30c014e0d2ebdc46ce0283ddbfe63eeb8 Mon Sep 17 00:00:00 2001
From: VirtuosoQ <2416050435@qq.com>
Date: Sun, 28 Apr 2024 16:37:54 +0800
Subject: [PATCH] 16:37 java_http_client
---
funasr/models/sense_voice/decoder.py | 24 +-----------------------
 1 file changed, 1 insertion(+), 23 deletions(-)
diff --git a/funasr/models/sense_voice/decoder.py b/funasr/models/sense_voice/decoder.py
index 9087ea1..9fdb3bd 100644
--- a/funasr/models/sense_voice/decoder.py
+++ b/funasr/models/sense_voice/decoder.py
@@ -245,29 +245,7 @@
self.register_buffer("mask", mask, persistent=False)
self.use_padmask = kwargs.get("use_padmask", True)
- # def forward(self, x: Tensor, xa: Tensor, kv_cache: Optional[dict] = None):
- # """
- # x : torch.LongTensor, shape = (batch_size, <= n_ctx)
- # the text tokens
- # xa : torch.Tensor, shape = (batch_size, n_audio_ctx, n_audio_state)
- # the encoded audio features to be attended on
- # """
- # offset = next(iter(kv_cache.values())).shape[1] if kv_cache else 0
- # x = (
- # self.token_embedding(x)
- # + self.positional_embedding[offset: offset + x.shape[-1]]
- # )
- # x = x.to(xa.dtype)
- #
- # for block in self.blocks:
- # x = block(x, xa, mask=self.mask, kv_cache=kv_cache)
- #
- # x = self.ln(x)
- # logits = (
- # x @ torch.transpose(self.token_embedding.weight.to(x.dtype), 0, 1)
- # ).float()
- #
- # return logits
+
def forward(
--
Gitblit v1.9.1