From a6889a31700bf3d610712c7fb5edecd06f78f0bf Mon Sep 17 00:00:00 2001
From: shixian.shi <shixian.shi@alibaba-inc.com>
Date: Thu, 4 May 2023 19:33:50 +0800
Subject: [PATCH] Disable attention-weight (ideal-attention) loss in contextual paraformer forward

---
 funasr/models/e2e_asr_contextual_paraformer.py |    8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/funasr/models/e2e_asr_contextual_paraformer.py b/funasr/models/e2e_asr_contextual_paraformer.py
index 493b345..aced11e 100644
--- a/funasr/models/e2e_asr_contextual_paraformer.py
+++ b/funasr/models/e2e_asr_contextual_paraformer.py
@@ -280,8 +280,8 @@
         decoder_outs = self.decoder(
             encoder_out, encoder_out_lens, sematic_embeds, ys_pad_lens, contextual_info=contextual_info
         ) 
-        decoder_out, _, attn = decoder_outs[0], decoder_outs[1], decoder_outs[2]
-        
+        decoder_out, _ = decoder_outs[0], decoder_outs[1]
+        '''
         if self.crit_attn_weight > 0 and attn.shape[-1] > 1:
             ideal_attn = ideal_attn + self.crit_attn_smooth / (self.crit_attn_smooth + 1.0)
             attn_non_blank = attn[:,:,:,:-1]
@@ -289,7 +289,9 @@
             loss_ideal = self.attn_loss(attn_non_blank.max(1)[0], ideal_attn_non_blank.to(attn.device))
         else:
             loss_ideal = None
-
+        '''
+        loss_ideal = None
+        
         if decoder_out_1st is None:
             decoder_out_1st = decoder_out
         # 2. Compute attention loss

--
Gitblit v1.9.1