From d80ac2fd2df4e7fb8a28acfa512bb11472b5cc99 Mon Sep 17 00:00:00 2001
From: liugz18 <57401541+liugz18@users.noreply.github.com>
Date: Thu, 18 Jul 2024 21:34:55 +0800
Subject: [PATCH] Rename 'res' on line 514 to avoid a naming conflict with line 365

---
 funasr/losses/label_smoothing_loss.py |   62 ++++++++++++++++++++++++++-----
 1 file changed, 52 insertions(+), 10 deletions(-)

diff --git a/funasr/losses/label_smoothing_loss.py b/funasr/losses/label_smoothing_loss.py
index 28df73f..ffc38da 100644
--- a/funasr/losses/label_smoothing_loss.py
+++ b/funasr/losses/label_smoothing_loss.py
@@ -8,7 +8,7 @@
 
 import torch
 from torch import nn
-from funasr.modules.nets_utils import make_pad_mask
+from funasr.models.transformer.utils.nets_utils import make_pad_mask
 
 
 class LabelSmoothingLoss(nn.Module):
@@ -50,8 +50,8 @@
         """
         assert x.size(2) == self.size
         batch_size = x.size(0)
-        x = x.view(-1, self.size)
-        target = target.view(-1)
+        x = x.contiguous().view(-1, self.size)
+        target = target.contiguous().view(-1)
         with torch.no_grad():
             true_dist = x.clone()
             true_dist.fill_(self.smoothing / (self.size - 1))
@@ -65,17 +65,59 @@
 
 
 class SequenceBinaryCrossEntropy(nn.Module):
-    def __init__(
-            self,
-            normalize_length=False,
-            criterion=nn.BCEWithLogitsLoss(reduction="none")
-    ):
+    def __init__(self, normalize_length=False, criterion=nn.BCEWithLogitsLoss(reduction="none")):
         super().__init__()
         self.normalize_length = normalize_length
         self.criterion = criterion
 
     def forward(self, pred, label, lengths):
-        pad_mask = make_pad_mask(lengths, maxlen=pred.shape[1])
+        pad_mask = make_pad_mask(lengths, maxlen=pred.shape[1]).to(pred.device)
         loss = self.criterion(pred, label)
         denom = (~pad_mask).sum() if self.normalize_length else pred.shape[0]
-        return loss.masked_fill(pad_mask, 0).sum() / denom
+        return loss.masked_fill(pad_mask.unsqueeze(-1), 0).sum() / denom
+
+
+class NllLoss(nn.Module):
+    """Nll loss.
+
+    :param int size: the number of class
+    :param int padding_idx: ignored class id
+    :param bool normalize_length: normalize loss by sequence length if True
+    :param torch.nn.Module criterion: loss function
+    """
+
+    def __init__(
+        self,
+        size,
+        padding_idx,
+        normalize_length=False,
+        criterion=nn.NLLLoss(reduction="none"),
+    ):
+        """Construct an NllLoss object."""
+        super(NllLoss, self).__init__()
+        self.criterion = criterion
+        self.padding_idx = padding_idx
+        self.size = size
+        self.true_dist = None
+        self.normalize_length = normalize_length
+
+    def forward(self, x, target):
+        """Compute loss between x and target.
+
+        :param torch.Tensor x: prediction (batch, seqlen, class)
+        :param torch.Tensor target:
+            target signal masked with self.padding_idx (batch, seqlen)
+        :return: scalar float value
+        :rtype: torch.Tensor
+        """
+        assert x.size(2) == self.size
+        batch_size = x.size(0)
+        x = x.contiguous().view(-1, self.size)
+        target = target.contiguous().view(-1)
+        with torch.no_grad():
+            ignore = target == self.padding_idx  # (batch * seqlen,)
+            total = len(target) - ignore.sum().item()
+            target = target.masked_fill(ignore, 0)  # replace padding_idx so indexing stays in range
+        loss = self.criterion(x, target)
+        denom = total if self.normalize_length else batch_size
+        return loss.masked_fill(ignore, 0).sum() / denom

--
Gitblit v1.9.1
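
For reference, a minimal usage sketch of the two classes touched by this patch.
The shapes, values, and variable names below are illustrative assumptions, not
part of the patch; only the import path follows from the diff above.

    import torch
    from funasr.losses.label_smoothing_loss import NllLoss, SequenceBinaryCrossEntropy

    batch, seqlen, nclass = 2, 5, 4

    # SequenceBinaryCrossEntropy: make_pad_mask(lengths) is (batch, seqlen),
    # while the element-wise BCE loss is (batch, seqlen, nclass). The patched
    # unsqueeze(-1) lets the mask broadcast over the class dimension, and
    # .to(pred.device) keeps the mask on the same device as the predictions.
    pred = torch.randn(batch, seqlen, nclass)
    label = torch.randint(0, 2, (batch, seqlen, nclass)).float()
    lengths = torch.tensor([5, 3])
    print(SequenceBinaryCrossEntropy()(pred, label, lengths))  # scalar; padded steps zeroed

    # NllLoss wraps nn.NLLLoss, so it expects log-probabilities; positions
    # equal to padding_idx are excluded from the sum.
    logp = torch.log_softmax(torch.randn(batch, seqlen, nclass), dim=-1)
    target = torch.tensor([[1, 2, 3, 0, 0], [2, 1, -1, -1, -1]])  # -1 marks padding
    print(NllLoss(size=nclass, padding_idx=-1)(logp, target))  # scalar; padding zeroed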