From 98abc0e5ac1a1da0fe1802d9ffb623802fbf0b2f Mon Sep 17 00:00:00 2001
From: jmwang66 <wangjiaming.wjm@alibaba-inc.com>
Date: Thu, 29 Jun 2023 16:30:39 +0800
Subject: [PATCH] update setup (#686)

---
 funasr/models/ctc.py |    7 -------
 1 file changed, 7 deletions(-)

diff --git a/funasr/models/ctc.py b/funasr/models/ctc.py
index 64b8710..d3c10fa 100644
--- a/funasr/models/ctc.py
+++ b/funasr/models/ctc.py
@@ -2,7 +2,6 @@
 
 import torch
 import torch.nn.functional as F
-from typeguard import check_argument_types
 
 
 class CTC(torch.nn.Module):
@@ -25,7 +24,6 @@
         reduce: bool = True,
         ignore_nan_grad: bool = True,
     ):
-        assert check_argument_types()
         super().__init__()
         eprojs = encoder_output_size
         self.dropout_rate = dropout_rate
@@ -41,11 +39,6 @@
             if ignore_nan_grad:
                 logging.warning("ignore_nan_grad option is not supported for warp_ctc")
             self.ctc_loss = warp_ctc.CTCLoss(size_average=True, reduce=reduce)
-
-        elif self.ctc_type == "gtnctc":
-            from espnet.nets.pytorch_backend.gtn_ctc import GTNCTCLossFunction
-
-            self.ctc_loss = GTNCTCLossFunction.apply
         else:
             raise ValueError(
                 f'ctc_type must be "builtin" or "warpctc": {self.ctc_type}'

--
Gitblit v1.9.1