From a4beddbe757ff34d90591c79d9ed8462752ee47c Mon Sep 17 00:00:00 2001
From: wucong.lyb <wucong.lyb@alibaba-inc.com>
Date: Thu, 29 Jun 2023 17:14:17 +0800
Subject: [PATCH] Merge branch 'main' of https://github.com/alibaba-damo-academy/FunASR
---
funasr/models/ctc.py | 7 -------
 1 file changed, 0 insertions(+), 7 deletions(-)
diff --git a/funasr/models/ctc.py b/funasr/models/ctc.py
index 64b8710..d3c10fa 100644
--- a/funasr/models/ctc.py
+++ b/funasr/models/ctc.py
@@ -2,7 +2,6 @@
import torch
import torch.nn.functional as F
-from typeguard import check_argument_types
class CTC(torch.nn.Module):
@@ -25,7 +24,6 @@
reduce: bool = True,
ignore_nan_grad: bool = True,
):
- assert check_argument_types()
super().__init__()
eprojs = encoder_output_size
self.dropout_rate = dropout_rate
@@ -41,11 +39,6 @@
if ignore_nan_grad:
logging.warning("ignore_nan_grad option is not supported for warp_ctc")
self.ctc_loss = warp_ctc.CTCLoss(size_average=True, reduce=reduce)
-
- elif self.ctc_type == "gtnctc":
- from espnet.nets.pytorch_backend.gtn_ctc import GTNCTCLossFunction
-
- self.ctc_loss = GTNCTCLossFunction.apply
else:
raise ValueError(
f'ctc_type must be "builtin" or "warpctc": {self.ctc_type}'
--
Gitblit v1.9.1