From 2779602177ae5374547c7a7e17de0b11a166326d Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 29 Apr 2024 15:08:46 +0800
Subject: [PATCH] Merge branch 'dev_gzf_exp' of
 github.com:alibaba-damo-academy/FunASR into dev_gzf_exp

merge
---
 funasr/schedulers/__init__.py | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/funasr/schedulers/__init__.py b/funasr/schedulers/__init__.py
index cba286a..39f8c0e 100644
--- a/funasr/schedulers/__init__.py
+++ b/funasr/schedulers/__init__.py
@@ -6,18 +6,20 @@
 from funasr.schedulers.noam_lr import NoamLR
 from funasr.schedulers.tri_stage_scheduler import TriStageLR
 from funasr.schedulers.warmup_lr import WarmupLR
+from funasr.schedulers.lambdalr_cus import CustomLambdaLR
 
 scheduler_classes = dict(
-    ReduceLROnPlateau=torch.optim.lr_scheduler.ReduceLROnPlateau,
-    lambdalr=torch.optim.lr_scheduler.LambdaLR,
-    steplr=torch.optim.lr_scheduler.StepLR,
-    multisteplr=torch.optim.lr_scheduler.MultiStepLR,
-    exponentiallr=torch.optim.lr_scheduler.ExponentialLR,
-    CosineAnnealingLR=torch.optim.lr_scheduler.CosineAnnealingLR,
-    noamlr=NoamLR,
-    warmuplr=WarmupLR,
-    tri_stage=TriStageLR,
-    cycliclr=torch.optim.lr_scheduler.CyclicLR,
-    onecyclelr=torch.optim.lr_scheduler.OneCycleLR,
-    CosineAnnealingWarmRestarts=torch.optim.lr_scheduler.CosineAnnealingWarmRestarts,
+    ReduceLROnPlateau=torch.optim.lr_scheduler.ReduceLROnPlateau,
+    lambdalr=torch.optim.lr_scheduler.LambdaLR,
+    steplr=torch.optim.lr_scheduler.StepLR,
+    multisteplr=torch.optim.lr_scheduler.MultiStepLR,
+    exponentiallr=torch.optim.lr_scheduler.ExponentialLR,
+    CosineAnnealingLR=torch.optim.lr_scheduler.CosineAnnealingLR,
+    noamlr=NoamLR,
+    warmuplr=WarmupLR,
+    tri_stage=TriStageLR,
+    cycliclr=torch.optim.lr_scheduler.CyclicLR,
+    onecyclelr=torch.optim.lr_scheduler.OneCycleLR,
+    CosineAnnealingWarmRestarts=torch.optim.lr_scheduler.CosineAnnealingWarmRestarts,
+    custom_lambdalr=CustomLambdaLR,
 )
-- 
Gitblit v1.9.1