From 54931dd4e1a099d7d6f144c4e12e5453deb3aa26 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Wed, 28 Jun 2023 10:41:57 +0800
Subject: [PATCH] Merge branch 'main' of https://github.com/alibaba-damo-academy/FunASR into main

---
 funasr/bin/build_trainer.py |   35 +++++++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)
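
Notes (annotation in the patch-notes area, not part of the commit message):

This patch registers three additional task modes ("paraformer_streaming",
"mfcca", "tp") in the mode dispatch and extends build_trainer() so that the
optimizer, LR scheduler, and SpecAugment settings can be overridden at call
time. A minimal sketch of the dispatch as it behaves after this patch; the
call signature is taken from the diff below, but note that ASRTask.get_parser()
may consume command-line arguments, so this is illustrative rather than a
drop-in snippet:

    from funasr.bin.build_trainer import parse_args

    # "mfcca" now resolves to ASRTaskMFCCA, "tp" to ASRTaskAligner, and
    # "paraformer_streaming" reuses ASRTaskParaformer; any other string
    # still raises ValueError("Unknown mode: ...").
    args, ASRTask = parse_args(mode="mfcca")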

diff --git a/funasr/bin/build_trainer.py b/funasr/bin/build_trainer.py
index c13f91b..267e405 100644
--- a/funasr/bin/build_trainer.py
+++ b/funasr/bin/build_trainer.py
@@ -23,10 +23,16 @@
         from funasr.tasks.asr import ASRTask as ASRTask
     elif mode == "paraformer":
         from funasr.tasks.asr import ASRTaskParaformer as ASRTask
+    elif mode == "paraformer_streaming":
+        from funasr.tasks.asr import ASRTaskParaformer as ASRTask
     elif mode == "paraformer_vad_punc":
         from funasr.tasks.asr import ASRTaskParaformer as ASRTask
     elif mode == "uniasr":
         from funasr.tasks.asr import ASRTaskUniASR as ASRTask
+    elif mode == "mfcca":
+        from funasr.tasks.asr import ASRTaskMFCCA as ASRTask
+    elif mode == "tp":
+        from funasr.tasks.asr import ASRTaskAligner as ASRTask
     else:
         raise ValueError("Unknown mode: {}".format(mode))
     parser = ASRTask.get_parser()
@@ -34,8 +40,23 @@
     return args, ASRTask
 
 
-def build_trainer(modelscope_dict, data_dir, output_dir, train_set="train", dev_set="validation", distributed=False,
-                  dataset_type="small", lr=None, batch_bins=None, max_epoch=None, mate_params=None):
+def build_trainer(modelscope_dict,
+                  data_dir,
+                  output_dir,
+                  train_set="train",
+                  dev_set="validation",
+                  distributed=False,
+                  dataset_type="small",
+                  batch_bins=None,
+                  max_epoch=None,
+                  optim=None,
+                  lr=None,
+                  scheduler=None,
+                  scheduler_conf=None,
+                  specaug=None,
+                  specaug_conf=None,
+                  param_dict=None,
+                  **kwargs):
     mode = modelscope_dict['mode']
     args, ASRTask = parse_args(mode=mode)
     # ddp related
@@ -94,8 +115,18 @@
     args.output_dir = output_dir
     args.gpu_id = args.local_rank
     args.config = finetune_config
+    if optim is not None:
+        args.optim = optim
     if lr is not None:
         args.optim_conf["lr"] = lr
+    if scheduler is not None:
+        args.scheduler = scheduler
+    if scheduler_conf is not None:
+        args.scheduler_conf = scheduler_conf
+    if specaug is not None:
+        args.specaug = specaug
+    if specaug_conf is not None:
+        args.specaug_conf = specaug_conf
     if max_epoch is not None:
         args.max_epoch = max_epoch
     if batch_bins is not None:

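Usage sketch for the extended build_trainer() signature (illustrative only:
the modelscope_dict contents and every parameter value below are assumptions,
not defined by this patch; in practice modelscope_dict carries additional
keys from which things like finetune_config are resolved):

    from funasr.bin.build_trainer import build_trainer

    trainer = build_trainer(
        modelscope_dict={"mode": "paraformer"},  # selects the ASRTask variant
        data_dir="./data",                       # hypothetical paths
        output_dir="./exp",
        dataset_type="small",
        batch_bins=2000,
        max_epoch=20,
        optim="adam",                         # copied to args.optim when set
        lr=5e-5,                              # written into args.optim_conf["lr"]
        scheduler="warmuplr",                 # copied to args.scheduler when set
        scheduler_conf={"warmup_steps": 500}, # copied to args.scheduler_conf
        specaug="specaug",                    # copied to args.specaug when set
        specaug_conf={"num_time_mask": 2},    # copied to args.specaug_conf
    )

Each override is applied only when the argument is not None, so existing
callers that omit these keywords keep the behavior of the finetune config.
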
--
Gitblit v1.9.1