From 0063e4356a8e8e7bb984a6d847afbb11d5fbaa4a Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Fri, 21 Jul 2023 15:07:34 +0800
Subject: [PATCH] Merge pull request #768 from alibaba-damo-academy/dev_lhn

---
 funasr/tasks/abs_task.py |   15 +++++++++++++++
 1 file changed, 15 insertions(+), 0 deletions(-)

diff --git a/funasr/tasks/abs_task.py b/funasr/tasks/abs_task.py
index 91d33c5..f7f13d2 100644
--- a/funasr/tasks/abs_task.py
+++ b/funasr/tasks/abs_task.py
@@ -71,6 +71,7 @@
 from funasr.utils.types import str_or_none
 from funasr.utils.wav_utils import calc_shape, generate_data_list, filter_wav_text
 from funasr.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
+from funasr.modules.lora.utils import mark_only_lora_as_trainable
 
 try:
     import wandb
@@ -952,6 +953,18 @@
             default=None,
             help="oss bucket.",
         )
+        group.add_argument(
+            "--enable_lora",
+            type=str2bool,
+            default=False,
+            help="Apply lora for finetuning.",
+        )
+        group.add_argument(
+            "--lora_bias",
+            type=str,
+            default="none",
+            help="lora bias.",
+        )
 
         cls.trainer.add_arguments(parser)
         cls.add_task_arguments(parser)
@@ -1246,6 +1259,8 @@
             dtype=getattr(torch, args.train_dtype),
             device="cuda" if args.ngpu > 0 else "cpu",
         )
+        if args.enable_lora:
+            mark_only_lora_as_trainable(model, args.lora_bias)
         for t in args.freeze_param:
             for k, p in model.named_parameters():
                 if k.startswith(t + ".") or k == t:
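
For context: the new --lora_bias values are interpreted by
mark_only_lora_as_trainable. In implementations derived from Microsoft's
loralib (which funasr.modules.lora.utils appears to follow), the function
freezes every parameter whose name lacks the "lora_" prefix, then re-enables
bias terms according to the bias argument ("none", "all", or "lora_only").
A minimal sketch under that assumption; the actual FunASR implementation
may differ:

    import torch.nn as nn

    def mark_only_lora_as_trainable(model: nn.Module, bias: str = "none") -> None:
        # Freeze everything that is not a LoRA adapter weight
        # (loralib names the adapter parameters "lora_A"/"lora_B").
        for n, p in model.named_parameters():
            if "lora_" not in n:
                p.requires_grad = False
        if bias == "none":
            return
        if bias == "all":
            # Keep every bias term trainable alongside the adapters.
            for n, p in model.named_parameters():
                if "bias" in n:
                    p.requires_grad = True
        elif bias == "lora_only":
            # Only biases of modules that carry LoRA adapters stay trainable.
            # (loralib checks isinstance(m, LoRALayer); hasattr is a
            # self-contained stand-in for this sketch.)
            for m in model.modules():
                if hasattr(m, "lora_A") and getattr(m, "bias", None) is not None:
                    m.bias.requires_grad = True
        else:
            raise NotImplementedError(bias)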

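A quick sanity check of the sketch above (ToyLoRALinear is a hypothetical
module that merely mimics loralib's parameter naming):

    import torch
    import torch.nn as nn

    class ToyLoRALinear(nn.Linear):
        def __init__(self, d_in, d_out, r=4):
            super().__init__(d_in, d_out)
            self.lora_A = nn.Parameter(torch.zeros(r, d_in))
            self.lora_B = nn.Parameter(torch.zeros(d_out, r))

    model = nn.Sequential(ToyLoRALinear(8, 8), nn.Linear(8, 2))
    mark_only_lora_as_trainable(model, bias="none")
    print([n for n, p in model.named_parameters() if p.requires_grad])
    # -> ['0.lora_A', '0.lora_B']

Note that the existing freeze_param handling in the hunk above runs after
this call, so parameters listed there can still be frozen on top of the
LoRA marking.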
--
Gitblit v1.9.1