From 2e8dc0933f31bf449ecc11ac1b4dc1833fdaad42 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 20 Feb 2024 18:01:15 +0800
Subject: [PATCH] train finetune

---
 examples/aishell/branchformer/run.sh   |    3 ++-
 examples/aishell/e_branchformer/run.sh |    3 ++-
 examples/aishell/paraformer/run.sh     |    3 ++-
 examples/aishell/transformer/run.sh    |    3 ++-
 funasr/train_utils/trainer.py          |    8 ++++----
 5 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/examples/aishell/branchformer/run.sh b/examples/aishell/branchformer/run.sh
index 090226b..f4842d0 100755
--- a/examples/aishell/branchformer/run.sh
+++ b/examples/aishell/branchformer/run.sh
@@ -105,7 +105,8 @@
   echo "stage 4: ASR Training"
 
   mkdir -p ${exp_dir}/exp/${model_dir}
-  log_file="${exp_dir}/exp/${model_dir}/train.log.txt"
+  current_time=$(date "+%Y-%m-%d_%H-%M")
+  log_file="${exp_dir}/exp/${model_dir}/train.log.txt.${current_time}"
   echo "log_file: ${log_file}"
 
   gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
diff --git a/examples/aishell/e_branchformer/run.sh b/examples/aishell/e_branchformer/run.sh
index 14cd64e..0855798 100755
--- a/examples/aishell/e_branchformer/run.sh
+++ b/examples/aishell/e_branchformer/run.sh
@@ -105,7 +105,8 @@
   echo "stage 4: ASR Training"
 
   mkdir -p ${exp_dir}/exp/${model_dir}
-  log_file="${exp_dir}/exp/${model_dir}/train.log.txt"
+  current_time=$(date "+%Y-%m-%d_%H-%M")
+  log_file="${exp_dir}/exp/${model_dir}/train.log.txt.${current_time}"
   echo "log_file: ${log_file}"
 
   gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
diff --git a/examples/aishell/paraformer/run.sh b/examples/aishell/paraformer/run.sh
index 90955f2..80f81b5 100755
--- a/examples/aishell/paraformer/run.sh
+++ b/examples/aishell/paraformer/run.sh
@@ -105,7 +105,8 @@
   echo "stage 4: ASR Training"
 
   mkdir -p ${exp_dir}/exp/${model_dir}
-  log_file="${exp_dir}/exp/${model_dir}/train.log.txt"
+  current_time=$(date "+%Y-%m-%d_%H-%M")
+  log_file="${exp_dir}/exp/${model_dir}/train.log.txt.${current_time}"
   echo "log_file: ${log_file}"
 
   gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
diff --git a/examples/aishell/transformer/run.sh b/examples/aishell/transformer/run.sh
index 90955f2..80f81b5 100755
--- a/examples/aishell/transformer/run.sh
+++ b/examples/aishell/transformer/run.sh
@@ -105,7 +105,8 @@
   echo "stage 4: ASR Training"
 
   mkdir -p ${exp_dir}/exp/${model_dir}
-  log_file="${exp_dir}/exp/${model_dir}/train.log.txt"
+  current_time=$(date "+%Y-%m-%d_%H-%M")
+  log_file="${exp_dir}/exp/${model_dir}/train.log.txt.${current_time}"
   echo "log_file: ${log_file}"
 
   gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index 10f7f80..cc7b215 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -188,7 +188,7 @@
             epoch (int): The current epoch number.
         """
         self.model.train()
-        pbar = tqdm(colour="blue", desc=f"Training Epoch: {epoch + 1}", total=len(self.dataloader_train),
+        pbar = tqdm(colour="blue", desc=f"rank: {self.local_rank}, Training Epoch: {epoch + 1}", total=len(self.dataloader_train),
                     dynamic_ncols=True)
         
         # Set the number of steps for gradient accumulation
@@ -278,7 +278,7 @@
                     f"epoch: {epoch}/{self.max_epoch}, "
                     f"step: {batch_idx}/{len(self.dataloader_train)}, total: {self.batch_total}, "
                     f"(loss: {loss.detach().cpu().item():.3f}), "
-                    f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}"
+                    f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}, "
                     f"{speed_stats}, "
                     f"{gpu_info}"
                 )
@@ -307,7 +307,7 @@
         """
         self.model.eval()
         with torch.no_grad():
-            pbar = tqdm(colour="red", desc=f"Training Epoch: {epoch + 1}", total=len(self.dataloader_val),
+            pbar = tqdm(colour="red", desc=f"rank: {self.local_rank}, Validation Epoch: {epoch + 1}", total=len(self.dataloader_val),
                         dynamic_ncols=True)
             speed_stats = {}
             time5 = time.perf_counter()
@@ -343,7 +343,7 @@
                         f"validation epoch: {epoch}/{self.max_epoch}, "
                         f"step: {batch_idx}/{len(self.dataloader_val)}, "
                         f"(loss: {loss.detach().cpu().item():.3f}), "
-                        f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}"
+                        f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}, "
                         f"{speed_stats}, "
                     )
                     pbar.set_description(description)

--
Gitblit v1.9.1