From cad4b2dc391f114903d4065c02ac64494b0cac2f Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Fri, 21 Jul 2023 14:46:46 +0800
Subject: [PATCH] Update RESULTS.md

---
 egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/RESULTS.md |    6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/RESULTS.md b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/RESULTS.md
index 5f1abb6..edc2cf1 100644
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/RESULTS.md
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/RESULTS.md
@@ -72,7 +72,7 @@
 ## Fine-tuning
 - Train config: 
   - Training data: aishell-1
-  - Training info: lr 0.0002, batch size 2000, 2 gpu, acc_grad 1, 20 epochs
+  - Training info: lr 0.0002, dataset_type: small, batch bins 2000, 2 gpu, acc_grad 1, 20 epochs
   - Decoding info: beam_size 1, average_num 10
 
 | model    | dev cer(%) | test cer(%) |
@@ -82,7 +82,7 @@
 
 - Train config: 
   - Training data: 16k sichuan dialect
-  - Training info: lr 0.0002, batch size 2000, 2 gpu, acc_grad 1, 20 epochs
+  - Training info: lr 0.0002, dataset_type: small, batch bins 2000, 2 gpu, acc_grad 1, 20 epochs
   - Decoding info: beam_size 1, average_num 10
   
   
@@ -99,7 +99,7 @@
 ## Lora Fine-tuning
 - Train config: 
   - Training data: 16k sichuan dialect
-  - Training info: lr 0.0002, batch size 2000, 2 gpu, acc_grad 1, 20 epochs
+  - Training info: lr 0.0002, dataset_type: small, batch bins 2000, 2 gpu, acc_grad 1, 20 epochs
   - Lora info: lora_bias: "all", lora_list ['q','v'], lora_rank:8, lora_alpha:16, lora_dropout:0.1
   - Decoding info: beam_size 1, average_num 10
   

--
Gitblit v1.9.1