From acb9a0fec8d8a4dabeedcbb8e08c26f66d7083f0 Mon Sep 17 00:00:00 2001 From: haoneng.lhn <haoneng.lhn@alibaba-inc.com> Date: 星期五, 08 十二月 2023 16:19:00 +0800 Subject: [PATCH] fix loss normalization for ddp training --- docs/modelscope_pipeline/quick_start.md | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/modelscope_pipeline/quick_start.md b/docs/modelscope_pipeline/quick_start.md index 7e35e91..2b9219b 100644 --- a/docs/modelscope_pipeline/quick_start.md +++ b/docs/modelscope_pipeline/quick_start.md @@ -1,3 +1,5 @@ +([简体中文](./quick_start_zh.md)|English) + # Quick Start > **Note**: @@ -221,5 +223,4 @@ If you want finetune with multi-GPUs, you could: ```shell CUDA_VISIBLE_DEVICES=1,2 python -m torch.distributed.launch --nproc_per_node 2 finetune.py > log.txt 2>&1 -``` - +``` \ No newline at end of file -- Gitblit v1.9.1