From 2175736ab0e2752837db102ffc27277339f19b5b Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Tue, 11 Jun 2024 14:02:18 +0800
Subject: [PATCH] Merge branch 'dev_gzf_deepspeed' into main

---
 examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune2.sh         |   47 +
 examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh                |   65 ++
 funasr/train_utils/trainer.py                                                   |    9 
 funasr/datasets/openai_datasets/datasets.py                                     |  224 ++++++++
 examples/wenetspeech/conformer/run.sh                                           |    2 
 examples/industrial_data_pretraining/deepspeed/ds_stage3.json                   |   41 +
 funasr/models/transformer/attention.py                                          |    5 
 funasr/train_utils/model_summary.py                                             |   16 
 funasr/datasets/sense_voice_datasets/datasets.py                                |    1 
 examples/industrial_data_pretraining/deepspeed/ds_z2_offload_config.json        |   32 +
 examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml     |   81 +++
 funasr/bin/train.py                                                             |    9 
 examples/industrial_data_pretraining/llm_asr/demo_speech2text.py                |   48 +
 examples/industrial_data_pretraining/deepspeed/ds_stage1.json                   |   33 +
 funasr/datasets/openai_datasets/index_ds.py                                     |  106 ++++
 funasr/models/llm_asr/adaptor.py                                                |   42 
 examples/industrial_data_pretraining/deepspeed/ds_z0_config.json                |   28 +
 examples/industrial_data_pretraining/deepspeed/ds_stage2.json                   |   33 +
 examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_transformer.yaml |   81 +++
 examples/industrial_data_pretraining/llm_asr/infer_speech2text.sh               |    9 
 examples/industrial_data_pretraining/deepspeed/ds_z2_config.json                |   28 +
 funasr/models/llm_asr/model.py                                                  |  432 ++++++++++++++++
 examples/industrial_data_pretraining/deepspeed/ds_z3_config.json                |   30 +
 funasr/bin/train_ds.py                                                          |   10 
 funasr/auto/auto_model.py                                                       |    2 
 funasr/datasets/dataloader_entry.py                                             |   21 
 /dev/null                                                                       |    1 
 funasr/train_utils/trainer_ds.py                                                |    6 
 examples/industrial_data_pretraining/deepspeed/ds_z3_offload_config.json        |   38 +
 funasr/datasets/audio_datasets/samplers.py                                      |   27 
 funasr/datasets/openai_datasets/__init__.py                                     |    0 
 31 files changed, 1462 insertions(+), 45 deletions(-)

diff --git a/examples/industrial_data_pretraining/deepspeed/ds_stage1.json b/examples/industrial_data_pretraining/deepspeed/ds_stage1.json
new file mode 100644
index 0000000..51804c1
--- /dev/null
+++ b/examples/industrial_data_pretraining/deepspeed/ds_stage1.json
@@ -0,0 +1,33 @@
+{
+  "train_micro_batch_size_per_gpu": 1,
+  "gradient_accumulation_steps": 1,
+  "steps_per_print": 100,
+  "gradient_clipping": 5,
+  "fp16": {
+    "enabled": false,
+    "auto_cast": false,
+    "loss_scale": 0,
+    "initial_scale_power": 16,
+    "loss_scale_window": 1000,
+    "hysteresis": 2,
+    "consecutive_hysteresis": false,
+    "min_loss_scale": 1
+  },
+  "bf16": {
+   "enabled": true
+  },
+  "zero_force_ds_cpu_optimizer": false,
+  "zero_optimization": {
+    "stage": 1,
+    "offload_optimizer": {
+      "device": "none",
+      "pin_memory": true
+    },
+    "allgather_partitions": true,
+    "allgather_bucket_size": 5e8,
+    "overlap_comm": true,
+    "reduce_scatter": true,
+    "reduce_bucket_size": 5e8,
+    "contiguous_gradients" : true
+  }
+}
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_stage2.json b/examples/industrial_data_pretraining/deepspeed/ds_stage2.json
new file mode 100644
index 0000000..c11b7d6
--- /dev/null
+++ b/examples/industrial_data_pretraining/deepspeed/ds_stage2.json
@@ -0,0 +1,33 @@
+{
+  "train_micro_batch_size_per_gpu": 1,
+  "gradient_accumulation_steps": 1,
+  "steps_per_print": 100,
+  "gradient_clipping": 5,
+  "fp16": {
+    "enabled": false,
+    "auto_cast": false,
+    "loss_scale": 0,
+    "initial_scale_power": 16,
+    "loss_scale_window": 1000,
+    "hysteresis": 2,
+    "consecutive_hysteresis": false,
+    "min_loss_scale": 1
+  },
+  "bf16": {
+   "enabled": true
+  },
+  "zero_force_ds_cpu_optimizer": false,
+  "zero_optimization": {
+    "stage": 2,
+    "offload_optimizer": {
+      "device": "none",
+      "pin_memory": true
+    },
+    "allgather_partitions": true,
+    "allgather_bucket_size": 5e8,
+    "overlap_comm": false,
+    "reduce_scatter": true,
+    "reduce_bucket_size": 5e8,
+    "contiguous_gradients" : true
+  }
+}
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_stage3.json b/examples/industrial_data_pretraining/deepspeed/ds_stage3.json
new file mode 100644
index 0000000..ba38293
--- /dev/null
+++ b/examples/industrial_data_pretraining/deepspeed/ds_stage3.json
@@ -0,0 +1,41 @@
+{
+  "train_micro_batch_size_per_gpu": 1,
+  "gradient_accumulation_steps": 1,
+  "steps_per_print": 100,
+  "gradient_clipping": 5,
+  "fp16": {
+    "enabled": false,
+    "auto_cast": false,
+    "loss_scale": 0,
+    "initial_scale_power": 16,
+    "loss_scale_window": 1000,
+    "hysteresis": 2,
+    "consecutive_hysteresis": false,
+    "min_loss_scale": 1
+  },
+  "bf16": {
+   "enabled": true
+  },
+  "zero_force_ds_cpu_optimizer": false,
+  "zero_optimization": {
+    "stage": 3,
+    "offload_optimizer": {
+      "device": "none",
+      "pin_memory": true
+    },
+    "offload_param": {
+      "device": "none",
+      "pin_memory": true
+    },
+    "allgather_partitions": true,
+    "allgather_bucket_size": 5e8,
+    "overlap_comm": true,
+    "reduce_scatter": true,
+    "reduce_bucket_size": 5e8,
+    "contiguous_gradients" : true,
+    "stage3_max_live_parameters": 1e9,
+    "stage3_max_reuse_distance": 1e9,
+    "stage3_prefetch_bucket_size": 5e8,
+    "stage3_param_persistence_threshold": 1e5
+  }
+}
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_z0_config.json b/examples/industrial_data_pretraining/deepspeed/ds_z0_config.json
new file mode 100644
index 0000000..ed32667
--- /dev/null
+++ b/examples/industrial_data_pretraining/deepspeed/ds_z0_config.json
@@ -0,0 +1,28 @@
+{
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto",
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "zero_allow_untested_optimizer": true,
+  "fp16": {
+    "enabled": "auto",
+    "loss_scale": 0,
+    "loss_scale_window": 1000,
+    "initial_scale_power": 16,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+  "bf16": {
+    "enabled": "auto"
+  },
+  "zero_optimization": {
+    "stage": 0,
+    "allgather_partitions": true,
+    "allgather_bucket_size": 5e8,
+    "overlap_comm": true,
+    "reduce_scatter": true,
+    "reduce_bucket_size": 5e8,
+    "contiguous_gradients": true,
+    "round_robin_gradients": true
+  }
+}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_z2_config.json b/examples/industrial_data_pretraining/deepspeed/ds_z2_config.json
new file mode 100644
index 0000000..0a1bd1d
--- /dev/null
+++ b/examples/industrial_data_pretraining/deepspeed/ds_z2_config.json
@@ -0,0 +1,28 @@
+{
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto",
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "zero_allow_untested_optimizer": true,
+  "fp16": {
+    "enabled": "auto",
+    "loss_scale": 0,
+    "loss_scale_window": 1000,
+    "initial_scale_power": 16,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+  "bf16": {
+    "enabled": "auto"
+  },
+  "zero_optimization": {
+    "stage": 2,
+    "allgather_partitions": true,
+    "allgather_bucket_size": 5e8,
+    "overlap_comm": true,
+    "reduce_scatter": true,
+    "reduce_bucket_size": 5e8,
+    "contiguous_gradients": true,
+    "round_robin_gradients": true
+  }
+}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_z2_offload_config.json b/examples/industrial_data_pretraining/deepspeed/ds_z2_offload_config.json
new file mode 100644
index 0000000..7a39836
--- /dev/null
+++ b/examples/industrial_data_pretraining/deepspeed/ds_z2_offload_config.json
@@ -0,0 +1,32 @@
+{
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto",
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "zero_allow_untested_optimizer": true,
+  "fp16": {
+    "enabled": "auto",
+    "loss_scale": 0,
+    "loss_scale_window": 1000,
+    "initial_scale_power": 16,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+  "bf16": {
+    "enabled": "auto"
+  },
+  "zero_optimization": {
+    "stage": 2,
+    "offload_optimizer": {
+      "device": "cpu",
+      "pin_memory": true
+    },
+    "allgather_partitions": true,
+    "allgather_bucket_size": 5e8,
+    "overlap_comm": true,
+    "reduce_scatter": true,
+    "reduce_bucket_size": 5e8,
+    "contiguous_gradients": true,
+    "round_robin_gradients": true
+  }
+}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_z3_config.json b/examples/industrial_data_pretraining/deepspeed/ds_z3_config.json
new file mode 100644
index 0000000..ccf9560
--- /dev/null
+++ b/examples/industrial_data_pretraining/deepspeed/ds_z3_config.json
@@ -0,0 +1,30 @@
+{
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto",
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "zero_allow_untested_optimizer": true,
+  "fp16": {
+    "enabled": "auto",
+    "loss_scale": 0,
+    "loss_scale_window": 1000,
+    "initial_scale_power": 16,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+  "bf16": {
+    "enabled": "auto"
+  },
+  "zero_optimization": {
+    "stage": 3,
+    "overlap_comm": true,
+    "contiguous_gradients": true,
+    "sub_group_size": 1e9,
+    "reduce_bucket_size": "auto",
+    "stage3_prefetch_bucket_size": "auto",
+    "stage3_param_persistence_threshold": "auto",
+    "stage3_max_live_parameters": 1e9,
+    "stage3_max_reuse_distance": 1e9,
+    "stage3_gather_16bit_weights_on_model_save": true
+  }
+}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_z3_offload_config.json b/examples/industrial_data_pretraining/deepspeed/ds_z3_offload_config.json
new file mode 100644
index 0000000..026aabb
--- /dev/null
+++ b/examples/industrial_data_pretraining/deepspeed/ds_z3_offload_config.json
@@ -0,0 +1,38 @@
+{
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto",
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "zero_allow_untested_optimizer": true,
+  "fp16": {
+    "enabled": "auto",
+    "loss_scale": 0,
+    "loss_scale_window": 1000,
+    "initial_scale_power": 16,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+  "bf16": {
+    "enabled": "auto"
+  },
+  "zero_optimization": {
+    "stage": 3,
+    "offload_optimizer": {
+      "device": "cpu",
+      "pin_memory": true
+    },
+    "offload_param": {
+      "device": "cpu",
+      "pin_memory": true
+    },
+    "overlap_comm": true,
+    "contiguous_gradients": true,
+    "sub_group_size": 1e9,
+    "reduce_bucket_size": "auto",
+    "stage3_prefetch_bucket_size": "auto",
+    "stage3_param_persistence_threshold": "auto",
+    "stage3_max_live_parameters": 1e9,
+    "stage3_max_reuse_distance": 1e9,
+    "stage3_gather_16bit_weights_on_model_save": true
+  }
+}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml b/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml
new file mode 100644
index 0000000..483f219
--- /dev/null
+++ b/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml
@@ -0,0 +1,81 @@
+# This is an example that demonstrates how to configure a model file.
+# You can modify the configuration according to your own requirements.
+
+# to print the register_table:
+# from funasr.register import tables
+# tables.print()
+
+# network architecture
+model: LLMASR2
+model_conf:
+    lsm_weight: 0.1     # label smoothing option
+    length_normalized_loss: true
+
+# encoder
+audio_encoder: "/nfs/zhifu.gzf/init_model/SenseVoiceModelscope"
+audio_encoder_conf:
+    hub: ms
+    freeze: true
+
+llm: Qwen1.5-7b-chat
+llm_conf:
+  hub: hf
+  freeze: true
+  init_param_path: "/nfs/zhifu.gzf/init_model/qwen/Qwen1___5-7B-Chat_raw"
+
+audio_adaptor: Transformer
+audio_adaptor_conf:
+  downsample_rate: 2
+  llm_dim: 4096
+  encoder_dim: 1280
+  n_layer: 0
+
+# frontend related
+frontend: WhisperFrontend
+frontend_conf:
+    fs: 16000
+    whisper_model: large-v3
+    do_pad_trim: false
+    permute: false # true: [bs, frames, dims]; false: [bs, dims, frames]
+    filters_path: "/nfs/zhifu.gzf/init_model/SenseVoiceModelscope/assets/mel_filters.npz"
+
+
+
+train_conf:
+  accum_grad: 1
+  grad_clip: 5
+  max_epoch: 15
+  keep_nbest_models: 10
+  log_interval: 10
+
+optim: adamw
+optim_conf:
+   lr: 0.0001
+   weight_decay: 0.000000
+
+scheduler: warmuplr
+scheduler_conf:
+   warmup_steps: 1500
+
+dataset: OpenAIDataset
+dataset_conf:
+  index_ds: OpenAIIndexDSJsonl
+  batch_sampler: BatchSampler
+  batch_type: token
+  batch_size: 900
+  max_token_length: 1024
+  shuffle: true
+  sort_size: 1024
+  batch_size_scale_ratio_max: 2
+  num_workers: 4
+  audio_adaptor_downsample_rate: ${audio_adaptor_conf.downsample_rate}
+  audio_encoder_downsample_rate: 2
+  data_split_num: 512
+  batch_size_sample_max: 15
+  retry: 20
+
+
+tokenizer: HuggingfaceTokenizer
+tokenizer_conf:
+  init_param_path: "/nfs/zhifu.gzf/init_model/qwen/Qwen1___5-7B-Chat_raw"
+
diff --git a/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_transformer.yaml b/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_transformer.yaml
new file mode 100644
index 0000000..c2f6419
--- /dev/null
+++ b/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_transformer.yaml
@@ -0,0 +1,81 @@
+# This is an example that demonstrates how to configure a model file.
+# You can modify the configuration according to your own requirements.
+
+# to print the register_table:
+# from funasr.register import tables
+# tables.print()
+
+# network architecture
+model: LLMASR2
+model_conf:
+    lsm_weight: 0.1     # label smoothing option
+    length_normalized_loss: true
+
+# encoder
+audio_encoder: "/nfs/zhifu.gzf/init_model/SenseVoiceModelscope"
+audio_encoder_conf:
+    hub: ms
+    freeze: true
+
+llm: Qwen1.5-7b-chat
+llm_conf:
+  hub: hf
+  freeze: true
+  init_param_path: "/nfs/zhifu.gzf/init_model/qwen/Qwen1___5-7B-Chat_raw"
+
+audio_adaptor: Transformer
+audio_adaptor_conf:
+  downsample_rate: 2
+  llm_dim: 4096
+  encoder_dim: 1280
+  n_layer: 2
+
+# frontend related
+frontend: WhisperFrontend
+frontend_conf:
+    fs: 16000
+    whisper_model: large-v3
+    do_pad_trim: false
+    permute: false # true: [bs, frames, dims]; false: [bs, dims, frames]
+    filters_path: "/nfs/zhifu.gzf/init_model/SenseVoiceModelscope/assets/mel_filters.npz"
+
+
+
+train_conf:
+  accum_grad: 1
+  grad_clip: 5
+  max_epoch: 15
+  keep_nbest_models: 10
+  log_interval: 10
+
+optim: adamw
+optim_conf:
+   lr: 0.0001
+   weight_decay: 0.000000
+
+scheduler: warmuplr
+scheduler_conf:
+   warmup_steps: 1500
+
+dataset: OpenAIDataset
+dataset_conf:
+  index_ds: OpenAIIndexDSJsonl
+  batch_sampler: BatchSampler
+  batch_type: token
+  batch_size: 900
+  max_token_length: 1024
+  shuffle: true
+  sort_size: 1024
+  batch_size_scale_ratio_max: 2
+  num_workers: 4
+  audio_adaptor_downsample_rate: ${audio_adaptor_conf.downsample_rate}
+  audio_encoder_downsample_rate: 2
+  data_split_num: 512
+  batch_size_sample_max: 15
+  retry: 20
+
+
+tokenizer: HuggingfaceTokenizer
+tokenizer_conf:
+  init_param_path: "/nfs/zhifu.gzf/init_model/qwen/Qwen1___5-7B-Chat_raw"
+
diff --git a/examples/industrial_data_pretraining/llm_asr/demo_speech2text.py b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.py
new file mode 100644
index 0000000..e5e3e23
--- /dev/null
+++ b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
+import json
+import os
+import sys
+
+from funasr import AutoModel
+
+ckpt_dir = "/nfs/beinian.lzr/workspace/GPT-4o/Exp/exp6/5m-8gpu/exp6_speech2text_linear_ddp_0609"
+ckpt_id = "model.pt.ep0.90000"
+jsonl = (
+    "/nfs/beinian.lzr/workspace/GPT-4o/Data/Speech2Text/TestData/aishell1_test_speech2text.jsonl"
+)
+output_dir = f"{os.path.join(ckpt_dir, ckpt_id)}"
+device = "cuda:0"
+
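+# the defaults above are overridden by the positional command-line arguments:
+# python demo_speech2text.py <ckpt_dir> <ckpt_id> <jsonl> <output_dir> <device>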
+ckpt_dir = sys.argv[1]
+ckpt_id = sys.argv[2]
+jsonl = sys.argv[3]
+output_dir = sys.argv[4]
+device = sys.argv[5]
+
+model = AutoModel(
+    model=ckpt_dir,
+    init_param=f"{os.path.join(ckpt_dir, ckpt_id)}",
+    output_dir=output_dir,
+    device=device,
+)
+
+
+with open(jsonl, "r") as f:
+    lines = f.readlines()
+
+tearchforing = False  # teacher-forcing flag; name matches the keyword passed to model.generate() below
+for i, line in enumerate(lines):
+    data_dict = json.loads(line.strip())
+    data = data_dict["messages"]
+
+    res = model.generate(
+        input=[data],
+        tearchforing=tearchforing,
+        cache={},
+    )
+
+    print(res)
diff --git a/examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh
new file mode 100644
index 0000000..d4c409b
--- /dev/null
+++ b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh
@@ -0,0 +1,65 @@
+
+
+ckpt_id="model.pt.ep0.90000"
+device="cuda:0"
+
+ckpt_id=$1
+device=$2
+
+ckpt_dir="/nfs/beinian.lzr/workspace/GPT-4o/Exp/exp6/5m-8gpu/exp6_speech2text_linear_ddp_0609"
+jsonl_dir="/nfs/beinian.lzr/workspace/GPT-4o/Data/Speech2Text/TestData"
+
+out_dir="${ckpt_dir}/inference-${ckpt_id}"
+mkdir -p ${out_dir}
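+# for each test set: run inference, then compute WER/CER against the references;
+# cn_postprocess is enabled for test sets whose reference text is Chinese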
+for data_set in "librispeech_test_clean_speech2text.jsonl" "librispeech_test_other_speech2text.jsonl"; do
+    jsonl=${jsonl_dir}/${data_set}
+    output_dir=${out_dir}/${data_set}
+    mkdir -p ${output_dir}
+    pred_file=${output_dir}/1best_recog/text_tn
+    ref_file=${output_dir}/1best_recog/label
+
+    python ./demo_speech2text.py ${ckpt_dir} ${ckpt_id} ${jsonl} ${output_dir} ${device}
+
+    python /mnt/workspace/zhifu.gzf/codebase/FunASR/funasr/metrics/wer.py ++ref_file=${ref_file} ++hyp_file=${pred_file} ++cer_file=${pred_file}.cer ++cn_postprocess=false
+
+done
+
+
+for data_set in "aishell1_test_speech2text.jsonl" "aishell2_ios_test_speech2text.jsonl" "librispeech_test_other_speech2text.jsonl"; do
+    jsonl=${jsonl_dir}/${data_set}
+    output_dir=${out_dir}/${data_set}
+    mkdir -p ${output_dir}
+    pred_file=${output_dir}/1best_recog/text_tn
+    ref_file=${output_dir}/1best_recog/label
+
+    python ./demo_speech2text.py ${ckpt_dir} ${ckpt_id} ${jsonl} ${output_dir} ${device}
+
+    python /mnt/workspace/zhifu.gzf/codebase/FunASR/funasr/metrics/wer.py ++ref_file=${ref_file} ++hyp_file=${pred_file} ++cer_file=${pred_file}.cer ++cn_postprocess=true
+
+done
+
+for data_set in "s2tt_en2zh.v20240605.test.jsonl"; do
+    jsonl=${jsonl_dir}/${data_set}
+    output_dir=${out_dir}/${data_set}
+    mkdir -p ${output_dir}
+    pred_file=${output_dir}/1best_recog/text_tn
+    ref_file=${output_dir}/1best_recog/label
+
+    python ./demo_speech2text.py ${ckpt_dir} ${ckpt_id} ${jsonl} ${output_dir} ${device}
+
+    python /mnt/workspace/zhifu.gzf/codebase/FunASR/funasr/metrics/wer.py ++ref_file=${ref_file} ++hyp_file=${pred_file} ++cer_file=${pred_file}.cer ++cn_postprocess=true
+
+done
+
+for data_set in "s2tt_zh2en.v20240605.test.jsonl"; do
+    jsonl=${jsonl_dir}/${data_set}
+    output_dir=${out_dir}/${data_set}
+    mkdir -p ${output_dir}
+    pred_file=${output_dir}/1best_recog/text_tn
+    ref_file=${output_dir}/1best_recog/label
+
+    python ./demo_speech2text.py ${ckpt_dir} ${ckpt_id} ${jsonl} ${output_dir} ${device}
+
+    python /mnt/workspace/zhifu.gzf/codebase/FunASR/funasr/metrics/wer.py ++ref_file=${ref_file} ++hyp_file=${pred_file} ++cer_file=${pred_file}.cer ++cn_postprocess=false
+
+done
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune2.sh b/examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune2.sh
new file mode 100644
index 0000000..306e23d
--- /dev/null
+++ b/examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune2.sh
@@ -0,0 +1,47 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
+
+# which GPU(s) to use for training or finetuning
+export CUDA_VISIBLE_DEVICES="0"
+gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+
+# data dir, which contains: train.json, val.json, tokens.jsonl/tokens.txt, am.mvn
+#data_dir="/Users/zhifu/funasr1.0/data/list"
+
+## generate jsonl from wav.scp and text.txt
+#python -m funasr.datasets.audio_datasets.scp2jsonl \
+#++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
+#++data_type_list='["source", "target"]' \
+#++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
+
+train_data="/nfs/beinian.lzr/workspace/tools/speech2speech_tools/speech2text/out_dir/tmp_wav.jsonl"
+val_data="/nfs/beinian.lzr/workspace/tools/speech2speech_tools/speech2text/out_dir/tmp_wav.jsonl"
+
+# exp output dir
+output_dir="/Users/zhifu/funasr1.0/test_local/data_tmp/"
+log_file="${output_dir}/log.txt"
+
+workspace=`pwd`
+config="whisper_qwen_linear2.yaml"
+
+init_param="${output_dir}/model.pt"
+
+mkdir -p ${output_dir}
+echo "log_file: ${log_file}"
+
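+# launch distributed training with torchrun; output is redirected to ${log_file}
+# and the job runs in the background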
+torchrun \
+--nnodes 1 \
+--nproc_per_node ${gpu_num} \
+../../../funasr/bin/train.py \
+--config-path "${workspace}/conf" \
+--config-name "${config}" \
+++train_data_set_list="${train_data}" \
+++valid_data_set_list="${val_data}" \
+++dataset_conf.batch_size=1 \
+++dataset_conf.num_workers=0 \
+++train_conf.max_epoch=15 \
+++train_conf.save_checkpoint_interval=1000 \
+++optim_conf.lr=0.0001 \
+++init_param="${init_param}" \
+++output_dir="${output_dir}" &> ${log_file} &
diff --git a/examples/industrial_data_pretraining/llm_asr/infer_speech2text.sh b/examples/industrial_data_pretraining/llm_asr/infer_speech2text.sh
new file mode 100644
index 0000000..bfdaca7
--- /dev/null
+++ b/examples/industrial_data_pretraining/llm_asr/infer_speech2text.sh
@@ -0,0 +1,9 @@
+
+
+python funasr/bin/inference.py \
+--config-path="/nfs/zhifu.gzf/ckpt/llm_asr_nar_exp1" \
+--config-name="config.yaml" \
+++init_param="/nfs/zhifu.gzf/ckpt/llm_asr_nar_exp1/model.pt.ep5" \
+++input="/Users/zhifu/funasr1.0/test_local/data_tmp/tmp_wav_10.jsonl" \
+++output_dir="/nfs/zhifu.gzf/ckpt/llm_asr_nar_exp1/inference/aishell2-dev_ios-funasr" \
+++device="cpu"
\ No newline at end of file
diff --git a/examples/wenetspeech/conformer/run.sh b/examples/wenetspeech/conformer/run.sh
index 0503a9e..6ae995a 100755
--- a/examples/wenetspeech/conformer/run.sh
+++ b/examples/wenetspeech/conformer/run.sh
@@ -92,7 +92,7 @@
     echo "<blank>" > ${token_list}
     echo "<s>" >> ${token_list}
     echo "</s>" >> ${token_list}
-    utils/text2token.py -s 1 -n 1 --space "" --text_format "jsonl" ${feats_dir}/data/$train_set/audio_datasets.jsonl | cut -f 2- -d" " | tr " " "\n" \
+    utils/text2token.py -s 1 -n 1 --space "" ${feats_dir}/data/$train_set/text | cut -f 2- -d" " | tr " " "\n" \
         | sort | uniq | grep -a -v -e '^\s*$' | awk '{print $0}' >> ${token_list}
     echo "<unk>" >> ${token_list}
 fi
diff --git a/examples/wenetspeech/transformer/README.md b/examples/wenetspeech/transformer/README.md
deleted file mode 100644
index 2435b55..0000000
--- a/examples/wenetspeech/transformer/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-
-# Conformer Result
-
-## Training Config
-- Feature info: using 80 dims fbank, global cmvn, speed perturb(0.9, 1.0, 1.1), specaugment
-- Train info: lr 5e-4, batch_size 25000, 2 gpu(Tesla V100), acc_grad 1, 50 epochs
-- Train config: conf/train_asr_transformer.yaml
-- LM config: LM was not used
-- Model size: 46M
-
-## Results (CER)
-
-|   testset   | CER(%) |
-|:-----------:|:------:|
-|     dev     |  4.97  |
-|    test     |  5.37  |
\ No newline at end of file
diff --git a/examples/wenetspeech/transformer/conf/transformer_12e_6d_2048_256.yaml b/examples/wenetspeech/transformer/conf/transformer_12e_6d_2048_256.yaml
deleted file mode 100644
index efcf593..0000000
--- a/examples/wenetspeech/transformer/conf/transformer_12e_6d_2048_256.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-# This is an example that demonstrates how to configure a model file.
-# You can modify the configuration according to your own requirements.
-
-# to print the register_table:
-# from funasr.register import tables
-# tables.print()
-
-# network architecture
-model: Transformer
-model_conf:
-    ctc_weight: 0.3
-    lsm_weight: 0.1     # label smoothing option
-    length_normalized_loss: false
-
-# encoder
-encoder: TransformerEncoder
-encoder_conf:
-    output_size: 256    # dimension of attention
-    attention_heads: 4
-    linear_units: 2048  # the number of units of position-wise feed forward
-    num_blocks: 12      # the number of encoder blocks
-    dropout_rate: 0.1
-    positional_dropout_rate: 0.1
-    attention_dropout_rate: 0.0
-    input_layer: conv2d # encoder architecture type
-    normalize_before: true
-
-# decoder
-decoder: TransformerDecoder
-decoder_conf:
-    attention_heads: 4
-    linear_units: 2048
-    num_blocks: 6
-    dropout_rate: 0.1
-    positional_dropout_rate: 0.1
-    self_attention_dropout_rate: 0.0
-    src_attention_dropout_rate: 0.0
-
-
-# frontend related
-frontend: WavFrontend
-frontend_conf:
-    fs: 16000
-    window: hamming
-    n_mels: 80
-    frame_length: 25
-    frame_shift: 10
-    lfr_m: 1
-    lfr_n: 1
-
-specaug: SpecAug
-specaug_conf:
-    apply_time_warp: true
-    time_warp_window: 5
-    time_warp_mode: bicubic
-    apply_freq_mask: true
-    freq_mask_width_range:
-    - 0
-    - 30
-    num_freq_mask: 2
-    apply_time_mask: true
-    time_mask_width_range:
-    - 0
-    - 40
-    num_time_mask: 2
-
-train_conf:
-  accum_grad: 1
-  grad_clip: 5
-  max_epoch: 150
-  keep_nbest_models: 10
-  log_interval: 50
-
-optim: adam
-optim_conf:
-   lr: 0.002
-scheduler: warmuplr
-scheduler_conf:
-   warmup_steps: 30000
-
-dataset: AudioDataset
-dataset_conf:
-    index_ds: IndexDSJsonl
-    batch_sampler: EspnetStyleBatchSampler
-    batch_type: length # example or length
-    batch_size: 25000 # if batch_type is example, batch_size is the numbers of samples; if length, batch_size is source_token_len+target_token_len;
-    max_token_length: 2048 # filter samples if source_token_len+target_token_len > max_token_length,
-    buffer_size: 1024
-    shuffle: True
-    num_workers: 4
-    preprocessor_speech: SpeechPreprocessSpeedPerturb
-    preprocessor_speech_conf:
-      speed_perturb: [0.9, 1.0, 1.1]
-
-tokenizer: CharTokenizer
-tokenizer_conf:
-  unk_symbol: <unk>
-
-ctc_conf:
-    dropout_rate: 0.0
-    ctc_type: builtin
-    reduce: true
-    ignore_nan_grad: true
-normalize: null
diff --git a/examples/wenetspeech/transformer/demo_infer.sh b/examples/wenetspeech/transformer/demo_infer.sh
deleted file mode 120000
index 9d0a7a9..0000000
--- a/examples/wenetspeech/transformer/demo_infer.sh
+++ /dev/null
@@ -1 +0,0 @@
-../paraformer/demo_infer.sh
\ No newline at end of file
diff --git a/examples/wenetspeech/transformer/demo_train_or_finetune.sh b/examples/wenetspeech/transformer/demo_train_or_finetune.sh
deleted file mode 120000
index bbabdbe..0000000
--- a/examples/wenetspeech/transformer/demo_train_or_finetune.sh
+++ /dev/null
@@ -1 +0,0 @@
-../paraformer/demo_train_or_finetune.sh
\ No newline at end of file
diff --git a/examples/wenetspeech/transformer/local/aishell_data_prep.sh b/examples/wenetspeech/transformer/local/aishell_data_prep.sh
deleted file mode 100755
index 83f489b..0000000
--- a/examples/wenetspeech/transformer/local/aishell_data_prep.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-
-# Copyright 2017 Xingyu Na
-# Apache 2.0
-
-#. ./path.sh || exit 1;
-
-if [ $# != 3 ]; then
-  echo "Usage: $0 <audio-path> <text-path> <output-path>"
-  echo " $0 /export/a05/xna/data/data_aishell/wav /export/a05/xna/data/data_aishell/transcript data"
-  exit 1;
-fi
-
-aishell_audio_dir=$1
-aishell_text=$2/aishell_transcript_v0.8.txt
-output_dir=$3
-
-train_dir=$output_dir/data/local/train
-dev_dir=$output_dir/data/local/dev
-test_dir=$output_dir/data/local/test
-tmp_dir=$output_dir/data/local/tmp
-
-mkdir -p $train_dir
-mkdir -p $dev_dir
-mkdir -p $test_dir
-mkdir -p $tmp_dir
-
-# data directory check
-if [ ! -d $aishell_audio_dir ] || [ ! -f $aishell_text ]; then
-  echo "Error: $0 requires two directory arguments"
-  exit 1;
-fi
-
-# find wav audio file for train, dev and test resp.
-find $aishell_audio_dir -iname "*.wav" > $tmp_dir/wav.flist
-n=`cat $tmp_dir/wav.flist | wc -l`
-[ $n -ne 141925 ] && \
-  echo Warning: expected 141925 data data files, found $n
-
-grep -i "wav/train" $tmp_dir/wav.flist > $train_dir/wav.flist || exit 1;
-grep -i "wav/dev" $tmp_dir/wav.flist > $dev_dir/wav.flist || exit 1;
-grep -i "wav/test" $tmp_dir/wav.flist > $test_dir/wav.flist || exit 1;
-
-rm -r $tmp_dir
-
-# Transcriptions preparation
-for dir in $train_dir $dev_dir $test_dir; do
-  echo Preparing $dir transcriptions
-  sed -e 's/\.wav//' $dir/wav.flist | awk -F '/' '{print $NF}' > $dir/utt.list
-  paste -d' ' $dir/utt.list $dir/wav.flist > $dir/wav.scp_all
-  utils/filter_scp.pl -f 1 $dir/utt.list $aishell_text > $dir/transcripts.txt
-  awk '{print $1}' $dir/transcripts.txt > $dir/utt.list
-  utils/filter_scp.pl -f 1 $dir/utt.list $dir/wav.scp_all | sort -u > $dir/wav.scp
-  sort -u $dir/transcripts.txt > $dir/text
-done
-
-mkdir -p $output_dir/data/train $output_dir/data/dev $output_dir/data/test
-
-for f in wav.scp text; do
-  cp $train_dir/$f $output_dir/data/train/$f || exit 1;
-  cp $dev_dir/$f $output_dir/data/dev/$f || exit 1;
-  cp $test_dir/$f $output_dir/data/test/$f || exit 1;
-done
-
-echo "$0: AISHELL data preparation succeeded"
-exit 0;
diff --git a/examples/wenetspeech/transformer/local/download_and_untar.sh b/examples/wenetspeech/transformer/local/download_and_untar.sh
deleted file mode 100755
index d982559..0000000
--- a/examples/wenetspeech/transformer/local/download_and_untar.sh
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright   2014  Johns Hopkins University (author: Daniel Povey)
-#             2017  Xingyu Na
-# Apache 2.0
-
-remove_archive=false
-
-if [ "$1" == --remove-archive ]; then
-  remove_archive=true
-  shift
-fi
-
-if [ $# -ne 3 ]; then
-  echo "Usage: $0 [--remove-archive] <data-base> <url-base> <corpus-part>"
-  echo "e.g.: $0 /export/a05/xna/data www.openslr.org/resources/33 data_aishell"
-  echo "With --remove-archive it will remove the archive after successfully un-tarring it."
-  echo "<corpus-part> can be one of: data_aishell, resource_aishell."
-fi
-
-data=$1
-url=$2
-part=$3
-
-if [ ! -d "$data" ]; then
-  echo "$0: no such directory $data"
-  exit 1;
-fi
-
-part_ok=false
-list="data_aishell resource_aishell"
-for x in $list; do
-  if [ "$part" == $x ]; then part_ok=true; fi
-done
-if ! $part_ok; then
-  echo "$0: expected <corpus-part> to be one of $list, but got '$part'"
-  exit 1;
-fi
-
-if [ -z "$url" ]; then
-  echo "$0: empty URL base."
-  exit 1;
-fi
-
-if [ -f $data/$part/.complete ]; then
-  echo "$0: data part $part was already successfully extracted, nothing to do."
-  exit 0;
-fi
-
-# sizes of the archive files in bytes.
-sizes="15582913665 1246920"
-
-if [ -f $data/$part.tgz ]; then
-  size=$(/bin/ls -l $data/$part.tgz | awk '{print $5}')
-  size_ok=false
-  for s in $sizes; do if [ $s == $size ]; then size_ok=true; fi; done
-  if ! $size_ok; then
-    echo "$0: removing existing file $data/$part.tgz because its size in bytes $size"
-    echo "does not equal the size of one of the archives."
-    rm $data/$part.tgz
-  else
-    echo "$data/$part.tgz exists and appears to be complete."
-  fi
-fi
-
-if [ ! -f $data/$part.tgz ]; then
-  if ! command -v wget >/dev/null; then
-    echo "$0: wget is not installed."
-    exit 1;
-  fi
-  full_url=$url/$part.tgz
-  echo "$0: downloading data from $full_url.  This may take some time, please be patient."
-
-  cd $data || exit 1
-  if ! wget --no-check-certificate $full_url; then
-    echo "$0: error executing wget $full_url"
-    exit 1;
-  fi
-fi
-
-cd $data || exit 1
-
-if ! tar -xvzf $part.tgz; then
-  echo "$0: error un-tarring archive $data/$part.tgz"
-  exit 1;
-fi
-
-touch $data/$part/.complete
-
-if [ $part == "data_aishell" ]; then
-  cd $data/$part/wav || exit 1
-  for wav in ./*.tar.gz; do
-    echo "Extracting wav from $wav"
-    tar -zxf $wav && rm $wav
-  done
-fi
-
-echo "$0: Successfully downloaded and un-tarred $data/$part.tgz"
-
-if $remove_archive; then
-  echo "$0: removing $data/$part.tgz file since --remove-archive option was supplied."
-  rm $data/$part.tgz
-fi
-
-exit 0;
diff --git a/examples/wenetspeech/transformer/run.sh b/examples/wenetspeech/transformer/run.sh
deleted file mode 100755
index 3fb8465..0000000
--- a/examples/wenetspeech/transformer/run.sh
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/usr/bin/env bash
-
-
-CUDA_VISIBLE_DEVICES="0,1"
-
-# general configuration
-feats_dir="../DATA" #feature output dictionary
-exp_dir=`pwd`
-lang=zh
-token_type=char
-stage=0
-stop_stage=5
-
-# feature configuration
-nj=32
-
-inference_device="cuda" #"cpu"
-inference_checkpoint="model.pt.avg10"
-inference_scp="wav.scp"
-inference_batch_size=1
-
-# data
-raw_data=../raw_data
-data_url=www.openslr.org/resources/33
-
-# exp tag
-tag="exp1"
-workspace=`pwd`
-
-master_port=12345
-
-. utils/parse_options.sh || exit 1;
-
-# Set bash to 'debug' mode, it will exit on :
-# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
-set -e
-set -u
-set -o pipefail
-
-train_set=train
-valid_set=dev
-test_sets="dev test"
-
-config=transformer_12e_6d_2048_256.yaml
-model_dir="baseline_$(basename "${config}" .yaml)_${lang}_${token_type}_${tag}"
-
-
-
-if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
-    echo "stage -1: Data Download"
-    mkdir -p ${raw_data}
-    local/download_and_untar.sh ${raw_data} ${data_url} data_aishell
-    local/download_and_untar.sh ${raw_data} ${data_url} resource_aishell
-fi
-
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
-    echo "stage 0: Data preparation"
-    # Data preparation
-    local/aishell_data_prep.sh ${raw_data}/data_aishell/wav ${raw_data}/data_aishell/transcript ${feats_dir}
-    for x in train dev test; do
-        cp ${feats_dir}/data/${x}/text ${feats_dir}/data/${x}/text.org
-        paste -d " " <(cut -f 1 -d" " ${feats_dir}/data/${x}/text.org) <(cut -f 2- -d" " ${feats_dir}/data/${x}/text.org | tr -d " ") \
-            > ${feats_dir}/data/${x}/text
-        utils/text2token.py -n 1 -s 1 ${feats_dir}/data/${x}/text > ${feats_dir}/data/${x}/text.org
-        mv ${feats_dir}/data/${x}/text.org ${feats_dir}/data/${x}/text
-
-        # convert wav.scp text to jsonl
-        scp_file_list_arg="++scp_file_list='[\"${feats_dir}/data/${x}/wav.scp\",\"${feats_dir}/data/${x}/text\"]'"
-        python ../../../funasr/datasets/audio_datasets/scp2jsonl.py \
-        ++data_type_list='["source", "target"]' \
-        ++jsonl_file_out=${feats_dir}/data/${x}/audio_datasets.jsonl \
-        ${scp_file_list_arg}
-    done
-fi
-
-if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
-    echo "stage 1: Feature and CMVN Generation"
-    python ../../../funasr/bin/compute_audio_cmvn.py \
-    --config-path "${workspace}/conf" \
-    --config-name "${config}" \
-    ++train_data_set_list="${feats_dir}/data/${train_set}/audio_datasets.jsonl" \
-    ++cmvn_file="${feats_dir}/data/${train_set}/cmvn.json" \
-
-fi
-
-token_list=${feats_dir}/data/${lang}_token_list/$token_type/tokens.txt
-echo "dictionary: ${token_list}"
-if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
-    echo "stage 2: Dictionary Preparation"
-    mkdir -p ${feats_dir}/data/${lang}_token_list/$token_type/
-
-    echo "make a dictionary"
-    echo "<blank>" > ${token_list}
-    echo "<s>" >> ${token_list}
-    echo "</s>" >> ${token_list}
-    utils/text2token.py -s 1 -n 1 --space "" ${feats_dir}/data/$train_set/text | cut -f 2- -d" " | tr " " "\n" \
-        | sort | uniq | grep -a -v -e '^\s*$' | awk '{print $0}' >> ${token_list}
-    echo "<unk>" >> ${token_list}
-fi
-
-# LM Training Stage
-if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
-    echo "stage 3: LM Training"
-fi
-
-# ASR Training Stage
-if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
-  echo "stage 4: ASR Training"
-
-  mkdir -p ${exp_dir}/exp/${model_dir}
-  current_time=$(date "+%Y-%m-%d_%H-%M")
-  log_file="${exp_dir}/exp/${model_dir}/train.log.txt.${current_time}"
-  echo "log_file: ${log_file}"
-
-  export CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES
-  gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
-  torchrun \
-  --nnodes 1 \
-  --nproc_per_node ${gpu_num} \
-  --master_port ${master_port} \
-  ../../../funasr/bin/train.py \
-  --config-path "${workspace}/conf" \
-  --config-name "${config}" \
-  ++train_data_set_list="${feats_dir}/data/${train_set}/audio_datasets.jsonl" \
-  ++valid_data_set_list="${feats_dir}/data/${valid_set}/audio_datasets.jsonl" \
-  ++tokenizer_conf.token_list="${token_list}" \
-  ++frontend_conf.cmvn_file="${feats_dir}/data/${train_set}/am.mvn" \
-  ++output_dir="${exp_dir}/exp/${model_dir}" &> ${log_file}
-fi
-
-
-
-# Testing Stage
-if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
-  echo "stage 5: Inference"
-
-  if [ ${inference_device} == "cuda" ]; then
-      nj=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
-  else
-      inference_batch_size=1
-      CUDA_VISIBLE_DEVICES=""
-      for JOB in $(seq ${nj}); do
-          CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"-1,"
-      done
-  fi
-
-  for dset in ${test_sets}; do
-
-    inference_dir="${exp_dir}/exp/${model_dir}/inference-${inference_checkpoint}/${dset}"
-    _logdir="${inference_dir}/logdir"
-    echo "inference_dir: ${inference_dir}"
-
-    mkdir -p "${_logdir}"
-    data_dir="${feats_dir}/data/${dset}"
-    key_file=${data_dir}/${inference_scp}
-
-    split_scps=
-    for JOB in $(seq "${nj}"); do
-        split_scps+=" ${_logdir}/keys.${JOB}.scp"
-    done
-    utils/split_scp.pl "${key_file}" ${split_scps}
-
-    gpuid_list_array=(${CUDA_VISIBLE_DEVICES//,/ })
-    for JOB in $(seq ${nj}); do
-        {
-          id=$((JOB-1))
-          gpuid=${gpuid_list_array[$id]}
-
-          export CUDA_VISIBLE_DEVICES=${gpuid}
-          python ../../../funasr/bin/inference.py \
-          --config-path="${exp_dir}/exp/${model_dir}" \
-          --config-name="config.yaml" \
-          ++init_param="${exp_dir}/exp/${model_dir}/${inference_checkpoint}" \
-          ++tokenizer_conf.token_list="${token_list}" \
-          ++frontend_conf.cmvn_file="${feats_dir}/data/${train_set}/am.mvn" \
-          ++input="${_logdir}/keys.${JOB}.scp" \
-          ++output_dir="${inference_dir}/${JOB}" \
-          ++device="${inference_device}" \
-          ++ncpu=1 \
-          ++disable_log=true \
-          ++batch_size="${inference_batch_size}" &> ${_logdir}/log.${JOB}.txt
-        }&
-
-    done
-    wait
-
-    mkdir -p ${inference_dir}/1best_recog
-    for f in token score text; do
-        if [ -f "${inference_dir}/${JOB}/1best_recog/${f}" ]; then
-          for JOB in $(seq "${nj}"); do
-              cat "${inference_dir}/${JOB}/1best_recog/${f}"
-          done | sort -k1 >"${inference_dir}/1best_recog/${f}"
-        fi
-    done
-
-    echo "Computing WER ..."
-    python utils/postprocess_text_zh.py ${inference_dir}/1best_recog/text ${inference_dir}/1best_recog/text.proc
-    python utils/postprocess_text_zh.py  ${data_dir}/text ${inference_dir}/1best_recog/text.ref
-    python utils/compute_wer.py ${inference_dir}/1best_recog/text.ref ${inference_dir}/1best_recog/text.proc ${inference_dir}/1best_recog/text.cer
-    tail -n 3 ${inference_dir}/1best_recog/text.cer
-  done
-
-fi
\ No newline at end of file
diff --git a/examples/wenetspeech/transformer/utils b/examples/wenetspeech/transformer/utils
deleted file mode 120000
index 1f2ce9d..0000000
--- a/examples/wenetspeech/transformer/utils
+++ /dev/null
@@ -1 +0,0 @@
-../paraformer/utils
\ No newline at end of file
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 191e172..047e652 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -233,6 +233,8 @@
         # fp16
         if kwargs.get("fp16", False):
             model.to(torch.float16)
+        elif kwargs.get("bf16", False):
+            model.to(torch.bfloat16)
         return model, kwargs
 
     def __call__(self, *args, **cfg):
diff --git a/funasr/bin/train.py b/funasr/bin/train.py
index c3556d1..1a30481 100644
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -202,6 +202,7 @@
         time1 = time.perf_counter()
 
         for data_split_i in range(trainer.start_data_split_i, dataloader.data_split_num):
+            time_slice_i = time.perf_counter()
             dataloader_tr, dataloader_val = dataloader.build_iter(
                 epoch, data_split_i=data_split_i, start_step=trainer.start_step
             )
@@ -223,6 +224,14 @@
 
             torch.cuda.empty_cache()
 
+            time_elapsed = (time.perf_counter() - time_slice_i) / 3600.0
+            logging.info(
+                f"rank: {local_rank}, "
+                f"time_elapsed_per_data_slice: {time_elapsed:.3f} hours, "
+                f"remaining in this epoch: {dataloader.data_split_num-data_split_i} of {dataloader.data_split_num} data_slices, estimated {(dataloader.data_split_num-data_split_i)*time_elapsed:.3f} hours, "
+                f"remaining epochs: {trainer.max_epoch - epoch}, estimated {((trainer.max_epoch - epoch - 1)*dataloader.data_split_num + dataloader.data_split_num-data_split_i)*time_elapsed:.3f} hours\n"
+            )
+
         trainer.start_data_split_i = 0
         trainer.validate_epoch(
             model=model, dataloader_val=dataloader_val, epoch=epoch + 1, writer=writer
diff --git a/funasr/bin/train_ds.py b/funasr/bin/train_ds.py
index a4ae11b..5b1d4fd 100644
--- a/funasr/bin/train_ds.py
+++ b/funasr/bin/train_ds.py
@@ -158,6 +158,8 @@
         time1 = time.perf_counter()
 
         for data_split_i in range(trainer.start_data_split_i, dataloader.data_split_num):
+            time_slice_i = time.perf_counter()
+
             dataloader_tr, dataloader_val = dataloader.build_iter(
                 epoch, data_split_i=data_split_i, start_step=trainer.start_step
             )
@@ -178,6 +180,14 @@
 
             torch.cuda.empty_cache()
 
+            time_elapsed = (time.perf_counter() - time_slice_i) / 3600.0
+            logging.info(
+                f"rank: {local_rank}, "
+                f"time_elapsed_per_data_slice: {time_elapsed:.3f} hours, "
+                f"remaining in this epoch: {dataloader.data_split_num-data_split_i} of {dataloader.data_split_num} data_slices, estimated {(dataloader.data_split_num-data_split_i)*time_elapsed:.3f} hours, "
+                f"remaining epochs: {trainer.max_epoch - epoch}, estimated {((trainer.max_epoch - epoch - 1)*dataloader.data_split_num + dataloader.data_split_num-data_split_i)*time_elapsed:.3f} hours\n"
+            )
+
         trainer.start_data_split_i = 0
         trainer.validate_epoch(model=model, dataloader_val=dataloader_val, epoch=epoch + 1)
         scheduler.step()
diff --git a/funasr/datasets/audio_datasets/samplers.py b/funasr/datasets/audio_datasets/samplers.py
index 18f8f91..f7057de 100644
--- a/funasr/datasets/audio_datasets/samplers.py
+++ b/funasr/datasets/audio_datasets/samplers.py
@@ -334,6 +334,7 @@
         drop_last=False,
         is_training: bool = True,
         sort_size: int = 1024,
+        start_step: int = 0,
         **kwargs,
     ):
 
@@ -364,9 +365,14 @@
         self.sort_size = sort_size * num_replicas
         self.max_token_length = kwargs.get("max_token_length", 2048)
         self.length_scale_source = kwargs.get("length_scale_source", 1.0)
-        super().__init__(
-            dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle, drop_last=drop_last
-        )
+        self.batch_size_sample_max = kwargs.get("batch_size_sample_max", 200)
+        self.start_step = start_step
+        self.batch_num = 1
+        if self.start_step > 0:
+            logging.info(f"Warning, start_step > 0, dataloader start from step: {self.start_step}")
+        # super().__init__(
+        #     dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle, drop_last=drop_last
+        # )
 
     def __iter__(self):
         if self.shuffle:
@@ -386,19 +392,22 @@
             )
             batch = []
             max_len_in_batch = 0
+            count = 1
             for idx in buffer:
                 original_sample_length = self.dataset.get_source_len(idx)
                 if original_sample_length > self.max_token_length:
                     continue
                 sample_length = 1 if self.batch_type == "example" else original_sample_length
                 potential_batch_length = max(max_len_in_batch, sample_length) * (len(batch) + 1)
-                if potential_batch_length <= self.batch_size:
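+                # additionally cap the number of samples in a batch at batch_size_sample_max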
+                if potential_batch_length <= self.batch_size and count < self.batch_size_sample_max:
                     batch.append(idx)
                     max_len_in_batch = max(max_len_in_batch, sample_length)
+                    count += 1
                 else:
                     buffer_batches.append(batch)
                     batch = [idx]
                     max_len_in_batch = sample_length
+                    count = 1
             if batch:
                 buffer_batches.append(batch)
 
@@ -415,13 +424,17 @@
             rank_batches[i % self.num_replicas].append(batch)
 
         # Assign all batches for the current rank directly
-        final_batches = rank_batches[self.rank]
+        final_batches = rank_batches[self.rank][self.start_step :]
+        self.batch_num = len(final_batches)
 
+        logging.info(
+            f"rank: {self.rank}, dataloader start from step: {self.start_step}, batch_num: {len(rank_batches[self.rank])}, after: {self.batch_num}"
+        )
         return iter(final_batches)
 
     def __len__(self):
-
-        return 1
+        # Calculate the number of batches per epoch for the current rank
+        return self.batch_num
 
     def set_epoch(self, epoch):
         self.epoch = epoch
diff --git a/funasr/datasets/dataloader_entry.py b/funasr/datasets/dataloader_entry.py
index 925b1d3..055e4c8 100644
--- a/funasr/datasets/dataloader_entry.py
+++ b/funasr/datasets/dataloader_entry.py
@@ -49,14 +49,19 @@
     def __init__(self, frontend=None, tokenizer=None, **kwargs):
         # dataset
         logging.info("Build dataloader")
+
         dataset_class = tables.dataset_classes.get(kwargs.get("dataset", "AudioDataset"))
-        dataset_tr = dataset_class(
-            kwargs.get("train_data_set_list"),
-            frontend=frontend,
-            tokenizer=tokenizer,
-            is_training=True,
-            **kwargs.get("dataset_conf"),
-        )
+        dataset_tr = None
+        # split dataset
+        self.data_split_num = kwargs["dataset_conf"].get("data_split_num", 1)
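+        # when data_split_num > 1, the training dataset is built per data split
+        # later (via build_iter), so it is not constructed here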
+        if self.data_split_num == 1:
+            dataset_tr = dataset_class(
+                kwargs.get("train_data_set_list"),
+                frontend=frontend,
+                tokenizer=tokenizer,
+                is_training=True,
+                **kwargs.get("dataset_conf"),
+            )
         dataset_val = dataset_class(
             kwargs.get("valid_data_set_list"),
             frontend=frontend,
@@ -69,8 +74,6 @@
         self.dataset_val = dataset_val
         self.kwargs = kwargs
 
-        # split dataset
-        self.data_split_num = kwargs["dataset_conf"].get("data_split_num", 1)
         self.dataset_class = dataset_class
         self.frontend = frontend
         self.tokenizer = tokenizer
diff --git a/funasr/datasets/openai_datasets/__init__.py b/funasr/datasets/openai_datasets/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/funasr/datasets/openai_datasets/__init__.py
diff --git a/funasr/datasets/openai_datasets/datasets.py b/funasr/datasets/openai_datasets/datasets.py
new file mode 100644
index 0000000..8d243ac
--- /dev/null
+++ b/funasr/datasets/openai_datasets/datasets.py
@@ -0,0 +1,224 @@
+import logging
+import re
+import torch
+import random
+import traceback
+from funasr.register import tables
+from funasr.utils.load_utils import extract_fbank, load_audio_text_image_video
+
+
+@tables.register("dataset_classes", "OpenAIDataset")
+class OpenAIDataset(torch.utils.data.Dataset):
+    """
+    OpenAIDataset: dataset for OpenAI-style (system/user/assistant) jsonl samples
+    """
+
+    def __init__(
+        self,
+        path,
+        index_ds: str = None,
+        frontend=None,
+        tokenizer=None,
+        int_pad_value: int = -1,
+        float_pad_value: float = 0.0,
+        **kwargs,
+    ):
+        super().__init__()
+        index_ds_class = tables.index_ds_classes.get(index_ds)
+        self.index_ds = index_ds_class(path, **kwargs)
+        preprocessor_speech = kwargs.get("preprocessor_speech", None)
+        if preprocessor_speech:
+            preprocessor_speech_class = tables.preprocessor_classes.get(preprocessor_speech)
+            preprocessor_speech = preprocessor_speech_class(
+                **kwargs.get("preprocessor_speech_conf")
+            )
+        self.preprocessor_speech = preprocessor_speech
+        preprocessor_text = kwargs.get("preprocessor_text", None)
+        if preprocessor_text:
+            preprocessor_text_class = tables.preprocessor_classes.get(preprocessor_text)
+            preprocessor_text = preprocessor_text_class(**kwargs.get("preprocessor_text_conf"))
+        self.preprocessor_text = preprocessor_text
+
+        self.frontend = frontend
+        self.fs = 16000 if frontend is None else frontend.fs
+        self.data_type = "sound"
+        self.tokenizer = tokenizer
+
+        self.int_pad_value = int_pad_value
+        self.float_pad_value = float_pad_value
+        self.sos = kwargs.get("sos", "<|startoftranscript|>")
+        self.eos = kwargs.get("eos", "<|endoftext|>")
+        self.batch_size = kwargs.get("batch_size")
+        self.batch_type = kwargs.get("batch_type")
+        self.prompt_ids_len = 0
+        self.retry = kwargs.get("retry", 100)
+
+        self.permute = False
+        from funasr.frontends.whisper_frontend import WhisperFrontend
+
+        if isinstance(self.frontend, WhisperFrontend):
+            self.permute = True
+
+        self.pattern = re.compile(r"(<\|startofspeech\|>.*?<\|endofspeech\|>)")
+        # self.kwargs = kwargs
+        self.max_token_length = kwargs.get("max_token_length", 1024)
+        self.batch_size_scale_ratio_max = kwargs.get("batch_size_scale_ratio_max", 1.5)
+        self.batch_size_token_max = kwargs.get("batch_size_token_max", 2500)
+
+    def get_source_len(self, index):
+        item = self.index_ds[index]
+        return self.index_ds.get_source_len(item)
+
+    def get_target_len(self, index):
+        item = self.index_ds[index]
+        return self.index_ds.get_target_len(item)
+
+    def __len__(self):
+        return len(self.index_ds)
+
+    def __getitem__(self, index):
+        # import pdb;
+        # pdb.set_trace()
+
+        output = None
+
+        for idx in range(self.retry):
+            badcase_flag = False
+            if idx == 0:
+                index_cur = index
+            else:
+                index_cur = torch.randint(0, len(self.index_ds), ()).item()
+
+            item = self.index_ds[index_cur]
+
+            system = item["system"]
+            user = item["user"]
+            assistant = item["assistant"]
+
+            input_ids, labels, fbank, fbank_lens, fbank_mask, fbank_beg = [], [], [], [], [], []
+
+            for i, (system_prompt, user_prompt, target_out) in enumerate(
+                zip(system, user, assistant)
+            ):
+
+                source_input = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
+
+                splits = self.pattern.split(source_input)
+                source_ids = []
+                fbank_mask_i = []
+                fbank_beg_i = []
+                fbank_lens_i = []
+                for k, sub_str in enumerate(splits):
+                    if not sub_str.startswith("<|startofspeech|>"):
+                        sub_token = self.tokenizer.encode(sub_str)
+                        source_ids += sub_token
+                        fbank_mask_i += [0] * len(sub_token)
+                    else:
+                        sub_str = sub_str.replace("<|startofspeech|>", "").replace(
+                            "<|endofspeech|>", ""
+                        )
+                        if sub_str.startswith("!"):
+                            try:
+                                data_src = load_audio_text_image_video(sub_str[1:], fs=self.fs)
+                            except Exception as e:
+                                logging.error(
+                                    f"Loading wav failed! {str(e)}, {traceback.format_exc()}"
+                                )
+                                badcase_flag = True
+                                continue
+                            speech, speech_lengths = extract_fbank(
+                                data_src,
+                                data_type=self.data_type,
+                                frontend=self.frontend,
+                                is_final=True,
+                            )  # speech: [b, T, d]
+                            if self.permute:
+                                speech = speech.permute(0, 2, 1)
+                            # if speech_lengths > self.batch_size:
+                            #     continue
+
+                            olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
+                            olens = 1 + (olens - 3 + 2 * 1) // 2
+                            sub_token_len = (olens - 1) // 2 + 1
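+                            # zeros serve as placeholder token ids for the audio span;
+                            # fbank_beg_i marks where they start in source_ids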
+                            sub_token = [0] * sub_token_len
+                            fbank_beg_i = [len(source_ids)]
+                            source_ids += sub_token
+                            fbank_mask_i += [1] * len(sub_token)
+
+                if badcase_flag:
+                    continue
+                source_mask = [-100] * len(source_ids)
+                target_out = f"{target_out}<|im_end|>"
+                target_ids = self.tokenizer.encode(target_out)
+                input_ids += source_ids + target_ids
+                labels += source_mask + target_ids
+                fbank_mask += fbank_mask_i
+                fbank_beg.append(fbank_beg_i)
+
+            if len(input_ids) > self.max_token_length:
+                logging.info(
+                    f"input_ids > max_token_length: {len(input_ids)}>{self.max_token_length}, {item}"
+                )
+                badcase_flag = True
+            if badcase_flag:
+                continue
+            input_ids = torch.tensor(input_ids, dtype=torch.int64)  # [: self.max_token_length]
+            attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
+            labels = torch.tensor(labels, dtype=torch.int64)  # [: self.max_token_length]
+
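+            # keep the fbank from the last speech segment extracted above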
+            fbank = speech[0, :, :]
+            fbank_lens = speech_lengths
+            fbank_mask = torch.tensor(fbank_mask, dtype=torch.float32)
+            fbank_beg = torch.tensor(fbank_beg, dtype=torch.int32)
+
+            output = {
+                "speech": fbank,
+                "speech_lengths": fbank_lens,
+                "fbank_mask": fbank_mask,
+                "fbank_beg": fbank_beg,
+                "input_ids": input_ids,
+                "attention_mask": attention_mask,
+                "labels_ids": labels,
+            }
+            break
+
+        return output
+
+    def collator(self, samples: list = None):
+
+        for idx in range(self.retry):
+            badcase_flag = False
+
+            outputs = {}
+            for sample in samples:
+                if sample is None:
+                    continue
+                for key in sample.keys():
+                    if key not in outputs:
+                        outputs[key] = []
+                    outputs[key].append(sample[key])
+
+            for key, data_list in outputs.items():
+                if isinstance(data_list[0], torch.Tensor):
+                    if data_list[0].dtype == torch.int64 or data_list[0].dtype == torch.int32:
+
+                        pad_value = self.int_pad_value
+                    else:
+                        pad_value = self.float_pad_value
+
+                    outputs[key] = torch.nn.utils.rnn.pad_sequence(
+                        data_list, batch_first=True, padding_value=pad_value
+                    )
+
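+            # Token-based batch budget: if the padded batch exceeds batch_size_token_max,
+            # drop the last sample and re-collate (up to self.retry attempts).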
+            if self.batch_type != "example":
+                b, t = outputs["input_ids"].shape
+                if b > 1 and b * t > self.batch_size_token_max:
+                    logging.info(
+                        f"Warning, retry {idx}: b*t = {b}*{t} = {b * t} > batch_size_token_max: {self.batch_size_token_max}, dropping the last sample"
+                    )
+                    samples = samples[:-1]
+                    continue
+
+            break
+
+        return outputs
diff --git a/funasr/datasets/openai_datasets/index_ds.py b/funasr/datasets/openai_datasets/index_ds.py
new file mode 100644
index 0000000..cc518f8
--- /dev/null
+++ b/funasr/datasets/openai_datasets/index_ds.py
@@ -0,0 +1,106 @@
+import os
+import json
+import torch
+import logging
+
+import librosa
+import random
+import torch.distributed as dist
+
+from funasr.register import tables
+
+
+@tables.register("index_ds_classes", "OpenAIIndexDSJsonl")
+class OpenAIIndexDSJsonl(torch.utils.data.Dataset):  # torch.utils.data.Dataset
+
+    def __init__(self, path: str, **kwargs):
+        super().__init__()
+        self.max_source_length = kwargs.get("max_source_length", 2048)
+        self.min_source_length = kwargs.get("min_source_length", 0)
+        self.max_target_length = kwargs.get("max_target_length", 2048)
+        self.min_target_length = kwargs.get("min_target_length", 0)
+        self.max_token_length = kwargs.get("max_token_length", 2200)
+
+        is_training = kwargs.get("is_training", True)
+        if not (path.endswith(".jsonl") or path.endswith(".json")):
+            # jsonl list file
+            data_split_num = kwargs.get("data_split_num", 1)
+            data_split_i = kwargs.get("data_split_i", 0)
+
+            if not is_training:
+                data_split_num = 1
+                data_split_i = 0
+            with open(path, encoding="utf-8") as fin:
+                file_list_all = fin.readlines()
+
+                num_per_slice = (len(file_list_all) - 1) // data_split_num + 1  # ceiling division: jsonl files per data split
+                file_list = file_list_all[
+                    data_split_i * num_per_slice : (data_split_i + 1) * num_per_slice
+                ]
+                logging.info(
+                    f"is_training: {is_training}, data_split_num: {data_split_num}, data_split_i: {data_split_i}, \nfile_list: {file_list}, \nfile_list_all: {file_list_all}"
+                )
+
+        else:
+            file_list = [path]
+
+        contents = []
+        for file_json in file_list:
+            with open(file_json.strip(), encoding="utf-8") as fin:
+                for line in fin:
+                    data_dict = json.loads(line.strip())
+                    data = data_dict["messages"]
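+                    # speech_length is given in fbank frames; dividing by 8 approximates the
+                    # number of audio placeholder tokens after encoder/adaptor downsampling (assumed).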
+                    speech_length = data_dict.get("speech_length", -1) // 8
+                    text_length = data_dict.get("text_length", 0)
+
+                    system, user, assistant = [], [], []
+                    for i, item in enumerate(data):
+                        role = item["role"]
+                        content = item["content"]
+                        if role == "system":
+                            system.append(content)
+                        elif role == "user":
+                            user.append(content)
+                        elif role == "assistant":
+                            assistant.append(content)
+
+                    system = system * len(user)
+
+                    contents_i = {
+                        "system": system,
+                        "user": user,
+                        "assistant": assistant,
+                        "source_len": speech_length + text_length,
+                    }
+                    contents.append(contents_i)
+
+        self.contents = contents
+
+        logging.info("total number of samples: {}, data path: {}".format(len(self.contents), path))
+
+    def __len__(self):
+        return len(self.contents)
+
+    def __getitem__(self, index):
+
+        data = self.contents[index]
+
+        return data
+
+    def get_source_len(self, data_dict):
+        source_len = data_dict.get("source_len", -1)
+        if source_len < 0:
+            source_len = len(data_dict["system"]) + len(data_dict["user"])
+        return source_len
+
+    def get_target_len(self, data_dict):
+
+        return 0
+
+
+if __name__ == "__main__":
+    index_ds = OpenAIIndexDSJsonl(
+        path="/Users/zhifu/funasr1.0/test_local/data_tmp/tmp_wav_10.jsonl"
+    )
+    print(index_ds.contents)
+    pass
diff --git a/funasr/datasets/sense_voice_datasets/datasets.py b/funasr/datasets/sense_voice_datasets/datasets.py
index 690a1c5..c0beda1 100644
--- a/funasr/datasets/sense_voice_datasets/datasets.py
+++ b/funasr/datasets/sense_voice_datasets/datasets.py
@@ -1,5 +1,6 @@
 import logging
 
+import re
 import torch
 import random
 import traceback
diff --git a/funasr/models/llm_asr/adaptor.py b/funasr/models/llm_asr/adaptor.py
index 9b79ed2..93534fe 100644
--- a/funasr/models/llm_asr/adaptor.py
+++ b/funasr/models/llm_asr/adaptor.py
@@ -83,25 +83,27 @@
         from funasr.models.transformer.attention import MultiHeadedAttention
         from funasr.models.transformer.positionwise_feed_forward import PositionwiseFeedForward
 
-        self.blocks = nn.ModuleList(
-            [
-                EncoderLayer(
-                    llm_dim,
-                    MultiHeadedAttention(
-                        kwargs.get("attention_heads", 8),
+        self.blocks = None
+        if kwargs.get("n_layer", 2) > 0:
+            self.blocks = nn.ModuleList(
+                [
+                    EncoderLayer(
                         llm_dim,
-                        kwargs.get("attention_dropout_rate", 0.0),
-                    ),
-                    PositionwiseFeedForward(
-                        llm_dim,
-                        llm_dim // 4,
+                        MultiHeadedAttention(
+                            kwargs.get("attention_heads", 8),
+                            llm_dim,
+                            kwargs.get("attention_dropout_rate", 0.0),
+                        ),
+                        PositionwiseFeedForward(
+                            llm_dim,
+                            llm_dim // 4,
+                            kwargs.get("dropout_rate", 0.0),
+                        ),
                         kwargs.get("dropout_rate", 0.0),
-                    ),
-                    kwargs.get("dropout_rate", 0.0),
-                )
-                for i in range(kwargs.get("n_layer", 2))
-            ]
-        )
+                    )
+                    for i in range(kwargs.get("n_layer", 2))
+                ]
+            )
 
     def forward(self, x, ilens=None):
 
@@ -123,6 +125,8 @@
         olens = None
         olens = (ilens - 1) // self.k + 1
         masks = (~make_pad_mask(olens)[:, None, :]).to(x.device)
-        for layer, block in enumerate(self.blocks):
-            x, masks = block(x, masks)
+
+        if self.blocks is not None:
+            for layer, block in enumerate(self.blocks):
+                x, masks = block(x, masks)
         return x, olens
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 4345f69..519918c 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -6,7 +6,7 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.cuda.amp import autocast
-
+import re
 from funasr.models.scama.utils import sequence_mask
 from funasr.losses.label_smoothing_loss import LabelSmoothingLoss
 from funasr.models.ctc.ctc import CTC
@@ -18,6 +18,8 @@
 from funasr.utils import postprocess_utils
 from funasr.utils.datadir_writer import DatadirWriter
 from funasr.register import tables
+from funasr.train_utils.device_funcs import to_device
+import traceback
 
 
 @tables.register("model_classes", "LLMASR")
@@ -341,3 +343,431 @@
             ibest_writer["text"][key[0]] = text
 
         return results, meta_data
+
+
+@tables.register("model_classes", "LLMASR2")
+class LLMASR2(nn.Module):
+    """ """
+
+    def __init__(
+        self,
+        specaug: str = None,
+        specaug_conf: dict = None,
+        normalize: str = None,
+        normalize_conf: dict = None,
+        audio_encoder: str = None,
+        audio_encoder_conf: dict = None,
+        audio_adaptor: str = None,
+        audio_adaptor_conf: dict = None,
+        decoder: str = None,
+        decoder_conf: dict = None,
+        ctc: str = None,
+        ctc_conf: dict = None,
+        ctc_weight: float = 0.5,
+        llm: str = None,
+        llm_conf: dict = None,
+        input_size: int = 80,
+        vocab_size: int = -1,
+        ignore_id: int = -1,
+        blank_id: int = 0,
+        sos: int = 1,
+        eos: int = 2,
+        lsm_weight: float = 0.0,
+        length_normalized_loss: bool = False,
+        report_cer: bool = True,
+        report_wer: bool = True,
+        sym_space: str = "<space>",
+        sym_blank: str = "<blank>",
+        # extract_feats_in_collect_stats: bool = True,
+        share_embedding: bool = False,
+        # preencoder: Optional[AbsPreEncoder] = None,
+        # postencoder: Optional[AbsPostEncoder] = None,
+        **kwargs,
+    ):
+
+        super().__init__()
+
+        # audio encoder
+        hub = audio_encoder_conf.get("hub", None)
+        if hub == "ms":
+            from funasr import AutoModel
+
+            model = AutoModel(model=audio_encoder, model_revision="master")
+            # frontend = model.kwargs.get("frontend")
+            audio_encoder_output_size = model.model.encoder_output_size
+
+            audio_encoder = model.model.model.encoder
+
+            # self.frontend = frontend
+
+        elif hub == "hf":
+            pass
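+            # hub == "hf" is left as a placeholder; no HuggingFace audio encoder is loaded here.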
+        else:
+            encoder_class = tables.encoder_classes.get(audio_encoder)
+            audio_encoder = encoder_class(input_size=input_size, **audio_encoder_conf)
+            audio_encoder_output_size = audio_encoder.output_size()
+        freeze = audio_encoder_conf.get("freeze", True)
+        if freeze:
+            for name, param in audio_encoder.named_parameters():
+                param.requires_grad = False
+            audio_encoder.eval()
+
+        self.audio_encoder = audio_encoder
+
+        # llm
+        hub = llm_conf.get("hub", "hf")
+        self.llm = None
+        if hub == "hf":
+            from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
+
+            init_param_path = llm_conf.get("init_param_path", "vicuna-7b-v1.5")
+
+            model = AutoModelForCausalLM.from_pretrained(
+                init_param_path,
+                load_in_8bit=None,
+                device_map=None,
+                use_cache=None,
+            )
+            freeze = llm_conf.get("freeze", True)
+            if freeze:
+                for name, param in model.named_parameters():
+                    param.requires_grad = False
+                model.eval()
+            self.llm = model
+
+        # adaptor
+        adaptor_class = tables.adaptor_classes.get(audio_adaptor)
+        audio_adaptor_conf["encoder_dim"] = audio_encoder_output_size
+        audio_adaptor = adaptor_class(**audio_adaptor_conf)
+
+        self.audio_adaptor = audio_adaptor
+
+        self.error_calculator = None
+
+        self.length_normalized_loss = length_normalized_loss
+        self.beam_search = None
+
+    def forward(
+        self,
+        speech: torch.Tensor,
+        speech_lengths: torch.Tensor,
+        input_ids: torch.Tensor,
+        attention_mask: torch.Tensor,
+        labels_ids: torch.Tensor,
+        fbank_beg: torch.Tensor,
+        fbank_mask: torch.Tensor,
+        **kwargs,
+    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
+        """Encoder + Decoder + Calc loss
+        Args:
+                speech: (Batch, Length, ...)
+                speech_lengths: (Batch, )
+                text: (Batch, Length)
+                text_lengths: (Batch,)
+        """
+        # import pdb;
+        # pdb.set_trace()
+        if len(speech_lengths.size()) > 1:
+            speech_lengths = speech_lengths[:, 0]
+
+        batch_size, frames, _ = speech.shape
+
+        # audio encoder
+        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+
+        # audio_adaptor
+        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
+
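+        # Padding positions carry negative ids; clamp them to 0 so the embedding lookup is valid
+        # (the corresponding label positions are mapped to -100 below and excluded from the loss).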
+        input_ids[input_ids < 0] = 0
+        inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
+
+        batch_size, token_num, dims = inputs_embeds.shape
+        fbank_mask[fbank_mask < 0] = 0
+        fbank_fake_lens = fbank_mask.sum(-1).to(torch.int32)
+        # _, l, _ = encoder_out.shape
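+        # Splice the adaptor outputs into the LLM input embeddings, overwriting the placeholder
+        # token embeddings starting at fbank_beg for each sample in the batch.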
+        for batch_idx in range(batch_size):
+
+            fbank_fake_len = fbank_fake_lens[batch_idx].item()
+            fbank_beg_idx = fbank_beg[batch_idx, 0].item()
+            min_len = min(fbank_fake_len, inputs_embeds.shape[1] - fbank_beg_idx)
+
+            try:
+                inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
+                    batch_idx, :min_len, :
+                ]
+            except Exception as e:
+                logging.error(f"{str(e)}, {traceback.format_exc()}")
+                logging.info(
+                    f"batch_idx: {batch_idx}, inputs_embeds: {inputs_embeds.shape}, fbank_beg_idx: {fbank_beg_idx}, min_len: {min_len}, fbank_fake_len: {fbank_fake_len}, encoder_out: {encoder_out.shape}, encoder_out_lens: {encoder_out_lens[batch_idx].item()}"
+                )
+                fbank_fake_len = encoder_out_lens[batch_idx].item()
+                min_len = min(fbank_fake_len, min_len)
+                inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
+                    batch_idx, :min_len, :
+                ]
+
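+        # Map padding labels (-1) to -100, the ignore_index of the HF causal-LM loss.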
+        labels_ids[labels_ids == -1] = -100
+
+        model_outputs = self.llm(
+            inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
+        )
+        loss = model_outputs.loss
+
+        stats = {}
+        with torch.no_grad():
+            preds = torch.argmax(model_outputs.logits, -1)
+            acc_att = compute_accuracy(preds[:, :-1], labels_ids[:, 1:], ignore_label=-100)
+            stats["acc"] = acc_att
+
+        stats["loss"] = torch.clone(loss.detach())
+        stats["batch_size"] = batch_size
+        stats["batch_size_x_frames"] = frames * batch_size
+        stats["batch_size_real_frames"] = speech_lengths.sum().item()
+        stats["padding_frames"] = stats["batch_size_x_frames"] - stats["batch_size_real_frames"]
+        stats["batch_size_x_tokens"] = token_num * batch_size
+        stats["batch_size_real_tokens"] = attention_mask.sum().item()
+        stats["padding_tokens"] = stats["batch_size_x_tokens"] - stats["batch_size_real_tokens"]
+
+        # force_gatherable: to-device and to-tensor if scalar for DataParallel
+        if self.length_normalized_loss:
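+            # Note: operator precedence makes this (labels_ids > 1).sum(), roughly the number
+            # of non-ignored target tokens used as the normalization weight.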
+            batch_size = int((labels_ids > 0 + 1).sum())
+        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
+        return loss, stats, weight
+
+    def data_template(self, data):
+        system, user, assistant = [], [], []
+        for i, item in enumerate(data):
+            role = item["role"]
+            content = item["content"]
+            if role == "system":
+                system.append(content)
+            elif role == "user":
+                user.append(content)
+            elif role == "assistant":
+                assistant.append(content)
+
+        system = system * len(user)
+
+        contents = {
+            "system": system,
+            "user": user,
+            "assistant": assistant,
+        }
+
+        return contents
+
+    def data_load_speech(self, contents: dict, tokenizer, frontend, meta_data={}, **kwargs):
+
+        system = contents["system"]
+        user = contents["user"]
+        assistant = contents["assistant"]
+        pattern = re.compile(r"(<\|startofspeech\|>.*?<\|endofspeech\|>)")
+        input_ids, labels, source_ids, target_ids, fbank, fbank_lens, fbank_mask, fbank_beg = (
+            [],
+            [],
+            [],
+            [],
+            [],
+            [],
+            [],
+            [],
+        )
+
+        for i, (system_prompt, user_prompt, target_out) in enumerate(zip(system, user, assistant)):
+
+            source_input = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
+
+            splits = pattern.split(source_input)
+            source_ids_i = []
+            fbank_mask_i = []
+            fbank_beg_i = []
+            fbank_lens_i = []
+            # target_ids_i = []
+            for k, sub_str in enumerate(splits):
+                if not sub_str.startswith("<|startofspeech|>"):
+                    sub_token = tokenizer.encode(sub_str)
+                    source_ids_i += sub_token
+                    fbank_mask_i += [0] * len(sub_token)
+                else:
+                    sub_str = sub_str.replace("<|startofspeech|>", "").replace(
+                        "<|endofspeech|>", ""
+                    )
+                    if sub_str.startswith("!"):
+                        try:
+                            time1 = time.perf_counter()
+                            data_src = load_audio_text_image_video(sub_str[1:], fs=frontend.fs)
+                            time2 = time.perf_counter()
+                            meta_data["load_data"] = f"{time2 - time1:0.3f}"
+                        except Exception as e:
+                            logging.error(f"Loading wav failed! {str(e)}, {traceback.format_exc()}")
+
+                        speech, speech_lengths = extract_fbank(
+                            data_src,
+                            data_type=kwargs.get("data_type", "sound"),
+                            frontend=frontend,
+                            is_final=True,
+                        )  # speech: [b, T, d]
+
+                        time3 = time.perf_counter()
+                        meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
+                        meta_data["batch_data_time"] = (
+                            speech_lengths.sum().item()
+                            * frontend.frame_shift
+                            * frontend.lfr_n
+                            / 1000
+                        )
+
+                        if kwargs.get("permute", True):
+                            speech = speech.permute(0, 2, 1)
+
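+                        # Same ~8x placeholder-length estimate as in the training dataset
+                        # (two stride-2 convs plus the adaptor's 2x subsampling, assumed).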
+                        olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
+                        olens = 1 + (olens - 3 + 2 * 1) // 2
+                        sub_token_len = (olens - 1) // 2 + 1
+                        sub_token = [0] * sub_token_len
+                        fbank_beg_i = [len(source_ids_i)]
+                        source_ids_i += sub_token
+                        fbank_mask_i += [1] * len(sub_token)
+
+            source_mask = [-100] * len(source_ids_i)
+            target_out = f"{target_out}<|im_end|>"
+            target_ids = tokenizer.encode(target_out)
+            input_ids += source_ids_i + target_ids
+            labels += source_mask + target_ids
+            fbank_mask += fbank_mask_i
+            fbank_beg.append(fbank_beg_i)
+
+        input_ids = torch.tensor(input_ids, dtype=torch.int64)  # [: self.max_token_length]
+        attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
+        labels = torch.tensor(labels, dtype=torch.int64)  # [: self.max_token_length]
+        source_ids = torch.tensor(source_ids_i, dtype=torch.int64)
+        target_ids = torch.tensor(target_ids, dtype=torch.int64)
+
+        fbank = speech[0, :, :]
+        fbank_lens = speech_lengths
+        fbank_mask = torch.tensor(fbank_mask, dtype=torch.float32)
+        fbank_beg = torch.tensor(fbank_beg, dtype=torch.int32)
+
+        output = {
+            "speech": fbank[None, :, :],
+            "speech_lengths": fbank_lens[:, None],
+            "fbank_mask": fbank_mask[None, :],
+            "fbank_beg": fbank_beg[None,],
+            "input_ids": input_ids[None, :],
+            "attention_mask": attention_mask[None, :],
+            "labels_ids": labels[None, :],
+            "source_ids": source_ids[None, :],
+            "target_ids": target_ids[None, :],
+        }
+
+        return output
+
+    def inference(
+        self,
+        data_in,
+        data_lengths=None,
+        key: list = None,
+        tokenizer=None,
+        frontend=None,
+        **kwargs,
+    ):
+
+        meta_data = {}
+        prompt = kwargs.get("prompt", None)
+
+        if kwargs.get("batch_size", 1) > 1:
+            raise NotImplementedError("batch decoding is not implemented")
+
+        contents = self.data_template(data_in[0])
+        output = self.data_load_speech(contents, tokenizer, frontend, meta_data=meta_data, **kwargs)
+        batch = to_device(output, kwargs["device"])
+
+        # audio encoder
+        speech = batch["speech"]
+        speech_lengths = batch["speech_lengths"][:, 0]
+        # fp16 / bf16: cast the input features before running the audio encoder
+        if kwargs.get("fp16", False):
+            speech = speech.to(torch.float16)
+        elif kwargs.get("bf16", False):
+            speech = speech.to(torch.bfloat16)
+        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+
+        # audio_adaptor
+        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
+
+        input_ids = batch["input_ids"]
+        source_ids = batch["source_ids"]
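+        # "tearchforing" (sic, teacher forcing): if disabled, only the prompt (source_ids) is fed
+        # and the LLM generates the answer; otherwise the full prompt+answer sequence is scored below.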
+        if not kwargs.get("tearchforing", False):
+            input_ids = source_ids
+        input_ids[input_ids < 0] = 0
+        inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
+
+        batch_size, token_num, dims = inputs_embeds.shape
+        fbank_beg = batch["fbank_beg"]
+        for batch_idx in range(batch_size):
+
+            min_len = encoder_out_lens[batch_idx].item()
+            fbank_beg_idx = fbank_beg[batch_idx]
+            inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
+                batch_idx, :min_len, :
+            ]
+
+        llm_dtype = kwargs.get("llm_dtype", "fp32")
+        dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
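+        # Run the LLM under autocast in the configured precision (fp32 / fp16 / bf16).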
+        with torch.cuda.amp.autocast(dtype=dtype_map[llm_dtype]):
+            label = contents["assistant"][0]
+            # self.llm = self.llm.to(dtype_map[llm_dtype])
+            # inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])
+
+            if not kwargs.get("tearchforing", False):
+
+                generated_ids = self.llm.generate(
+                    inputs_embeds=inputs_embeds, max_new_tokens=kwargs.get("max_length", 512)
+                )
+                # generated_ids = [
+                #     output_ids[len(input_id) :]
+                #     for input_id, output_ids in zip(input_ids, generated_ids)
+                # ]
+                response = tokenizer.batch_decode(
+                    generated_ids, skip_special_tokens=kwargs.get("skip_special_tokens", True)
+                )[0]
+
+                loss = None
+            else:
+
+                labels_ids = batch["labels_ids"]
+                labels_ids[labels_ids == -1] = -100
+                attention_mask = batch.get("attention_mask", None)
+                # attention_mask = attention_mask.to(dtype_map[llm_dtype])
+                model_outputs = self.llm(
+                    inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
+                )
+
+                preds = torch.argmax(model_outputs.logits, -1)[:, source_ids.shape[1] :]
+                response = tokenizer.batch_decode(
+                    preds,
+                    add_special_tokens=False,
+                    skip_special_tokens=kwargs.get("skip_special_tokens", True),
+                )[0]
+                loss = model_outputs.loss.item()
+
+        ibest_writer = None
+        if kwargs.get("output_dir") is not None:
+            if not hasattr(self, "writer"):
+                self.writer = DatadirWriter(kwargs.get("output_dir"))
+            ibest_writer = self.writer[f"{0 + 1}best_recog"]
+
+        results = []
+        response_clean = re.sub(r"[^\w\s\u3000\u4e00-\u9fff]+", "", response)
+        result_i = {"key": key[0], "text": response, "text_tn": response_clean, "label": label}
+        if loss is not None:
+            result_i["loss"] = loss
+        results.append(result_i)
+
+        if ibest_writer is not None:
+            ibest_writer["text"][key[0]] = response
+            ibest_writer["label"][key[0]] = label
+            ibest_writer["text_tn"][key[0]] = response_clean
+
+        return results, meta_data
diff --git a/funasr/models/transformer/attention.py b/funasr/models/transformer/attention.py
index e4add5c..6e6f754 100644
--- a/funasr/models/transformer/attention.py
+++ b/funasr/models/transformer/attention.py
@@ -82,7 +82,10 @@
         n_batch = value.size(0)
         if mask is not None:
             mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
-            min_value = float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
+
+            min_value = -float(
+                "inf"
+            )  # min_value = float(np.finfo(torch.tensor(0, dtype=qk.dtype).numpy().dtype).min)
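+            # -inf ensures masked positions receive exactly zero attention weight after softmax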
             scores = scores.masked_fill(mask, min_value)
             self.attn = torch.softmax(scores, dim=-1).masked_fill(
                 mask, 0.0
diff --git a/funasr/train_utils/model_summary.py b/funasr/train_utils/model_summary.py
index 4e92a33..842cd21 100644
--- a/funasr/train_utils/model_summary.py
+++ b/funasr/train_utils/model_summary.py
@@ -47,10 +47,18 @@
 def model_summary(model: torch.nn.Module) -> str:
     message = "Model structure:\n"
     message += str(model)
-    # for p in model.parameters():
-    #     print(f"{p.numel()}")
-    tot_params = sum(p.numel() for p in model.parameters())
-    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+
+    tot_params, num_params = 0, 0
+    for name, param in model.named_parameters():
+        print(
+            "name: {}, dtype: {}, device: {}, trainable: {}, shape: {}, numel: {}".format(
+                name, param.dtype, param.device, param.requires_grad, param.shape, param.numel()
+            )
+        )
+        tot_params += param.numel()
+        if param.requires_grad:
+            num_params += param.numel()
+
     percent_trainable = "{:.1f}".format(num_params * 100.0 / tot_params)
     tot_params = get_human_readable_count(tot_params)
     num_params = get_human_readable_count(num_params)
diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index 50f99f0..afc632d 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -85,7 +85,12 @@
         self.batch_total = 0
         self.use_fp16 = use_fp16
         self.save_checkpoint_interval = kwargs.get("save_checkpoint_interval", 5000)
-        self.validate_interval = kwargs.get("validate_interval", 5000)
+        self.validate_interval = kwargs.get("validate_interval", -1)
+        if self.validate_interval < 0:
+            self.validate_interval = self.save_checkpoint_interval
+        assert (
+            self.save_checkpoint_interval == self.validate_interval
+        ), f"save_checkpoint_interval must equal to validate_interval"
         self.keep_nbest_models = kwargs.get("keep_nbest_models", 500)
         self.avg_keep_nbest_models_type = kwargs.get("avg_keep_nbest_models_type", "acc")
         self.avg_nbest_model = kwargs.get("avg_nbest_model", 10)
@@ -476,7 +481,7 @@
                     step_in_epoch=self.step_in_epoch,
                     batch_num_epoch=batch_num_epoch,
                     lr=lr,
-                    loss=loss.detach().cpu().item(),
+                    loss=accum_grad * loss.detach().cpu().item(),
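+                    # scaled back by accum_grad for logging, assuming the running loss was
+                    # divided by accum_grad for gradient accumulation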
                     speed_stats=speed_stats,
                     stats=stats,
                     writer=writer,
diff --git a/funasr/train_utils/trainer_ds.py b/funasr/train_utils/trainer_ds.py
index ec887cc..ec76531 100644
--- a/funasr/train_utils/trainer_ds.py
+++ b/funasr/train_utils/trainer_ds.py
@@ -167,6 +167,8 @@
         Args:
             epoch (int): The epoch number at which the checkpoint is being saved.
         """
+        if self.use_ddp or self.use_fsdp:
+            dist.barrier()
         step_in_epoch = None if step is None else step_in_epoch
         if self.use_deepspeed:
 
@@ -760,6 +762,10 @@
             ckpt_name = f'model.pt.ep{epoch}.{kwargs.get("step_in_epoch")}'
         self.val_acc_step_or_eoch[ckpt_name] = self.val_acc_avg
         self.val_loss_step_or_eoch[ckpt_name] = self.val_loss_avg
+
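+        # Wait for all ranks to finish validation before switching back to training mode.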
+        if self.use_ddp or self.use_fsdp or self.use_deepspeed:
+            dist.barrier()
+
         model.train()
 
     def log(

--
Gitblit v1.9.1