From 45d7aa9004763684fb748ee17942ecba81042201 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 19 Jun 2024 10:26:40 +0800
Subject: [PATCH] Add LLM-ASR decoding/inference support and a multi-turn OpenAI-format dataset

---
 examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh            |   34 
 funasr/models/llm_asr/model.py                                              |  619 ++++++++++++++++++++++++-
 funasr/bin/train_ds.py                                                      |    9 
 funasr/download/download_from_hub.py                                        |   12 
 funasr/auto/auto_model.py                                                   |   10 
 funasr/datasets/openai_datasets/datasets.py                                 |  255 ++++++++++
 funasr/models/paraformer/cif_predictor.py                                   |   40 -
 funasr/train_utils/load_pretrained_model.py                                 |   58 -
 funasr/models/sense_voice/model.py                                          |  288 +++++++++++
 examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml |    2 
 examples/industrial_data_pretraining/llm_asr/demo_speech2text.py            |    3 
 examples/industrial_data_pretraining/sense_voice/demo_ctc.py                |   25 +
 funasr/datasets/openai_datasets/index_ds.py                                 |    5 
 funasr/train_utils/trainer_ds.py                                            |   61 ++
 docs/images/wechat.png                                                      |    0 
 examples/industrial_data_pretraining/ctc/demo.py                            |    7 
 examples/industrial_data_pretraining/ctc/infer_from_local.sh                |    0 
 17 files changed, 1275 insertions(+), 153 deletions(-)

diff --git a/docs/images/wechat.png b/docs/images/wechat.png
index 705ff75..8d37700 100644
--- a/docs/images/wechat.png
+++ b/docs/images/wechat.png
Binary files differ
diff --git a/examples/industrial_data_pretraining/ctc/demo.py b/examples/industrial_data_pretraining/ctc/demo.py
index 85a748a..b9d1647 100644
--- a/examples/industrial_data_pretraining/ctc/demo.py
+++ b/examples/industrial_data_pretraining/ctc/demo.py
@@ -6,8 +6,11 @@
 import sys
 from funasr import AutoModel
 
-model_dir=sys.argv[1]
-input_file=sys.argv[2]
+
+model_dir = "/Users/zhifu/Downloads/modelscope_models/ctc_model"
+input_file = (
+    "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"
+)
 
 model = AutoModel(
     model=model_dir,
diff --git a/examples/industrial_data_pretraining/ctc/infer_from_local.py b/examples/industrial_data_pretraining/ctc/infer_from_local.sh
similarity index 100%
rename from examples/industrial_data_pretraining/ctc/infer_from_local.py
rename to examples/industrial_data_pretraining/ctc/infer_from_local.sh
diff --git a/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml b/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml
index 483f219..48bd0cf 100644
--- a/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml
+++ b/examples/industrial_data_pretraining/llm_asr/conf/whisper_qwen_linear2.yaml
@@ -69,7 +69,7 @@
   batch_size_scale_ratio_max: 2
   num_workers: 4
   audio_adaptor_downsample_rate: ${audio_adaptor_conf.downsample_rate}
-  audio_encoder_downsample_rate: 2
+  audio_encoder_downsample_rate: 4
   data_split_num: 512
   batch_size_sample_max: 15
   retry: 20
diff --git a/examples/industrial_data_pretraining/llm_asr/demo_speech2text.py b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.py
index e5e3e23..41b3440 100644
--- a/examples/industrial_data_pretraining/llm_asr/demo_speech2text.py
+++ b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.py
@@ -28,6 +28,9 @@
     init_param=f"{os.path.join(ckpt_dir, ckpt_id)}",
     output_dir=output_dir,
     device=device,
+    fp16=False,
+    bf16=False,
+    llm_dtype="bf16",
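+    # load the model weights in fp32 (fp16/bf16 casting disabled); only the LLM runs under bf16 autocast at inference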
 )
 
 
diff --git a/examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh
index d4c409b..57299fc 100644
--- a/examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh
+++ b/examples/industrial_data_pretraining/llm_asr/demo_speech2text.sh
@@ -12,6 +12,7 @@
 out_dir="${ckpt_dir}/inference-${ckpt_id}"
 mkdir -p ${out_dir}
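+# decode each test set in a background subshell ({ ... } &); the wait below joins the jobs before the next group starts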
 for data_set in "librispeech_test_clean_speech2text.jsonl" "librispeech_test_other_speech2text.jsonl"; do
+{
     jsonl=${jsonl_dir}/${data_set}
     output_dir=${out_dir}/${data_set}
     mkdir -p ${output_dir}
@@ -22,10 +23,12 @@
 
     python /mnt/workspace/zhifu.gzf/codebase/FunASR/funasr/metrics/wer.py ++ref_file=${ref_file} ++hyp_file=${pred_file} ++cer_file=${pred_file}.cer ++cn_postprocess=false
 
+}&
 done
+wait
 
-
-for data_set in "aishell1_test_speech2text.jsonl" "aishell2_ios_test_speech2text.jsonl" "librispeech_test_other_speech2text.jsonl"; do
+for data_set in "aishell1_test_speech2text.jsonl" "aishell2_ios_test_speech2text.jsonl"; do
+{
     jsonl=${jsonl_dir}/${data_set}
     output_dir=${out_dir}/${data_set}
     mkdir -p ${output_dir}
@@ -36,9 +39,12 @@
 
     python /mnt/workspace/zhifu.gzf/codebase/FunASR/funasr/metrics/wer.py ++ref_file=${ref_file} ++hyp_file=${pred_file} ++cer_file=${pred_file}.cer ++cn_postprocess=true
 
+}&
 done
+wait
 
-for data_set in "s2tt_en2zh.v20240605.test.jsonl"; do
+for data_set in "common_voice_zh-CN_speech2text.jsonl" "common_voice_en_speech2text.jsonl"; do
+{
     jsonl=${jsonl_dir}/${data_set}
     output_dir=${out_dir}/${data_set}
     mkdir -p ${output_dir}
@@ -47,19 +53,13 @@
 
     python ./demo_speech2text.py ${ckpt_dir} ${ckpt_id} ${jsonl} ${output_dir} ${device}
 
-    python /mnt/workspace/zhifu.gzf/codebase/FunASR/funasr/metrics/wer.py ++ref_file=${ref_file} ++hyp_file=${pred_file} ++cer_file=${pred_file}.cer ++cn_postprocess=true
+    cn_postprocess=false
+    if [ $data_set = "common_voice_zh-CN_speech2text.jsonl" ];then
+      cn_postprocess=true
+    fi
 
+    python /mnt/workspace/zhifu.gzf/codebase/FunASR/funasr/metrics/wer.py ++ref_file=${ref_file} ++hyp_file=${pred_file} ++cer_file=${pred_file}.cer ++cn_postprocess=${cn_postprocess}
+
+}&
 done
-
-for data_set in "s2tt_zh2en.v20240605.test.jsonl"; do
-    jsonl=${jsonl_dir}/${data_set}
-    output_dir=${out_dir}/${data_set}
-    mkdir -p ${output_dir}
-    pred_file=${output_dir}/1best_recog/text_tn
-    ref_file=${output_dir}/1best_recog/label
-
-    python ./demo_speech2text.py ${ckpt_dir} ${ckpt_id} ${jsonl} ${output_dir} ${device}
-
-    python /mnt/workspace/zhifu.gzf/codebase/FunASR/funasr/metrics/wer.py ++ref_file=${ref_file} ++hyp_file=${pred_file} ++cer_file=${pred_file}.cer ++cn_postprocess=false
-
-done
\ No newline at end of file
+wait
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/sense_voice/demo_ctc.py b/examples/industrial_data_pretraining/sense_voice/demo_ctc.py
new file mode 100644
index 0000000..064d1e9
--- /dev/null
+++ b/examples/industrial_data_pretraining/sense_voice/demo_ctc.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
+import sys
+from funasr import AutoModel
+
+model_dir = "/Users/zhifu/Downloads/modelscope_models/SenseVoiceCTC"
+input_file = (
+    "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"
+)
+
+model = AutoModel(
+    model=model_dir,
+)
+
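+# decode a single utterance; language and text_norm are passed through to the SenseVoice CTC model as decoding options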
+res = model.generate(
+    input=input_file,
+    cache={},
+    language="zh",
+    text_norm="wotextnorm",
+)
+
+print(res)
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 91e80d8..a6cd3a6 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -213,7 +213,6 @@
         deep_update(model_conf, kwargs.get("model_conf", {}))
         deep_update(model_conf, kwargs)
         model = model_class(**model_conf, vocab_size=vocab_size)
-        model.to(device)
 
         # init_param
         init_param = kwargs.get("init_param", None)
@@ -236,6 +235,7 @@
             model.to(torch.float16)
         elif kwargs.get("bf16", False):
             model.to(torch.bfloat16)
+        model.to(device)  # move to device only after init_param loading and optional fp16/bf16 casting
         return model, kwargs
 
     def __call__(self, *args, **cfg):
@@ -324,7 +324,7 @@
             input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg
         )
         end_vad = time.time()
-            
+
         #  FIX(gcf): concat the vad clips for sense vocie model for better aed
         if kwargs.get("merge_vad", False):
             for i in range(len(res)):
@@ -466,7 +466,7 @@
                             result[k] = restored_data[j][k]
                         else:
                             result[k] += restored_data[j][k]
-                            
+
             if not len(result["text"].strip()):
                 continue
             return_raw_text = kwargs.get("return_raw_text", False)
@@ -481,7 +481,7 @@
                 if return_raw_text:
                     result["raw_text"] = raw_text
                 result["text"] = punc_res[0]["text"]
-                
+
             # speaker embedding cluster after resorted
             if self.spk_model is not None and kwargs.get("return_spk_res", True):
                 if raw_text is None:
@@ -602,6 +602,6 @@
         )
 
         with torch.no_grad():
-            export_dir = export_utils.export(model=model, data_in=data_list,  **kwargs)
+            export_dir = export_utils.export(model=model, data_in=data_list, **kwargs)
 
         return export_dir
diff --git a/funasr/bin/train_ds.py b/funasr/bin/train_ds.py
index 5b1d4fd..6c0f174 100644
--- a/funasr/bin/train_ds.py
+++ b/funasr/bin/train_ds.py
@@ -84,6 +84,8 @@
         dist.init_process_group(backend=kwargs.get("backend", "nccl"), init_method="env://")
         torch.cuda.set_device(local_rank)
 
+    # rank = dist.get_rank()
+
     logging.info("Build model, frontend, tokenizer")
     device = kwargs.get("device", "cuda")
     kwargs["device"] = "cpu"
@@ -124,6 +126,7 @@
         use_ddp=use_ddp,
         use_fsdp=use_fsdp,
         device=kwargs["device"],
+        excludes=kwargs.get("excludes", None),
         output_dir=kwargs.get("output_dir", "./exp"),
         **kwargs.get("train_conf"),
     )
@@ -143,7 +146,7 @@
     dataloader = dataloader_class(**kwargs)
     # dataloader_tr, dataloader_val = dataloader_class(**kwargs)
 
-    scaler = GradScaler(enabled=trainer.use_fp16) if trainer.use_fp16 else None
+    scaler = GradScaler(enabled=True) if trainer.use_fp16 or trainer.use_bf16 else None
     scaler = ShardedGradScaler(enabled=trainer.use_fp16) if trainer.use_fsdp else scaler
 
     trainer.resume_checkpoint(
@@ -182,7 +185,7 @@
 
             time_escaped = (time.perf_counter() - time_slice_i) / 3600.0
             logging.info(
-                f"rank: {local_rank}, "
+                f"\n\nrank: {local_rank}, "
                 f"time_escaped_epoch: {time_escaped:.3f} hours, "
                 f"estimated to finish {dataloader.data_split_num} data_slices, remaining: {dataloader.data_split_num-data_split_i} slices, {(dataloader.data_split_num-data_split_i)*time_escaped:.3f} hours, "
                 f"epoch: {trainer.max_epoch - epoch} epochs, {((trainer.max_epoch - epoch - 1)*dataloader.data_split_num + dataloader.data_split_num-data_split_i)*time_escaped:.3f} hours\n"
@@ -199,7 +202,7 @@
         time2 = time.perf_counter()
         time_escaped = (time2 - time1) / 3600.0
         logging.info(
-            f"rank: {local_rank}, "
+            f"\n\nrank: {local_rank}, "
             f"time_escaped_epoch: {time_escaped:.3f} hours, "
             f"estimated to finish {trainer.max_epoch} "
             f"epoch: {(trainer.max_epoch - epoch) * time_escaped:.3f} hours\n"
diff --git a/funasr/datasets/openai_datasets/datasets.py b/funasr/datasets/openai_datasets/datasets.py
index 8d243ac..04ddcfd 100644
--- a/funasr/datasets/openai_datasets/datasets.py
+++ b/funasr/datasets/openai_datasets/datasets.py
@@ -64,6 +64,8 @@
         self.max_token_length = kwargs.get("max_token_length", 1024)
         self.batch_size_scale_ratio_max = kwargs.get("batch_size_scale_ratio_max", 1.5)
         self.batch_size_token_max = kwargs.get("batch_size_token_max", 2500)
+        self.audio_adaptor_downsample_rate = kwargs.get("audio_adaptor_downsample_rate", 2)
+        self.audio_encoder_downsample_rate = kwargs.get("audio_encoder_downsample_rate", 4)
 
     def get_source_len(self, index):
         item = self.index_ds[index]
@@ -136,10 +138,13 @@
                                 speech = speech.permute(0, 2, 1)
                             # if speech_lengths > self.batch_size:
                             #     continue
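+                            # encoder output length after two conv subsampling layers (kernel 3, stride 2, pad 1): n -> 1 + (n - 3 + 2) // 2 per layer (4x overall)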
+                            if self.audio_encoder_downsample_rate == 4:
+                                olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
+                                olens = 1 + (olens - 3 + 2 * 1) // 2
+                            elif self.audio_encoder_downsample_rate == 1:
+                                olens = speech_lengths[0].item()
 
-                            olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
-                            olens = 1 + (olens - 3 + 2 * 1) // 2
-                            sub_token_len = (olens - 1) // 2 + 1
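+                            # one placeholder id per adaptor output frame; the adaptor downsamples the encoder output by audio_adaptor_downsample_rate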
+                            sub_token_len = (olens - 1) // self.audio_adaptor_downsample_rate + 1
                             sub_token = [0] * sub_token_len
                             fbank_beg_i = [len(source_ids)]
                             source_ids += sub_token
@@ -222,3 +227,247 @@
             break
 
         return outputs
+
+
+@tables.register("dataset_classes", "OpenAIDatasetMultiTurn")
+class OpenAIDatasetMultiTurn(torch.utils.data.Dataset):
+    """
+    OpenAIDatasetMultiTurn: multi-turn OpenAI-format (system/user/assistant) dataset with speech placeholder tokens
+    """
+
+    def __init__(
+        self,
+        path,
+        index_ds: str = None,
+        frontend=None,
+        tokenizer=None,
+        int_pad_value: int = -1,
+        float_pad_value: float = 0.0,
+        **kwargs,
+    ):
+        super().__init__()
+        index_ds_class = tables.index_ds_classes.get(index_ds)
+        self.index_ds = index_ds_class(path, **kwargs)
+        preprocessor_speech = kwargs.get("preprocessor_speech", None)
+        if preprocessor_speech:
+            preprocessor_speech_class = tables.preprocessor_classes.get(preprocessor_speech)
+            preprocessor_speech = preprocessor_speech_class(
+                **kwargs.get("preprocessor_speech_conf")
+            )
+        self.preprocessor_speech = preprocessor_speech
+        preprocessor_text = kwargs.get("preprocessor_text", None)
+        if preprocessor_text:
+            preprocessor_text_class = tables.preprocessor_classes.get(preprocessor_text)
+            preprocessor_text = preprocessor_text_class(**kwargs.get("preprocessor_text_conf"))
+        self.preprocessor_text = preprocessor_text
+
+        self.frontend = frontend
+        self.fs = 16000 if frontend is None else frontend.fs
+        self.data_type = "sound"
+        self.tokenizer = tokenizer
+
+        self.int_pad_value = int_pad_value
+        self.float_pad_value = float_pad_value
+        self.sos = kwargs.get("sos", "<|startoftranscript|>")
+        self.eos = kwargs.get("eos", "<|endoftext|>")
+        self.batch_size = kwargs.get("batch_size")
+        self.batch_type = kwargs.get("batch_type")
+        self.prompt_ids_len = 0
+        self.retry = kwargs.get("retry", 100)
+
+        self.permute = False
+        from funasr.frontends.whisper_frontend import WhisperFrontend
+
+        if isinstance(self.frontend, WhisperFrontend):
+            self.permute = True
+
+        self.pattern = re.compile(r"(<\|startofspeech\|>.*?<\|endofspeech\|>)")
+        # self.kwargs = kwargs
+        self.max_token_length = kwargs.get("max_token_length", 1024)
+        self.batch_size_scale_ratio_max = kwargs.get("batch_size_scale_ratio_max", 1.5)
+        self.batch_size_token_max = kwargs.get("batch_size_token_max", 2500)
+        self.multiturn_num_max = kwargs.get("multiturn_num_max", 5)
+
+    def get_source_len(self, index):
+        item = self.index_ds[index]
+        return self.index_ds.get_source_len(item)
+
+    def get_target_len(self, index):
+        item = self.index_ds[index]
+        return self.index_ds.get_target_len(item)
+
+    def __len__(self):
+        return len(self.index_ds)
+
+    def __getitem__(self, index):
+        # import pdb
+        #
+        # pdb.set_trace()
+
+        output = None
+
+        for idx in range(self.retry):
+            badcase_flag = False
+            if idx == 0:
+                index_cur = index
+            else:
+                index_cur = torch.randint(0, len(self.index_ds), ()).item()
+
+            item = self.index_ds[index_cur]
+
+            system = item["system"]
+            user = item["user"]
+            assistant = item["assistant"]
+
+            input_ids, labels, fbank, fbank_lens, fbank_mask, fbank_beg, fake_token_len = (
+                [],
+                [],
+                [],
+                [],
+                [],
+                [],
+                [],
+            )
+
+            for i, (system_prompt, user_prompt, target_out) in enumerate(
+                zip(system, user, assistant)
+            ):
+                if i >= self.multiturn_num_max:
+                    break
+                if i == 0:
+                    source_input = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
+                else:
+                    source_input = (
+                        f"<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
+                    )
+
+                splits = self.pattern.split(source_input)
+                source_ids = []
+                fbank_i = []
+                fbank_mask_i = []
+                fake_token_len_i = 0
+                fbank_beg_i = -1
+                fbank_lens_i = []
+                for k, sub_str in enumerate(splits):
+                    if not sub_str.startswith("<|startofspeech|>"):
+                        sub_token = self.tokenizer.encode(sub_str)
+                        source_ids += sub_token
+                        fbank_mask_i += [0] * len(sub_token)
+                    else:
+                        sub_str = sub_str.replace("<|startofspeech|>", "").replace(
+                            "<|endofspeech|>", ""
+                        )
+                        if sub_str.startswith("!"):
+                            try:
+                                data_src = load_audio_text_image_video(sub_str[1:], fs=self.fs)
+                            except Exception as e:
+                                logging.error(
+                                    f"Loading wav failed! {str(e)}, {traceback.format_exc()}"
+                                )
+                                badcase_flag = True
+                                continue
+                            speech, speech_lengths = extract_fbank(
+                                data_src,
+                                data_type=self.data_type,
+                                frontend=self.frontend,
+                                is_final=True,
+                            )  # speech: [b, T, d]
+                            if self.permute:
+                                speech = speech.permute(0, 2, 1)
+                            # if speech_lengths > self.batch_size:
+                            #     continue
+
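+                            # fake speech tokens: encoder frames after two stride-2 convs, then the adaptor's 2x downsampling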
+                            olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
+                            olens = 1 + (olens - 3 + 2 * 1) // 2
+                            fake_token_len_i = (olens - 1) // 2 + 1
+                            fake_token = [0] * fake_token_len_i
+                            fbank_beg_i = len(source_ids)
+                            source_ids += fake_token
+                            fbank_mask_i += [1] * len(fake_token)
+
+                if badcase_flag:
+                    continue
+
+                fbank_beg += [fbank_beg_i + len(input_ids)]
+                fake_token_len += [fake_token_len_i]
+                source_mask = [-100] * len(source_ids)
+                target_out = f"{target_out}<|im_end|>"
+                target_ids = self.tokenizer.encode(target_out)
+                input_ids += source_ids + target_ids
+                labels += source_mask + target_ids
+                fbank.append(speech[0, :, :])
+                fbank_mask += fbank_mask_i
+                fbank_lens.append(speech_lengths)
+
+            if len(input_ids) > self.max_token_length:
+                logging.info(
+                    f"input_ids > max_token_length: {len(input_ids)}>{self.max_token_length}, {item}"
+                )
+                badcase_flag = True
+            if badcase_flag:
+                continue
+            input_ids = torch.tensor(input_ids, dtype=torch.int64)  # [: self.max_token_length]
+            attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
+            labels = torch.tensor(labels, dtype=torch.int64)  # [: self.max_token_length]
+
+            # fbank = speech[0, :, :]
+            # fbank_lens = torch.tensor(fbank_lens, dtype=torch.int32)
+            fbank_mask = torch.tensor(fbank_mask, dtype=torch.float32)
+            fbank_beg = torch.tensor(fbank_beg, dtype=torch.int32)
+            fake_token_len = torch.tensor(fake_token_len, dtype=torch.int32)
+
+            output = {
+                "speech": fbank,
+                "speech_lengths": fbank_lens,
+                "fbank_mask": fbank_mask,
+                "fbank_beg": fbank_beg,
+                "fake_token_len": fake_token_len,
+                "input_ids": input_ids,
+                "attention_mask": attention_mask,
+                "labels_ids": labels,
+            }
+            break
+
+        return output
+
+    def collator(self, samples: list = None):
+
+        for idx in range(self.retry):
+            badcase_flag = False
+
+            outputs = {}
+            for sample in samples:
+                if sample is None:
+                    continue
+                for key in sample.keys():
+                    if key not in outputs:
+                        outputs[key] = []
+                    if isinstance(sample[key], (list, tuple)):
+                        outputs[key].extend(sample[key])
+                    else:
+                        outputs[key].append(sample[key])
+
+            for key, data_list in outputs.items():
+                if isinstance(data_list[0], torch.Tensor):
+                    if data_list[0].dtype == torch.int64 or data_list[0].dtype == torch.int32:
+
+                        pad_value = self.int_pad_value
+                    else:
+                        pad_value = self.float_pad_value
+
+                    outputs[key] = torch.nn.utils.rnn.pad_sequence(
+                        data_list, batch_first=True, padding_value=pad_value
+                    )
+
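+            # token-based batching: if the padded batch exceeds batch_size_token_max, drop the last sample and re-collate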
+            if self.batch_type != "example":
+                b, t = outputs["input_ids"].shape
+                if b > 1 and b * t > self.batch_size_token_max:
+                    logging.info(
+                        f"Warning, {idx}th, b*t: {b}*{t}={b * t} > batch_size_token_max: {self.batch_size_token_max}, drop last data"
+                    )
+                    samples = samples[:-1]
+                    continue
+
+            break
+
+        return outputs
diff --git a/funasr/datasets/openai_datasets/index_ds.py b/funasr/datasets/openai_datasets/index_ds.py
index cc518f8..9943e2a 100644
--- a/funasr/datasets/openai_datasets/index_ds.py
+++ b/funasr/datasets/openai_datasets/index_ds.py
@@ -15,11 +15,6 @@
 
     def __init__(self, path: str, **kwargs):
         super().__init__()
-        self.max_source_length = kwargs.get("max_source_length", 2048)
-        self.min_source_length = kwargs.get("min_source_length", 0)
-        self.max_target_length = kwargs.get("max_target_length", 2048)
-        self.min_target_length = kwargs.get("min_target_length", 0)
-        self.max_token_length = kwargs.get("max_token_length", 2200)
 
         is_training = kwargs.get("is_training", True)
         if not (path.endswith(".jsonl") or path.endswith(".json")):
diff --git a/funasr/download/download_from_hub.py b/funasr/download/download_from_hub.py
index 075b131..46263c9 100644
--- a/funasr/download/download_from_hub.py
+++ b/funasr/download/download_from_hub.py
@@ -56,13 +56,13 @@
                 config = OmegaConf.load(cfg["config"])
                 kwargs = OmegaConf.merge(config, cfg)
                 kwargs["model"] = config["model"]
-    elif os.path.exists(os.path.join(model_or_path, "config.yaml")) and os.path.exists(
-        os.path.join(model_or_path, "model.pt")
-    ):
+    elif os.path.exists(os.path.join(model_or_path, "config.yaml")):
         config = OmegaConf.load(os.path.join(model_or_path, "config.yaml"))
         kwargs = OmegaConf.merge(config, kwargs)
-        init_param = os.path.join(model_or_path, "model.pb")
-        kwargs["init_param"] = init_param
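+        # keep a user-supplied init_param when it points to an existing file; otherwise fall back to model.pt in the model directory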
+        init_param = os.path.join(model_or_path, "model.pt")
+        if "init_param" not in kwargs or not os.path.exists(kwargs["init_param"]):
+            kwargs["init_param"] = init_param
+            assert os.path.exists(kwargs["init_param"]), "init_param does not exist"
         if os.path.exists(os.path.join(model_or_path, "tokens.txt")):
             kwargs["tokenizer_conf"]["token_list"] = os.path.join(model_or_path, "tokens.txt")
         if os.path.exists(os.path.join(model_or_path, "tokens.json")):
@@ -122,7 +122,7 @@
     ):
         config = OmegaConf.load(os.path.join(model_or_path, "config.yaml"))
         kwargs = OmegaConf.merge(config, kwargs)
-        init_param = os.path.join(model_or_path, "model.pb")
+        init_param = os.path.join(model_or_path, "model.pt")
         kwargs["init_param"] = init_param
         if os.path.exists(os.path.join(model_or_path, "tokens.txt")):
             kwargs["tokenizer_conf"]["token_list"] = os.path.join(model_or_path, "tokens.txt")
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index c209026..738ba92 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -21,6 +21,8 @@
 from funasr.train_utils.device_funcs import to_device
 import traceback
 
+dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}  # config dtype strings mapped to torch dtypes
+
 
 @tables.register("model_classes", "LLMASR")
 class LLMASR(nn.Module):
@@ -394,7 +396,9 @@
             # frontend = model.kwargs.get("frontend")
             audio_encoder_output_size = model.model.encoder_output_size
 
-            audio_encoder = model.model.model.encoder
+            audio_encoder = (
+                model.model.model.encoder if hasattr(model.model, "model") else model.model.encoder
+            )
 
             # self.frontend = frontend
 
@@ -405,38 +409,60 @@
             audio_encoder = encoder_class(input_size=input_size, **audio_encoder_conf)
             audio_encoder_output_size = audio_encoder.output_size()
         freeze = audio_encoder_conf.get("freeze", True)
+        freeze_layer_num = int(audio_encoder_conf.get("freeze_layer_num", -1))
+        # if freeze_layer_num > 0:
+        #     freeze_layer_num = range(freeze_layer_num)
+
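+        # freeze_layer_num > 0: freeze only encoder layers whose index (parsed from the parameter name) is below
+        # freeze_layer_num, plus non-layer parameters except ln_post; otherwise freeze the whole encoder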
         if freeze:
             for name, param in audio_encoder.named_parameters():
-                param.requires_grad = False
+                if freeze_layer_num > 0:
+                    idx = re.search(r"\.\d+\.", name)
+                    if idx is not None:
+                        beg, end = idx.regs[0]
+                        layer_id = int(name[beg + 1 : end - 1])
+                        if layer_id < freeze_layer_num:
+                            param.requires_grad = False
+                    elif "ln_post." not in name:
+                        param.requires_grad = False
+                else:
+                    param.requires_grad = False
+
             audio_encoder.eval()
 
         self.audio_encoder = audio_encoder
 
         # llm
-        hub = llm_conf.get("hub", "hf")
         self.llm = None
-        if hub == "hf":
-            from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
 
-            init_param_path = llm_conf.get("init_param_path", "vicuna-7b-v1.5")
+        from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
 
-            model = AutoModelForCausalLM.from_pretrained(
-                init_param_path,
-                load_in_8bit=None,
-                device_map=None,
-                use_cache=None,
-            )
-            freeze = llm_conf.get("freeze", True)
-            if freeze:
-                for name, param in model.named_parameters():
-                    param.requires_grad = False
-                model.eval()
-            self.llm = model
+        init_param_path = llm_conf.get("init_param_path", "vicuna-7b-v1.5")
+
+        model = AutoModelForCausalLM.from_pretrained(
+            init_param_path,
+            load_in_8bit=None,
+            device_map=None,
+            use_cache=None,
+        )
+        freeze = llm_conf.get("freeze", True)
+        if freeze:
+            for name, param in model.named_parameters():
+                param.requires_grad = False
+            model.eval()
+        self.llm_dtype = llm_conf.get("llm_dtype", "fp32")
+        self.llm = model.to(dtype_map[self.llm_dtype])
+        llm_dim = model.get_input_embeddings().weight.shape[-1]
 
         # adaptor
         adaptor_class = tables.adaptor_classes.get(audio_adaptor)
         audio_adaptor_conf["encoder_dim"] = audio_encoder_output_size
+        audio_adaptor_conf["llm_dim"] = llm_dim
         audio_adaptor = adaptor_class(**audio_adaptor_conf)
+        init_param_path = audio_adaptor_conf.get("init_param_path", None)
+        if init_param_path is not None:
+            src_state = torch.load(init_param_path, map_location="cpu")
+            flag = audio_adaptor.load_state_dict(src_state, strict=False)
+            logging.info(f"Loading audio_adaptor ckpt: {init_param_path}, status: {flag}")
 
         self.audio_adaptor = audio_adaptor
 
@@ -470,11 +496,12 @@
 
         batch_size, frames, _ = speech.shape
 
-        # audio encoder
-        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
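+        # run the audio encoder and adaptor with autocast disabled; only the LLM call below runs in llm_dtype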
+        with torch.cuda.amp.autocast(enabled=False):
+            # audio encoder
+            encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
 
-        # audio_adaptor
-        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
+            # audio_adaptor
+            encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
 
         input_ids[input_ids < 0] = 0
         inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
@@ -504,12 +531,17 @@
                     batch_idx, :min_len, :
                 ]
 
-        labels_ids[labels_ids == -1] = -100
-
-        model_outputs = self.llm(
-            inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
-        )
-        loss = model_outputs.loss
+        with torch.cuda.amp.autocast(
+            enabled=True if self.llm_dtype != "fp32" else False, dtype=dtype_map[self.llm_dtype]
+        ):
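+            # map the dataset pad value (-1) to -100, the ignore index of the HF causal-LM loss, and clear padded attention positions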
+            labels_ids[labels_ids == -1] = -100
+            attention_mask[attention_mask < 0] = 0
+            model_outputs = self.llm(
+                inputs_embeds=inputs_embeds.to(dtype_map[self.llm_dtype]),
+                attention_mask=attention_mask,
+                labels=labels_ids,
+            )
+            loss = model_outputs.loss
 
         stats = {}
         with torch.no_grad():
@@ -531,6 +563,519 @@
             batch_size = int((labels_ids > 0 + 1).sum())
         loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
         return loss, stats, weight
+
+    def encode(self, speech, speech_lengths):
+        # audio encoder
+        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+
+        return encoder_out, encoder_out_lens
+
+    def data_template(self, data):
+        system, user, assistant = [], [], []
+        for i, item in enumerate(data):
+            role = item["role"]
+            content = item["content"]
+            if role == "system":
+                system.append(content)
+            elif role == "user":
+                user.append(content)
+            elif role == "assistant":
+                assistant.append(content)
+
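+        # replicate the system prompt(s) so the per-turn zip over (system, user, assistant) lines up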
+        system = system * len(user)
+
+        contents = {
+            "system": system,
+            "user": user,
+            "assistant": assistant,
+        }
+
+        return contents
+
+    def data_load_speech(self, contents: dict, tokenizer, frontend, meta_data={}, **kwargs):
+
+        system = contents["system"]
+        user = contents["user"]
+        assistant = contents["assistant"]
+        pattern = re.compile(r"(<\|startofspeech\|>.*?<\|endofspeech\|>)")
+        input_ids, labels, source_ids, target_ids, fbank, fbank_lens, fbank_mask, fbank_beg = (
+            [],
+            [],
+            [],
+            [],
+            [],
+            [],
+            [],
+            [],
+        )
+
+        for i, (system_prompt, user_prompt, target_out) in enumerate(zip(system, user, assistant)):
+
+            source_input = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
+
+            splits = pattern.split(source_input)
+            source_ids_i = []
+            fbank_mask_i = []
+            fbank_beg_i = []
+            fbank_lens_i = []
+            # target_ids_i = []
+            for k, sub_str in enumerate(splits):
+                if not sub_str.startswith("<|startofspeech|>"):
+                    sub_token = tokenizer.encode(sub_str)
+                    source_ids_i += sub_token
+                    fbank_mask_i += [0] * len(sub_token)
+                else:
+                    sub_str = sub_str.replace("<|startofspeech|>", "").replace(
+                        "<|endofspeech|>", ""
+                    )
+                    if sub_str.startswith("!"):
+                        try:
+                            time1 = time.perf_counter()
+                            data_src = load_audio_text_image_video(sub_str[1:], fs=frontend.fs)
+                            time2 = time.perf_counter()
+                            meta_data["load_data"] = f"{time2 - time1:0.3f}"
+                        except Exception as e:
+                            logging.error(f"Loading wav failed! {str(e)}, {traceback.format_exc()}")
+
+                        speech, speech_lengths = extract_fbank(
+                            data_src,
+                            data_type=kwargs.get("data_type", "sound"),
+                            frontend=frontend,
+                            is_final=True,
+                        )  # speech: [b, T, d]
+
+                        time3 = time.perf_counter()
+                        meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
+                        meta_data["batch_data_time"] = (
+                            speech_lengths.sum().item()
+                            * frontend.frame_shift
+                            * frontend.lfr_n
+                            / 1000
+                        )
+
+                        if hasattr(frontend, "permute") and not frontend.permute:
+                            # if kwargs.get("permute", True):
+                            speech = speech.permute(0, 2, 1)
+
+                        if (
+                            kwargs.get("dataset_conf", {}).get("audio_encoder_downsample_rate", 1)
+                            == 4
+                        ):
+                            olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
+                            olens = 1 + (olens - 3 + 2 * 1) // 2
+                        elif (
+                            kwargs.get("dataset_conf", {}).get("audio_encoder_downsample_rate", 1)
+                            == 1
+                        ):
+                            olens = speech_lengths[0].item()
+
+                        sub_token_len = (olens - 1) // kwargs.get("dataset_conf", {}).get(
+                            "audio_adaptor_downsample_rate", 1
+                        ) + 1
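+                        # placeholder ids reserving the span where the audio embeddings are spliced in at inference time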
+                        sub_token = [0] * sub_token_len
+                        fbank_beg_i = [len(source_ids_i)]
+                        source_ids_i += sub_token
+                        fbank_mask_i += [1] * len(sub_token)
+
+            source_mask = [-100] * len(source_ids_i)
+            target_out = f"{target_out}<|im_end|>"
+            target_ids = tokenizer.encode(target_out)
+            input_ids += source_ids_i + target_ids
+            labels += source_mask + target_ids
+            fbank_mask += fbank_mask_i
+            fbank_beg.append(fbank_beg_i)
+
+        input_ids = torch.tensor(input_ids, dtype=torch.int64)  # [: self.max_token_length]
+        attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
+        labels = torch.tensor(labels, dtype=torch.int64)  # [: self.max_token_length]
+        source_ids = torch.tensor(source_ids_i, dtype=torch.int64)
+        target_ids = torch.tensor(target_ids, dtype=torch.int64)
+
+        fbank = speech[0, :, :]
+        fbank_lens = speech_lengths
+        fbank_mask = torch.tensor(fbank_mask, dtype=torch.float32)
+        fbank_beg = torch.tensor(fbank_beg, dtype=torch.int32)
+
+        output = {
+            "speech": fbank[None, :, :],
+            "speech_lengths": fbank_lens[:, None],
+            "fbank_mask": fbank_mask[None, :],
+            "fbank_beg": fbank_beg[None,],
+            "input_ids": input_ids[None, :],
+            "attention_mask": attention_mask[None, :],
+            "labels_ids": labels[None, :],
+            "source_ids": source_ids[None, :],
+            "target_ids": target_ids[None, :],
+        }
+
+        return output
+
+    def inference(
+        self,
+        data_in,
+        data_lengths=None,
+        key: list = None,
+        tokenizer=None,
+        frontend=None,
+        **kwargs,
+    ):
+
+        meta_data = {}
+        prompt = kwargs.get("prompt", None)
+
+        if kwargs.get("batch_size", 1) > 1:
+            raise NotImplementedError("batch decoding is not implemented")
+
+        contents = self.data_template(data_in[0])
+        output = self.data_load_speech(contents, tokenizer, frontend, meta_data=meta_data, **kwargs)
+        batch = to_device(output, kwargs["device"])
+
+        # audio encoder
+        speech = batch["speech"]
+        speech_lengths = batch["speech_lengths"][:, 0]
+        # fp16
+        if kwargs.get("fp16", False):
+            speech = speech.to(torch.float16)
+        elif kwargs.get("bf16", False):
+            speech = speech.to(torch.bfloat16)
+        # audio encoder
+        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
+
+        # audio_adaptor
+        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
+
+        input_ids = batch["input_ids"]
+        source_ids = batch["source_ids"]
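+        # without teacher forcing ("tearchforing"), embed only the prompt (source) ids and let the LLM generate the answer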
+        if not kwargs.get("tearchforing", False):
+            input_ids = source_ids
+        input_ids[input_ids < 0] = 0
+        inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
+
+        batch_size, token_num, dims = inputs_embeds.shape
+        fbank_beg = batch["fbank_beg"]
+        for batch_idx in range(batch_size):
+
+            min_len = encoder_out_lens[batch_idx].item()
+            fbank_beg_idx = fbank_beg[batch_idx]
+            inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + min_len, :] = encoder_out[
+                batch_idx, :min_len, :
+            ]
+
+        llm_dtype = kwargs.get("llm_dtype", "fp32")
+        if llm_dtype == "fp32":
+            llm_dtype = "fp16" if kwargs.get("fp16", False) else llm_dtype
+            llm_dtype = "bf16" if kwargs.get("bf16", False) else llm_dtype
+
+        with torch.cuda.amp.autocast(
+            enabled=True if llm_dtype != "fp32" else False, dtype=dtype_map[llm_dtype]
+        ):
+            label = contents["assistant"][0]
+            self.llm = self.llm.to(dtype_map[llm_dtype])
+            inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])
+
+            if not kwargs.get("tearchforing", False):
+
+                generated_ids = self.llm.generate(
+                    inputs_embeds=inputs_embeds, max_new_tokens=kwargs.get("max_length", 512)
+                )
+                # generated_ids = [
+                #     output_ids[len(input_id) :]
+                #     for input_id, output_ids in zip(input_ids, generated_ids)
+                # ]
+                response = tokenizer.batch_decode(
+                    generated_ids, skip_special_tokens=kwargs.get("skip_special_tokens", True)
+                )[0]
+
+                loss = None
+            else:
+
+                labels_ids = batch["labels_ids"]
+                labels_ids[labels_ids == -1] = -100
+                attention_mask = batch.get("attention_mask", None)
+                # attention_mask = attention_mask.to(dtype_map[llm_dtype])
+                model_outputs = self.llm(
+                    inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
+                )
+
+                preds = torch.argmax(model_outputs.logits, -1)[:, source_ids.shape[1] :]
+                response = tokenizer.batch_decode(
+                    preds,
+                    add_special_tokens=False,
+                    skip_special_tokens=kwargs.get("skip_special_tokens", True),
+                )[0]
+                loss = model_outputs.loss.item()
+
+        ibest_writer = None
+        if kwargs.get("output_dir") is not None:
+            if not hasattr(self, "writer"):
+                self.writer = DatadirWriter(kwargs.get("output_dir"))
+            ibest_writer = self.writer[f"{0 + 1}best_recog"]
+
+        results = []
+        response_clean = re.sub(r"[^\w\s\u3000\u4e00-\u9fff]+", "", response)  # keep word chars, whitespace and CJK for text_tn
+        result_i = {"key": key[0], "text": response, "text_tn": response_clean, "label": label}
+        if loss is not None:
+            result_i["loss"] = loss
+        results.append(result_i)
+
+        if ibest_writer is not None:
+            ibest_writer["text"][key[0]] = response
+            ibest_writer["label"][key[0]] = label
+            ibest_writer["text_tn"][key[0]] = response_clean
+
+        return results, meta_data
+
+
+@tables.register("model_classes", "LLMASR3")
+class LLMASR3(LLMASR2):
+    """ """
+
+    def __init__(
+        self,
+        *args,
+        **kwargs,
+    ):
+
+        super().__init__(*args, **kwargs)
+
+    def encode(self, speech, speech_lengths):
+        # audio encoder
+        encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
+        return encoder_out, encoder_out_lens
+
+
+@tables.register("model_classes", "LLMASR4")
+class LLMASR4(nn.Module):
+    """ """
+
+    def __init__(
+        self,
+        specaug: str = None,
+        specaug_conf: dict = None,
+        normalize: str = None,
+        normalize_conf: dict = None,
+        audio_encoder: str = None,
+        audio_encoder_conf: dict = None,
+        audio_adaptor: str = None,
+        audio_adaptor_conf: dict = None,
+        decoder: str = None,
+        decoder_conf: dict = None,
+        ctc: str = None,
+        ctc_conf: dict = None,
+        ctc_weight: float = 0.5,
+        llm: str = None,
+        llm_conf: dict = None,
+        input_size: int = 80,
+        vocab_size: int = -1,
+        ignore_id: int = -1,
+        blank_id: int = 0,
+        sos: int = 1,
+        eos: int = 2,
+        lsm_weight: float = 0.0,
+        length_normalized_loss: bool = False,
+        report_cer: bool = True,
+        report_wer: bool = True,
+        sym_space: str = "<space>",
+        sym_blank: str = "<blank>",
+        # extract_feats_in_collect_stats: bool = True,
+        share_embedding: bool = False,
+        # preencoder: Optional[AbsPreEncoder] = None,
+        # postencoder: Optional[AbsPostEncoder] = None,
+        **kwargs,
+    ):
+
+        super().__init__()
+
+        # audio encoder
+        hub = audio_encoder_conf.get("hub", None)
+        if hub == "ms":
+            from funasr import AutoModel
+
+            model = AutoModel(model=audio_encoder, model_revision="master")
+            # frontend = model.kwargs.get("frontend")
+            audio_encoder_output_size = model.model.encoder_output_size
+
+            audio_encoder = (
+                model.model.model.encoder if hasattr(model.model, "model") else model.model.encoder
+            )
+
+            # self.frontend = frontend
+
+        elif hub == "hf":
+            pass
+        else:
+            encoder_class = tables.encoder_classes.get(audio_encoder)
+            audio_encoder = encoder_class(input_size=input_size, **audio_encoder_conf)
+            audio_encoder_output_size = audio_encoder.output_size()
+        freeze = audio_encoder_conf.get("freeze", True)
+        freeze_layer_num = int(audio_encoder_conf.get("freeze_layer_num", -1))
+        # if freeze_layer_num > 0:
+        #     freeze_layer_num = range(freeze_layer_num)
+
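+        # freeze_layer_num > 0: freeze only encoder layers whose index (parsed from the parameter name) is below
+        # freeze_layer_num, plus non-layer parameters except ln_post; otherwise freeze the whole encoder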
+        if freeze:
+            for name, param in audio_encoder.named_parameters():
+                if freeze_layer_num > 0:
+                    idx = re.search(r"\.\d+\.", name)
+                    if idx is not None:
+                        beg, end = idx.regs[0]
+                        layer_id = int(name[beg + 1 : end - 1])
+                        if layer_id < freeze_layer_num:
+                            param.requires_grad = False
+                    elif "ln_post." not in name:
+                        param.requires_grad = False
+                else:
+                    param.requires_grad = False
+
+            audio_encoder.eval()
+
+        self.audio_encoder = audio_encoder
+
+        # llm
+        self.llm = None
+
+        from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
+
+        init_param_path = llm_conf.get("init_param_path", "vicuna-7b-v1.5")
+
+        model = AutoModelForCausalLM.from_pretrained(
+            init_param_path,
+            load_in_8bit=None,
+            device_map=None,
+            use_cache=None,
+        )
+        freeze = llm_conf.get("freeze", True)
+        if freeze:
+            for name, param in model.named_parameters():
+                param.requires_grad = False
+            model.eval()
+        self.llm_dtype = llm_conf.get("llm_dtype", "fp32")
+        self.llm = model.to(dtype_map[self.llm_dtype])
+        llm_dim = model.get_input_embeddings().weight.shape[-1]
+
+        # adaptor
+        adaptor_class = tables.adaptor_classes.get(audio_adaptor)
+        audio_adaptor_conf["encoder_dim"] = audio_encoder_output_size
+        audio_adaptor_conf["llm_dim"] = llm_dim
+        audio_adaptor = adaptor_class(**audio_adaptor_conf)
+        init_param_path = audio_adaptor_conf.get("init_param_path", None)
+        if init_param_path is not None:
+            src_state = torch.load(init_param_path, map_location="cpu")
+            flag = audio_adaptor.load_state_dict(src_state, strict=False)
+            logging.info(f"Loading audio_adaptor ckpt: {init_param_path}, status: {flag}")
+
+        self.audio_adaptor = audio_adaptor
+
+        self.error_calculator = None
+
+        self.length_normalized_loss = length_normalized_loss
+        self.beam_search = None
+
+    def forward(
+        self,
+        speech: torch.Tensor,
+        speech_lengths: torch.Tensor,
+        input_ids: torch.Tensor,
+        attention_mask: torch.Tensor,
+        labels_ids: torch.Tensor,
+        fbank_beg: torch.Tensor,
+        fbank_mask: torch.Tensor,
+        **kwargs,
+    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
+        """Encoder + Decoder + Calc loss
+        Args:
+                speech: (Batch, Length, ...)
+                speech_lengths: (Batch, )
+                text: (Batch, Length)
+                text_lengths: (Batch,)
+        """
+        if len(speech_lengths.size()) > 1:
+            speech_lengths = speech_lengths[:, 0]
+
+        batch_size_speech, frames, _ = speech.shape
+        batch_size, token_num = input_ids.shape
+
+        with torch.cuda.amp.autocast(enabled=False):
+            # audio encoder
+            encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
+
+            # audio_adaptor
+            encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
+
+        input_ids[input_ids < 0] = 0
+        inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
+
+        batch_size, token_num, dims = inputs_embeds.shape
+        fake_token_len = kwargs.get("fake_token_len")
+        fake_token_len[fake_token_len < 0] = 0
+        fbank_beg[fbank_beg < 0] = 0
+        speech_idx = 0
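+        # splice each audio segment's adaptor output into its placeholder span in the token embeddings;
+        # speech_idx walks the flattened list of audio segments across batch items and turns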
+        for batch_idx in range(batch_size):
+
+            for turn_id in range(fbank_beg.shape[1]):
+                fbank_beg_idx = fbank_beg[batch_idx, turn_id].item()
+                if fbank_beg_idx > 0:
+                    speech_token_len = fake_token_len[batch_idx, turn_id]
+                    speech_token = encoder_out[speech_idx, :speech_token_len, :]
+
+                    try:
+                        inputs_embeds[
+                            batch_idx, fbank_beg_idx : fbank_beg_idx + speech_token_len, :
+                        ] = speech_token
+                    except Exception as e:
+                        logging.error(f"{str(e)}, {traceback.format_exc()}")
+                        logging.info(
+                            f"batch_idx: {batch_idx}, inputs_embeds: {inputs_embeds.shape}, fbank_beg_idx: {fbank_beg_idx}, speech_token_len: {speech_token_len}, encoder_out: {encoder_out.shape}, encoder_out_lens: {encoder_out_lens[speech_idx].item()}"
+                        )
+                        speech_token_len = encoder_out_lens[speech_idx].item()
+                        speech_token = encoder_out[speech_idx, :speech_token_len, :]
+                        inputs_embeds[
+                            batch_idx, fbank_beg_idx : fbank_beg_idx + speech_token_len, :
+                        ] = speech_token
+
+                    speech_idx += 1
+
+        with torch.cuda.amp.autocast(
+            enabled=True if self.llm_dtype != "fp32" else False, dtype=dtype_map[self.llm_dtype]
+        ):
+            labels_ids[labels_ids == -1] = -100
+            attention_mask[attention_mask < 0] = 0
+            model_outputs = self.llm(
+                inputs_embeds=inputs_embeds.to(dtype_map[self.llm_dtype]),
+                attention_mask=attention_mask,
+                labels=labels_ids,
+            )
+            loss = model_outputs.loss
+
+        stats = {}
+        with torch.no_grad():
+            preds = torch.argmax(model_outputs.logits, -1)
+            acc_att = compute_accuracy(preds[:, :-1], labels_ids[:, 1:], ignore_label=-100)
+            stats["acc"] = acc_att
+
+        stats["loss"] = torch.clone(loss.detach())
+        stats["batch_size"] = batch_size
+        stats["batch_size_speech"] = batch_size_speech
+        stats["batch_size_x_frames"] = frames * batch_size_speech
+        stats["batch_size_real_frames"] = speech_lengths.sum().item()
+        stats["padding_frames"] = stats["batch_size_x_frames"] - stats["batch_size_real_frames"]
+        stats["batch_size_x_tokens"] = token_num * batch_size
+        stats["batch_size_real_tokens"] = attention_mask.sum().item()
+        stats["padding_tokens"] = stats["batch_size_x_tokens"] - stats["batch_size_real_tokens"]
+
+        # force_gatherable: to-device and to-tensor if scalar for DataParallel
+        if self.length_normalized_loss:
+            batch_size = int((labels_ids > 0 + 1).sum())
+        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
+        return loss, stats, weight
+
+    def encode(self, speech, speech_lengths):
+        # audio encoder
+        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+
+        return encoder_out, encoder_out_lens
 
     def data_template(self, data):
         system, user, assistant = [], [], []
@@ -685,11 +1230,10 @@
         # fp16
         if kwargs.get("fp16", False):
             speech = speech.to(torch.float16)
-            encoder_out_lens = encoder_out_lens.to(torch.float16)
         elif kwargs.get("bf16", False):
             speech = speech.to(torch.bfloat16)
-            encoder_out_lens = encoder_out_lens.to(torch.bfloat16)
-        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+        # audio encoder
+        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
 
         # audio_adaptor
         encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
@@ -712,11 +1256,16 @@
             ]
 
         llm_dtype = kwargs.get("llm_dtype", "fp32")
-        dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
-        with torch.cuda.amp.autocast(dtype=dtype_map[llm_dtype]):
+        if llm_dtype == "fp32":
+            llm_dtype = "fp16" if kwargs.get("fp16", False) else llm_dtype
+            llm_dtype = "bf16" if kwargs.get("bf16", False) else llm_dtype
+
+        with torch.cuda.amp.autocast(
+            enabled=True if llm_dtype != "fp32" else False, dtype=dtype_map[llm_dtype]
+        ):
             label = contents["assistant"][0]
-            # self.llm = self.llm.to(dtype_map[llm_dtype])
-            # inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])
+            self.llm = self.llm.to(dtype_map[llm_dtype])
+            inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])
 
             if not kwargs.get("tearchforing", False):
 
diff --git a/funasr/models/paraformer/cif_predictor.py b/funasr/models/paraformer/cif_predictor.py
index 05e283a..0856eed 100644
--- a/funasr/models/paraformer/cif_predictor.py
+++ b/funasr/models/paraformer/cif_predictor.py
@@ -494,6 +494,8 @@
         token_num_floor = torch.floor(token_num)
 
         return hidden, alphas, token_num_floor
+
+
 @torch.jit.script
 def cif_v1_export(hidden, alphas, threshold: float):
     device = hidden.device
@@ -504,7 +506,7 @@
     frames = torch.zeros(batch_size, len_time, hidden_size, dtype=dtype, device=device)
     fires = torch.zeros(batch_size, len_time, dtype=dtype, device=device)
 
-    prefix_sum = torch.cumsum(alphas, dim=1, dtype=torch.float64).to(torch.float32) # cumsum precision degradation cause wrong result in extreme 
+    prefix_sum = torch.cumsum(alphas, dim=1)
     prefix_sum_floor = torch.floor(prefix_sum)
     dislocation_prefix_sum = torch.roll(prefix_sum, 1, dims=1)
     dislocation_prefix_sum_floor = torch.floor(dislocation_prefix_sum)
@@ -516,9 +518,7 @@
     fires[fire_idxs] = 1
     fires = fires + prefix_sum - prefix_sum_floor
 
-    prefix_sum_hidden = torch.cumsum(
-        alphas.unsqueeze(-1).tile((1, 1, hidden_size)) * hidden, dim=1
-    )
+    prefix_sum_hidden = torch.cumsum(alphas.unsqueeze(-1).tile((1, 1, hidden_size)) * hidden, dim=1)
 
     frames = prefix_sum_hidden[fire_idxs]
     shift_frames = torch.roll(frames, 1, dims=0)
@@ -530,25 +530,21 @@
     shift_frames[shift_batch_idxs] = 0
 
     remains = fires - torch.floor(fires)
-    remain_frames = (
-        remains[fire_idxs].unsqueeze(-1).tile((1, hidden_size)) * hidden[fire_idxs]
-    )
+    remain_frames = remains[fire_idxs].unsqueeze(-1).tile((1, hidden_size)) * hidden[fire_idxs]
 
     shift_remain_frames = torch.roll(remain_frames, 1, dims=0)
     shift_remain_frames[shift_batch_idxs] = 0
 
     frames = frames - shift_frames + shift_remain_frames - remain_frames
 
-    max_label_len = alphas.sum(dim=-1)
-    max_label_len = torch.floor(max_label_len).max().to(dtype=torch.int64)
+    max_label_len = batch_len.max()
 
-    frame_fires = torch.zeros(
-        batch_size, max_label_len, hidden_size, dtype=dtype, device=device
-    )
+    frame_fires = torch.zeros(batch_size, max_label_len, hidden_size, dtype=dtype, device=device)
     indices = torch.arange(max_label_len, device=device).expand(batch_size, -1)
     frame_fires_idxs = indices < batch_len.unsqueeze(1)
     frame_fires[frame_fires_idxs] = frames
     return frame_fires, fires
+
 
 @torch.jit.script
 def cif_export(hidden, alphas, threshold: float):
@@ -671,7 +667,7 @@
 
     fires = torch.zeros(batch_size, len_time, dtype=dtype, device=device)
 
-    prefix_sum = torch.cumsum(alphas, dim=1, dtype=torch.float64).to(torch.float32) # cumsum precision degradation cause wrong result in extreme 
+    prefix_sum = torch.cumsum(alphas, dim=1)
     prefix_sum_floor = torch.floor(prefix_sum)
     dislocation_prefix_sum = torch.roll(prefix_sum, 1, dims=1)
     dislocation_prefix_sum_floor = torch.floor(dislocation_prefix_sum)
@@ -693,11 +689,8 @@
     device = hidden.device
     dtype = hidden.dtype
     batch_size, len_time, hidden_size = hidden.size()
-    frames = torch.zeros(batch_size, len_time, hidden_size,
-                         dtype=dtype, device=device)
-    prefix_sum_hidden = torch.cumsum(
-        alphas.unsqueeze(-1).tile((1, 1, hidden_size)) * hidden, dim=1
-    )
+    frames = torch.zeros(batch_size, len_time, hidden_size, dtype=dtype, device=device)
+    prefix_sum_hidden = torch.cumsum(alphas.unsqueeze(-1).tile((1, 1, hidden_size)) * hidden, dim=1)
 
     frames = prefix_sum_hidden[fire_idxs]
     shift_frames = torch.roll(frames, 1, dims=0)
@@ -709,21 +702,16 @@
     shift_frames[shift_batch_idxs] = 0
 
     remains = fires - torch.floor(fires)
-    remain_frames = (
-        remains[fire_idxs].unsqueeze(-1).tile((1,
-                                               hidden_size)) * hidden[fire_idxs]
-    )
+    remain_frames = remains[fire_idxs].unsqueeze(-1).tile((1, hidden_size)) * hidden[fire_idxs]
 
     shift_remain_frames = torch.roll(remain_frames, 1, dims=0)
     shift_remain_frames[shift_batch_idxs] = 0
 
     frames = frames - shift_frames + shift_remain_frames - remain_frames
 
-    max_label_len = torch.round(alphas.sum(-1)).int().max() # torch.round to calculate the max length
+    max_label_len = batch_len.max()
 
-    frame_fires = torch.zeros(
-        batch_size, max_label_len, hidden_size, dtype=dtype, device=device
-    )
+    frame_fires = torch.zeros(batch_size, max_label_len, hidden_size, dtype=dtype, device=device)
     indices = torch.arange(max_label_len, device=device).expand(batch_size, -1)
     frame_fires_idxs = indices < batch_len.unsqueeze(1)
     frame_fires[frame_fires_idxs] = frames
diff --git a/funasr/models/sense_voice/model.py b/funasr/models/sense_voice/model.py
index 97f1b19..a9b2149 100644
--- a/funasr/models/sense_voice/model.py
+++ b/funasr/models/sense_voice/model.py
@@ -16,6 +16,7 @@
 from . import whisper_lib as whisper
 from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
 from funasr.utils.datadir_writer import DatadirWriter
+from funasr.models.ctc.ctc import CTC
 
 from funasr.register import tables
 
@@ -1035,6 +1036,7 @@
         self.length_normalized_loss = length_normalized_loss
         self.beam_search = None
         self.activation_checkpoint = kwargs.get("activation_checkpoint", False)
+        self.encoder_output_size = encoder_output_size
 
     def forward(
         self,
@@ -1256,7 +1258,7 @@
         if isinstance(task, str):
             task = [task]
         task = "".join([f"<|{x}|>" for x in task])
-        
+
         sos = kwargs.get("model_conf").get("sos")
         if isinstance(sos, str):
             initial_prompt = kwargs.get("initial_prompt", f"<|startoftranscript|>{task}")
@@ -1270,7 +1272,9 @@
             language = DecodingOptions.get("language", None)
             language = None if language == "auto" else language
             initial_prompt = kwargs.get("initial_prompt", f"{task}")
-            initial_prompt_lid = f"{initial_prompt}<|{language}|>" if language is not None else initial_prompt
+            initial_prompt_lid = (
+                f"{initial_prompt}<|{language}|>" if language is not None else initial_prompt
+            )
             initial_prompt_lid_int = tokenizer.encode(initial_prompt_lid, allowed_special="all")
             sos_int = [sos] + initial_prompt_lid_int
         eos = kwargs.get("model_conf").get("eos")
@@ -1303,9 +1307,7 @@
         )
         self.beam_search.event_score_ga = DecodingOptions.get("gain_tokens_score", [1, 1, 1, 1])
 
-        encoder_out, encoder_out_lens = self.encode(
-            speech[None, :, :], speech_lengths
-        )
+        encoder_out, encoder_out_lens = self.encode(speech[None, :, :], speech_lengths)
 
         if text_token_int is not None:
             i = 0
@@ -1384,3 +1386,279 @@
                     ibest_writer["text"][key[i]] = text
 
         return results, meta_data
+
+
+from funasr.models.paraformer.search import Hypothesis
+from funasr.utils import postprocess_utils
+
+
+@tables.register("model_classes", "SenseVoiceSANMCTC")
+class SenseVoiceSANMCTC(nn.Module):
+    """CTC-attention hybrid Encoder-Decoder model"""
+
+    def __init__(
+        self,
+        specaug: str = None,
+        specaug_conf: dict = None,
+        normalize: str = None,
+        normalize_conf: dict = None,
+        encoder: str = None,
+        encoder_conf: dict = None,
+        ctc_conf: dict = None,
+        input_size: int = 80,
+        vocab_size: int = -1,
+        ignore_id: int = -1,
+        blank_id: int = 0,
+        sos: int = 1,
+        eos: int = 2,
+        length_normalized_loss: bool = False,
+        **kwargs,
+    ):
+
+        super().__init__()
+
+        if specaug is not None:
+            specaug_class = tables.specaug_classes.get(specaug)
+            specaug = specaug_class(**specaug_conf)
+        if normalize is not None:
+            normalize_class = tables.normalize_classes.get(normalize)
+            normalize = normalize_class(**normalize_conf)
+        encoder_class = tables.encoder_classes.get(encoder)
+        encoder = encoder_class(input_size=input_size, **encoder_conf)
+        encoder_output_size = encoder.output_size()
+
+        if ctc_conf is None:
+            ctc_conf = {}
+        ctc = CTC(odim=vocab_size, encoder_output_size=encoder_output_size, **ctc_conf)
+
+        self.blank_id = blank_id
+        self.sos = sos if sos is not None else vocab_size - 1
+        self.eos = eos if eos is not None else vocab_size - 1
+        self.vocab_size = vocab_size
+        self.ignore_id = ignore_id
+        self.specaug = specaug
+        self.normalize = normalize
+        self.encoder = encoder
+        self.error_calculator = None
+
+        self.ctc = ctc
+
+        self.length_normalized_loss = length_normalized_loss
+        self.encoder_output_size = encoder_output_size
+
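+        # query/prompt embeddings prepended to the input features at inference time; the embedding
+        # dim of 560 is assumed to match the encoder input feature size (e.g. 80-dim fbank with LFR)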
+        self.lid_dict = {"zh": 3, "en": 4, "yue": 7, "ja": 11, "ko": 12, "nospeech": 13}
+        self.textnorm_dict = {"withtextnorm": 14, "wotextnorm": 15}
+        self.embed = torch.nn.Embedding(8 + len(self.lid_dict) + len(self.textnorm_dict), 560)
+
+    def forward(
+        self,
+        speech: torch.Tensor,
+        speech_lengths: torch.Tensor,
+        text: torch.Tensor,
+        text_lengths: torch.Tensor,
+        **kwargs,
+    ):
+        """Encoder + Decoder + Calc loss
+        Args:
+                speech: (Batch, Length, ...)
+                speech_lengths: (Batch, )
+                text: (Batch, Length)
+                text_lengths: (Batch,)
+        """
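+        # some dataloaders emit lengths with shape (Batch, 1); squeeze them to (Batch,)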
+        if len(text_lengths.size()) > 1:
+            text_lengths = text_lengths[:, 0]
+        if len(speech_lengths.size()) > 1:
+            speech_lengths = speech_lengths[:, 0]
+
+        batch_size = speech.shape[0]
+
+        # 1. Encoder
+        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
+
+        loss_ctc, cer_ctc = None, None
+        stats = dict()
+
+        loss_ctc, cer_ctc = self._calc_ctc_loss(encoder_out, encoder_out_lens, text, text_lengths)
+
+        loss = loss_ctc
+
+        # Collect total loss stats
+        stats["loss"] = torch.clone(loss.detach())
+
+        # force_gatherable: to-device and to-tensor if scalar for DataParallel
+        if self.length_normalized_loss:
+            batch_size = int((text_lengths + 1).sum())
+        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
+        return loss, stats, weight
+
+    def encode(
+        self,
+        speech: torch.Tensor,
+        speech_lengths: torch.Tensor,
+        **kwargs,
+    ):
+        """Frontend + Encoder. Note that this method is used by asr_inference.py
+        Args:
+                speech: (Batch, Length, ...)
+                speech_lengths: (Batch, )
+                ind: int
+        """
+
+        # Data augmentation
+        if self.specaug is not None and self.training:
+            speech, speech_lengths = self.specaug(speech, speech_lengths)
+
+        # Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
+        if self.normalize is not None:
+            speech, speech_lengths = self.normalize(speech, speech_lengths)
+
+        # Forward encoder
+        # feats: (Batch, Length, Dim)
+        # -> encoder_out: (Batch, Length2, Dim2)
+        encoder_out, encoder_out_lens = self.encoder(speech, speech_lengths)
+
+        return encoder_out, encoder_out_lens
+
+    def _calc_ctc_loss(
+        self,
+        encoder_out: torch.Tensor,
+        encoder_out_lens: torch.Tensor,
+        ys_pad: torch.Tensor,
+        ys_pad_lens: torch.Tensor,
+    ):
+        # Calc CTC loss
+        loss_ctc = self.ctc(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens)
+
+        # Calc CER using CTC
+        cer_ctc = None
+        if not self.training and self.error_calculator is not None:
+            ys_hat = self.ctc.argmax(encoder_out).data
+            cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
+        return loss_ctc, cer_ctc
+
+    def inference(
+        self,
+        data_in,
+        data_lengths=None,
+        key: list = None,
+        tokenizer=None,
+        frontend=None,
+        **kwargs,
+    ):
+
+        if kwargs.get("batch_size", 1) > 1:
+            raise NotImplementedError("batch decoding is not implemented")
+
+        meta_data = {}
+        if (
+            isinstance(data_in, torch.Tensor) and kwargs.get("data_type", "sound") == "fbank"
+        ):  # fbank
+            speech, speech_lengths = data_in, data_lengths
+            if len(speech.shape) < 3:
+                speech = speech[None, :, :]
+            if speech_lengths is None:
+                speech_lengths = speech.shape[1]
+        else:
+            # extract fbank feats
+            time1 = time.perf_counter()
+            audio_sample_list = load_audio_text_image_video(
+                data_in,
+                fs=frontend.fs,
+                audio_fs=kwargs.get("fs", 16000),
+                data_type=kwargs.get("data_type", "sound"),
+                tokenizer=tokenizer,
+            )
+            time2 = time.perf_counter()
+            meta_data["load_data"] = f"{time2 - time1:0.3f}"
+            speech, speech_lengths = extract_fbank(
+                audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend
+            )
+            time3 = time.perf_counter()
+            meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
+            meta_data["batch_data_time"] = (
+                speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
+            )
+
+        speech = speech.to(device=kwargs["device"])
+        speech_lengths = speech_lengths.to(device=kwargs["device"])
+
+        language = kwargs.get("language", None)
+        if language is not None:
+            language_query = self.embed(
+                torch.LongTensor(
+                    [[self.lid_dict[language] if language in self.lid_dict else 0]]
+                ).to(speech.device)
+            ).repeat(speech.size(0), 1, 1)
+        else:
+            language_query = self.embed(torch.LongTensor([[0]]).to(speech.device)).repeat(
+                speech.size(0), 1, 1
+            )
+        textnorm = kwargs.get("text_norm", "wotextnorm")
+        textnorm_query = self.embed(
+            torch.LongTensor([[self.textnorm_dict[textnorm]]]).to(speech.device)
+        ).repeat(speech.size(0), 1, 1)
+        speech = torch.cat((textnorm_query, speech), dim=1)
+        speech_lengths += 1
+
+        event_emo_query = self.embed(torch.LongTensor([[1, 2]]).to(speech.device)).repeat(
+            speech.size(0), 1, 1
+        )
+        input_query = torch.cat((language_query, event_emo_query), dim=1)
+        speech = torch.cat((input_query, speech), dim=1)
+        speech_lengths += 3
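+        # final encoder input: [lid, event, emo, textnorm] query frames followed by the fbank
+        # frames, hence speech_lengths grows by 4 in total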
+
+        # Encoder
+        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
+        if isinstance(encoder_out, tuple):
+            encoder_out = encoder_out[0]
+
+        # CTC log-probabilities over the encoder output
+        ctc_logits = self.ctc.log_softmax(encoder_out)
+
+        results = []
+        b, n, d = encoder_out.size()
+        if isinstance(key[0], (list, tuple)):
+            key = key[0]
+        if len(key) < b:
+            key = key * b
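+        # greedy CTC decoding per utterance: frame-wise argmax, collapse repeats, then drop blank/sos/eos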
+        for i in range(b):
+            x = ctc_logits[i, : encoder_out_lens[i], :]
+            yseq = x.argmax(dim=-1)
+            yseq = torch.unique_consecutive(yseq, dim=-1)
+            yseq = torch.tensor([self.sos] + yseq.tolist() + [self.eos], device=yseq.device)
+            nbest_hyps = [Hypothesis(yseq=yseq)]
+
+            for nbest_idx, hyp in enumerate(nbest_hyps):
+                ibest_writer = None
+                if kwargs.get("output_dir") is not None:
+                    if not hasattr(self, "writer"):
+                        self.writer = DatadirWriter(kwargs.get("output_dir"))
+                    ibest_writer = self.writer[f"{nbest_idx + 1}best_recog"]
+
+                # remove sos/eos and get results
+                last_pos = -1
+                if isinstance(hyp.yseq, list):
+                    token_int = hyp.yseq[1:last_pos]
+                else:
+                    token_int = hyp.yseq[1:last_pos].tolist()
+
+                # remove blank symbol id, which is assumed to be 0
+                token_int = list(
+                    filter(
+                        lambda x: x != self.eos and x != self.sos and x != self.blank_id, token_int
+                    )
+                )
+
+                # Change integer-ids to tokens
+                text = tokenizer.decode(token_int)
+
+                result_i = {"key": key[i], "text": text}
+                results.append(result_i)
+
+                if ibest_writer is not None:
+                    ibest_writer["token"][key[i]] = " ".join(map(str, token_int))
+                    ibest_writer["text"][key[i]] = text
+
+        return results, meta_data
diff --git a/funasr/train_utils/load_pretrained_model.py b/funasr/train_utils/load_pretrained_model.py
index 02abfd5..8ed613c 100644
--- a/funasr/train_utils/load_pretrained_model.py
+++ b/funasr/train_utils/load_pretrained_model.py
@@ -10,36 +10,6 @@
 import pdb
 
 
-def filter_state_dict(
-    dst_state: Dict[str, Union[float, torch.Tensor]],
-    src_state: Dict[str, Union[float, torch.Tensor]],
-):
-    """Filter name, size mismatch instances between dicts.
-
-    Args:
-            dst_state: reference state dict for filtering
-            src_state: target state dict for filtering
-
-    """
-    match_state = {}
-    for key, value in src_state.items():
-        if key in dst_state and (dst_state[key].size() == src_state[key].size()):
-            match_state[key] = value
-        else:
-            if key not in dst_state:
-                logging.warning(
-                    f"Filter out {key} from pretrained dict"
-                    + " because of name not found in target dict"
-                )
-            else:
-                logging.warning(
-                    f"Filter out {key} from pretrained dict"
-                    + " because of size mismatch"
-                    + f"({dst_state[key].size()}-{src_state[key].size()})"
-                )
-    return match_state
-
-
 def load_pretrained_model(
     path: str,
     model: torch.nn.Module,
@@ -62,7 +32,7 @@
     obj = model
     dst_state = obj.state_dict()
 
-    print(f"ckpt: {path}")
+    logging.info(f"ckpt: {path}")
 
     if oss_bucket is None:
         src_state = torch.load(path, map_location=map_location)
@@ -77,8 +47,24 @@
     if isinstance(scope_map, str):
         scope_map = scope_map.split(",")
     scope_map += ["module.", "None"]
+    logging.info(f"scope_map: {scope_map}")
+
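+    # "excludes": optional comma-separated prefixes of parameter names to skip when loading the ckpt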
+    if excludes is not None:
+        if isinstance(excludes, str):
+            excludes = excludes.split(",")
+
+    logging.info(f"excludes: {excludes}")
 
     for k in dst_state.keys():
+        excludes_flag = False
+        if excludes is not None:
+            for k_ex in excludes:
+                if k.startswith(k_ex):
+                    logging.info(f"key: {k} matching: {k_ex}, excluded")
+                    excludes_flag = True
+                    break
+        if excludes_flag:
+            continue
 
         k_src = k
 
@@ -92,25 +78,25 @@
                 if dst_prefix == "" and (src_prefix + k) in src_state.keys():
                     k_src = src_prefix + k
                     if not k_src.startswith("module."):
-                        print(f"init param, map: {k} from {k_src} in ckpt")
+                        logging.info(f"init param, map: {k} from {k_src} in ckpt")
                 elif (
                     k.startswith(dst_prefix)
                     and k.replace(dst_prefix, src_prefix, 1) in src_state.keys()
                 ):
                     k_src = k.replace(dst_prefix, src_prefix, 1)
                     if not k_src.startswith("module."):
-                        print(f"init param, map: {k} from {k_src} in ckpt")
+                        logging.info(f"init param, map: {k} from {k_src} in ckpt")
 
         if k_src in src_state.keys():
             if ignore_init_mismatch and dst_state[k].shape != src_state[k_src].shape:
-                print(
+                logging.info(
                     f"ignore_init_mismatch:{ignore_init_mismatch}, dst: {k, dst_state[k].shape}, src: {k_src, src_state[k_src].shape}"
                 )
             else:
                 dst_state[k] = src_state[k_src]
 
         else:
-            print(f"Warning, miss key in ckpt: {k}, mapped: {k_src}")
+            print(f"Warning, miss key in ckpt: {k}, {path}")
 
     flag = obj.load_state_dict(dst_state, strict=True)
-    # print(flag)
+    logging.info(f"Loading ckpt: {path}, status: {flag}")
diff --git a/funasr/train_utils/trainer_ds.py b/funasr/train_utils/trainer_ds.py
index ec76531..85513a5 100644
--- a/funasr/train_utils/trainer_ds.py
+++ b/funasr/train_utils/trainer_ds.py
@@ -29,9 +29,10 @@
         with torch.cuda.amp.autocast(enabled=True, dtype=dtype, cache_enabled=False):
             yield
     else:
-        if dtype == torch.float16:
-            with autocast(enabled=True):
-                yield
+        if dtype == torch.float16 or dtype == torch.bfloat16:
+            yield
+            # with autocast(enabled=True, dtype=dtype):
+            #     yield
         else:
             yield
 
@@ -60,6 +61,7 @@
         use_ddp: bool = False,
         use_fsdp: bool = False,
         use_fp16: bool = False,
+        use_bf16: bool = False,
         use_deepspeed: bool = False,
         output_dir: str = "./",
         **kwargs,
@@ -78,7 +80,7 @@
                       output_dir (str): The directory where model checkpoints will be saved. Default is './'.
                       resume (str, optional): The file path to a checkpoint to resume training from.
         """
-        self.rank = kwargs.get("rank", 0)
+        self.rank = rank
         self.local_rank = local_rank
         self.world_size = world_size
         self.use_ddp = use_ddp
@@ -98,8 +100,11 @@
         self.batch_total = 0
         self.dtype = torch.float32
         self.use_fp16 = use_fp16
+        self.use_bf16 = use_bf16
         if self.use_fp16:
             self.dtype = torch.float16
+        if self.use_bf16:
+            self.dtype = torch.bfloat16
         self.save_checkpoint_interval = kwargs.get("save_checkpoint_interval", 5000)
         self.validate_interval = kwargs.get("validate_interval", 5000)
         self.keep_nbest_models = kwargs.get("keep_nbest_models", 500)
@@ -147,6 +152,16 @@
 
         self.use_deepspeed = use_deepspeed
         self.deepspeed_config = kwargs.get("deepspeed_config", "")
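+        # "excludes": parameter-name prefixes to skip when restoring weights from a resumed checkpoint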
+        excludes = kwargs.get("excludes", None)
+        if excludes is not None:
+            if isinstance(excludes, str):
+                excludes = excludes.split(",")
+        self.excludes = excludes
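+        # "effective_save_name_excludes": prefixes dropped from the state dict when saving checkpoints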
+        effective_save_name_excludes = kwargs.get("effective_save_name_excludes", None)
+        if effective_save_name_excludes is not None:
+            if isinstance(effective_save_name_excludes, str):
+                effective_save_name_excludes = effective_save_name_excludes.split(",")
+        self.effective_save_name_excludes = effective_save_name_excludes
 
     def save_checkpoint(
         self,
@@ -277,11 +292,12 @@
         elif self.use_fsdp:
             pass
         elif self.rank == 0:
-            logging.info(f"Save checkpoint: {epoch}, rank: {self.local_rank}\n")
+            logging.info(
+                f"Save checkpoint: {epoch}, rank: {self.rank}, local_rank: {self.local_rank}\n"
+            )
             # self.step_or_epoch += 1
             state = {
                 "epoch": epoch,
-                "state_dict": model.state_dict(),
                 "optimizer": optim.state_dict(),
                 "scheduler": scheduler.state_dict(),
                 "saved_ckpts": self.saved_ckpts,
@@ -299,7 +315,24 @@
             }
             step = step_in_epoch
             if hasattr(model, "module"):
-                state["state_dict"] = model.module.state_dict()
+                state_dict = model.module.state_dict()
+            else:
+                state_dict = model.state_dict()
+
+            if self.effective_save_name_excludes is not None:
+                logging.info(f"effective_save_name_excludes: {self.effective_save_name_excludes}")
+                dst_state_dict = {}
+                for k in state_dict.keys():
+                    for k_ex in self.effective_save_name_excludes:
+                        k_tmp = k.replace("module.", "")
+                        if k_tmp.startswith(k_ex):
+                            logging.info(f"key: {k} matching: {k_ex}, skip saving it")
+                            break
+                    else:
+                        dst_state_dict[k] = state_dict[k]
+                state["state_dict"] = dst_state_dict
+            else:
+                state["state_dict"] = state_dict
 
             if scaler:
                 state["scaler_state"] = scaler.state_dict()
@@ -440,6 +473,16 @@
                     src_state = checkpoint["state_dict"]
                     dst_state = model.state_dict()
                     for k in dst_state.keys():
+                        excludes_flag = False
+                        if self.excludes is not None:
+                            for k_ex in self.excludes:
+                                k_tmp = k.replace("module.", "")
+                                if k_tmp.startswith(k_ex):
+                                    logging.info(f"key: {k} matching: {k_ex}, excluded")
+                                    excludes_flag = True
+                                    break
+                        if excludes_flag:
+                            continue
                         if not k.startswith("module.") and "module." + k in src_state.keys():
                             k_ddp = "module." + k
                         elif k.startswith("module.") and "module." + k not in src_state.keys():
@@ -640,7 +683,7 @@
             scaled_loss = model.backward(loss)
         else:
             loss = loss / self.accum_grad
-            if self.use_fp16:
+            if self.use_fp16 or self.use_bf16:
                 scaler.scale(loss).backward()
             else:
                 loss.backward()
@@ -668,7 +711,7 @@
                 # Execute an optimization step (update model parameters)
                 if self.use_ddp or self.use_fsdp:
                     dist.barrier()
-                if self.use_fp16:
+                if self.use_fp16 or self.use_bf16:
                     scaler.step(optim)
                     scaler.update()
                 else:

--
Gitblit v1.9.1