from funasr.train_utils.device_funcs import to_device
import traceback

dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}


@tables.register("model_classes", "LLMASR")
class LLMASR(nn.Module):
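    # Registered under "model_classes" as "LLMASR" (decorator above), so FunASR can build this
    # model by name from a config. The __init__ signature and surrounding setup are elided in this
    # excerpt; encoder_class, input_size, audio_encoder_conf, llm_conf, audio_adaptor and
    # audio_adaptor_conf referenced below come from that config.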
    # ...
        audio_encoder = encoder_class(input_size=input_size, **audio_encoder_conf)
        audio_encoder_output_size = audio_encoder.output_size()
        freeze = audio_encoder_conf.get("freeze", True)
        freeze_layer_num = int(audio_encoder_conf.get("freeze_layer_num", -1))
        # if freeze_layer_num > 0:
        #     freeze_layer_num = range(freeze_layer_num)

        if freeze:
            for name, param in audio_encoder.named_parameters():
                if freeze_layer_num > 0:
                    # freeze only the first `freeze_layer_num` numbered layers
                    idx = re.search(r"\.\d+\.", name)
                    if idx is not None:
                        beg, end = idx.regs[0]
                        layer_id = int(name[beg + 1 : end - 1])
                        if layer_id < freeze_layer_num:
                            param.requires_grad = False
                    else:
                        # parameters without a layer index (e.g. embeddings) stay frozen
                        param.requires_grad = False
                else:
                    param.requires_grad = False

            audio_encoder.eval()

        self.audio_encoder = audio_encoder
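        # Note: layer indices are recovered from parameter names via the ``\.\d+\.`` pattern,
        # e.g. a name like "encoders.3.self_attn.linear_q.weight" (illustrative, the exact naming
        # depends on the encoder class) yields layer_id 3; layers with layer_id < freeze_layer_num
        # are frozen, while later layers remain trainable.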

        # llm
        hub = llm_conf.get("hub", "hf")
        self.llm = None
        if hub == "hf":
            from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig

            init_param_path = llm_conf.get("init_param_path", "vicuna-7b-v1.5")

            model = AutoModelForCausalLM.from_pretrained(
                init_param_path,
                load_in_8bit=None,
                device_map=None,
                use_cache=None,
            )
            freeze = llm_conf.get("freeze", True)
            if freeze:
                for name, param in model.named_parameters():
                    param.requires_grad = False
                model.eval()
            self.llm = model
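            # The HF model is loaded with quantization/device_map/use_cache left unset and, by
            # default, fully frozen; in this setup only the audio adaptor (and optionally part of
            # the audio encoder) receives gradients.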
            llm_dim = model.get_input_embeddings().weight.shape[-1]
        self.llm_dtype = llm_conf.get("llm_dtype", "fp32")

        # adaptor
        adaptor_class = tables.adaptor_classes.get(audio_adaptor)
        audio_adaptor_conf["encoder_dim"] = audio_encoder_output_size
        audio_adaptor_conf["llm_dim"] = llm_dim
        audio_adaptor = adaptor_class(**audio_adaptor_conf)
        init_param_path = audio_adaptor_conf.get("init_param_path", None)
        if init_param_path is not None:
            src_state = torch.load(init_param_path, map_location="cpu")
            flag = audio_adaptor.load_state_dict(src_state, strict=False)
            logging.info(f"Loading audio_adaptor ckpt: {init_param_path}, status: {flag}")

        self.audio_adaptor = audio_adaptor

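        # A minimal sketch of the conf dicts consumed above (keys taken from this code; the
        # concrete values are assumptions, not shipped defaults):
        #   audio_encoder_conf: {"freeze": True, "freeze_layer_num": -1, ...}
        #   llm_conf:           {"hub": "hf", "init_param_path": "vicuna-7b-v1.5",
        #                        "freeze": True, "llm_dtype": "bf16"}
        #   audio_adaptor_conf: {"init_param_path": None, ...}  # encoder_dim/llm_dim are filled in here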
    # ...
                batch_idx, :min_len, :
            ]

        with torch.cuda.amp.autocast(
            enabled=True if self.llm_dtype != "fp32" else False, dtype=dtype_map[self.llm_dtype]
        ):
            labels_ids[labels_ids == -1] = -100
            attention_mask[attention_mask < 0] = 0
            model_outputs = self.llm(
                inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
            )
            loss = model_outputs.loss
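            # -100 is the ignore_index expected by the HF causal-LM loss, and negative attention
            # positions are zeroed out; autocast is only enabled when llm_dtype is "bf16"/"fp16"
            # (see dtype_map at module level).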

        stats = {}
        with torch.no_grad():
    # ...

        return contents

    def data_load_speech(self, contents: dict, tokenizer, frontend, meta_data={}, **kwargs):

        system = contents["system"]
        user = contents["user"]
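        # contents is the dict produced by data_template above; besides "system" and "user" it
        # also carries an "assistant" entry, which is used as the reference label during inference.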
    # ...
            )
            if sub_str.startswith("!"):
                try:
                    time1 = time.perf_counter()
                    data_src = load_audio_text_image_video(sub_str[1:], fs=frontend.fs)
                    time2 = time.perf_counter()
                    meta_data["load_data"] = f"{time2 - time1:0.3f}"
                except Exception as e:
                    logging.error(f"Loading wav failed! {str(e)}, {traceback.format_exc()}")

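            # A user segment prefixed with "!" is treated as an audio resource (path/URL): it is
            # loaded and converted to fbank features, with per-stage timings recorded in meta_data.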
    # ...
                    frontend=frontend,
                    is_final=True,
                )  # speech: [b, T, d]

                time3 = time.perf_counter()
                meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
                meta_data["batch_data_time"] = (
                    speech_lengths.sum().item()
                    * frontend.frame_shift
                    * frontend.lfr_n
                    / 1000
                )

                if kwargs.get("permute", True):
                    speech = speech.permute(0, 2, 1)
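                # speech leaves this method as [b, d, T]; the inference path below permutes it
                # back to [b, T, d] with speech.permute(0, 2, 1) before the audio encoder.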
    # ...
            raise NotImplementedError("batch decoding is not implemented")

        contents = self.data_template(data_in[0])
        output = self.data_load_speech(contents, tokenizer, frontend, meta_data=meta_data, **kwargs)
        batch = to_device(output, kwargs["device"])

        # audio encoder
        speech = batch["speech"]
        speech_lengths = batch["speech_lengths"][:, 0]
        # fp16
        if kwargs.get("fp16", False):
            speech = speech.to(torch.float16)
        elif kwargs.get("bf16", False):
            speech = speech.to(torch.bfloat16)
        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)

        # audio_adaptor
    # ...
            batch_idx, :min_len, :
        ]

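        # Two decoding modes follow: by default the LLM free-runs with generate(); if the
        # "tearchforing" flag (spelling as in the source) is set, the reference labels are instead
        # teacher-forced through the LLM and the argmax of the logits plus the loss are reported.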
        llm_dtype = kwargs.get("llm_dtype", "fp32")
        if llm_dtype == "fp32":
            llm_dtype = "fp16" if kwargs.get("fp16", False) else llm_dtype
            llm_dtype = "bf16" if kwargs.get("bf16", False) else llm_dtype

        with torch.cuda.amp.autocast(
            enabled=True if llm_dtype != "fp32" else False, dtype=dtype_map[llm_dtype]
        ):
            label = contents["assistant"][0]
            self.llm = self.llm.to(dtype_map[llm_dtype])
            inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])

            if not kwargs.get("tearchforing", False):
                generated_ids = self.llm.generate(
                    inputs_embeds=inputs_embeds, max_new_tokens=kwargs.get("max_length", 512)
                )
                # generated_ids = [
                #     output_ids[len(input_id) :]
                #     for input_id, output_ids in zip(input_ids, generated_ids)
                # ]
                response = tokenizer.batch_decode(
                    generated_ids, skip_special_tokens=kwargs.get("skip_special_tokens", True)
                )[0]
                loss = None
            else:
                labels_ids = batch["labels_ids"]
                labels_ids[labels_ids == -1] = -100
                attention_mask = batch.get("attention_mask", None)
                # attention_mask = attention_mask.to(dtype_map[llm_dtype])
                model_outputs = self.llm(
                    inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
                )

                preds = torch.argmax(model_outputs.logits, -1)[:, source_ids.shape[1] :]
                response = tokenizer.batch_decode(
                    preds,
                    add_special_tokens=False,
                    skip_special_tokens=kwargs.get("skip_special_tokens", True),
                )[0]
                loss = model_outputs.loss.item()

        ibest_writer = None
        if kwargs.get("output_dir") is not None:
    # ...
            ibest_writer = self.writer[f"{0 + 1}best_recog"]

        results = []
        response_clean = re.sub(r"[^\w\s\u3000\u4e00-\u9fff]+", "", response)
        result_i = {"key": key[0], "text": response, "text_tn": response_clean, "label": label}
        if loss is not None:
            result_i["loss"] = loss
        results.append(result_i)
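        # text_tn is a lightly normalized copy of the response: everything except word characters,
        # whitespace, the ideographic space (\u3000) and CJK ideographs (\u4e00-\u9fff) is stripped,
        # so punctuation is removed before it is written alongside the raw response.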
    # ...
        if ibest_writer is not None:
            ibest_writer["text"][key[0]] = response
            ibest_writer["label"][key[0]] = label
            ibest_writer["text_tn"][key[0]] = response_clean

        return results, meta_data