Dev gzf (#1377)
* update train recipe
* v1.0.8
* llm
* update trainer
Data-prep commands from the updated train recipe; the `scp2jsonl` converter is now invoked as a module (`python -m ...`) rather than by file path:

    # git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}

    ## generate jsonl from wav.scp and text.txt
    # python -m funasr.datasets.audio_datasets.scp2jsonl \
    # ++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
    # ++data_type_list='["source", "target"]' \
    # ++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
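For reference, a minimal sketch of what the scp-to-jsonl conversion does, assuming Kaldi-style `wav.scp` (`utt_id path`) and `text.txt` (`utt_id transcript`) layouts; the exact field names FunASR emits may differ:

    import json

    def scp_pair_to_jsonl(wav_scp, text_txt, jsonl_out):
        """Join wav.scp and text.txt on utterance id; emit one JSON object per line."""
        def read_scp(path):
            with open(path, encoding="utf-8") as f:
                return dict(line.strip().split(maxsplit=1) for line in f if line.strip())

        wavs, texts = read_scp(wav_scp), read_scp(text_txt)
        with open(jsonl_out, "w", encoding="utf-8") as f:
            for utt_id, wav_path in wavs.items():
                if utt_id in texts:  # keep only utterances present in both files
                    record = {"key": utt_id, "source": wav_path, "target": texts[utt_id]}
                    f.write(json.dumps(record, ensure_ascii=False) + "\n")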
The converter's hydra entry point in `funasr/datasets/audio_datasets/scp2jsonl.py`, with imports added and the elided option handling restored for readability:

    import hydra
    from omegaconf import DictConfig, OmegaConf


    @hydra.main(config_name=None, version_base=None)
    def main_hydra(cfg: DictConfig):
        # Command-line ++key=value overrides arrive merged into cfg.
        kwargs = OmegaConf.to_container(cfg, resolve=True)

        scp_file_list = kwargs.get(
            "scp_file_list",
            ("/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"),
        )
        data_type_list = kwargs.get("data_type_list", ("source", "target"))
        jsonl_file_out = kwargs.get("jsonl_file_out", "/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl")

        # gen_jsonl_from_wav_text_list is defined earlier in the same file.
        gen_jsonl_from_wav_text_list(scp_file_list, data_type_list=data_type_list, jsonl_file_out=jsonl_file_out)


    """
    python -m funasr.datasets.audio_datasets.scp2jsonl \
    ++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
    ++data_type_list='["source", "target"]' \
    ++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
    """

    if __name__ == "__main__":
        main_hydra()
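The `++key=value` arguments are hydra dot-list overrides, and `OmegaConf.to_container(cfg, resolve=True)` flattens the merged config into plain Python containers. A minimal standalone sketch of that mechanism, using `OmegaConf.from_dotlist` in place of the hydra entry point:

    from omegaconf import OmegaConf

    # Simulate the command-line overrides as a dot-list.
    cfg = OmegaConf.from_dotlist([
        'scp_file_list=["wav.scp", "text.txt"]',
        'data_type_list=["source", "target"]',
        "jsonl_file_out=audio_datasets.jsonl",
    ])
    kwargs = OmegaConf.to_container(cfg, resolve=True)  # plain dict / lists
    print(kwargs["scp_file_list"])  # ['wav.scp', 'text.txt']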
From the mossformer model code, a guarded import for the optional `rotary_embedding_torch` dependency, with an install hint when it is missing:

    try:
        from rotary_embedding_torch import RotaryEmbedding
    except ImportError:
        print(
            "If you want to use mossformer, please install rotary_embedding_torch by:"
            "\n pip install -U rotary_embedding_torch"
        )

    from funasr.models.transformer.layer_norm import GlobalLayerNorm, CumulativeLayerNorm, ScaleNorm
    from funasr.models.transformer.embedding import ScaledSinuEmbedding
    from funasr.models.transformer.mossformer import FLASH_ShareA_FFConvM
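For context, a small sketch of the `rotary_embedding_torch` API that the guard imports, following the library's documented usage (shapes here are illustrative):

    import torch
    from rotary_embedding_torch import RotaryEmbedding

    rotary = RotaryEmbedding(dim=32)  # rotate the first 32 dims of each attention head

    # queries/keys shaped (batch, heads, seq_len, head_dim)
    q = torch.randn(1, 8, 1024, 64)
    k = torch.randn(1, 8, 1024, 64)

    # apply the rotation before computing attention scores
    q = rotary.rotate_queries_or_keys(q)
    k = rotary.rotate_queries_or_keys(k)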
In the model's inference path, the input tensor and its lengths are normalized up front; the updated branch squeezes a provided `speech_lengths` instead of testing `None` twice:

    speech, speech_lengths = data_in, data_lengths
    if len(speech.shape) < 3:
        speech = speech[None, :, :]  # (time, dim) -> (batch=1, time, dim)
    if speech_lengths is not None:
        speech_lengths = speech_lengths.squeeze(-1)
    else:
        speech_lengths = speech.shape[1]
    # ... an enclosing else-branch (not shown) extracts fbank feats from raw waveforms
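A standalone illustration of that normalization on pre-extracted fbank features (values here are hypothetical):

    import torch

    feats = torch.randn(938, 80)     # one utterance: (time, feat_dim)
    speech = feats[None, :, :]       # add the batch axis -> (1, 938, 80)

    lengths = torch.tensor([[938]])  # e.g. collated with a trailing axis: (batch, 1)
    lengths = lengths.squeeze(-1)    # -> (batch,), the shape the encoder expects
    print(speech.shape, lengths)     # torch.Size([1, 938, 80]) tensor([938])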
In the trainer's epoch loop, the per-epoch timing log is now tagged with the local rank, and rank 0 averages the n-best checkpoints:

    time2 = time.perf_counter()
    time_escaped = (time2 - time1) / 3600.0
    print(
        f"\nrank: {self.local_rank}, "
        f"time_escaped_epoch: {time_escaped:.3f} hours, "
        f"estimated to finish {self.max_epoch} epoch: {(self.max_epoch - epoch) * time_escaped:.3f}\n"
    )

    if self.rank == 0:
        average_checkpoints(self.output_dir, self.avg_nbest_model)
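Checkpoint averaging combines the weights of several saved epochs into one model, which typically generalizes better than any single checkpoint. A minimal sketch in the spirit of `average_checkpoints` (the real helper's file layout and best-model selection may differ):

    import torch

    def average_state_dicts(ckpt_paths):
        """Average the parameters of several checkpoints of the same model."""
        avg = None
        for path in ckpt_paths:
            state = torch.load(path, map_location="cpu")
            if avg is None:
                avg = {k: v.clone().float() for k, v in state.items()}
            else:
                for k in avg:
                    avg[k] += state[k].float()
        return {k: v / len(ckpt_paths) for k, v in avg.items()}

    # e.g. average 5 checkpoints and save the result (paths are hypothetical)
    # avg = average_state_dicts([f"model.ep{i}.pb" for i in range(5, 10)])
    # torch.save(avg, "model_avg.pb")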
In the training loop's logging step, TensorBoard scalars are now written against the running `self.batch_total` counter instead of recomputing the step as `epoch * len(self.dataloader_train) + batch_idx`:

    )
    pbar.set_description(description)
    if self.writer:
        self.writer.add_scalar(f'rank{self.local_rank}_Loss/train', loss.item(), self.batch_total)
        self.writer.add_scalar(f'rank{self.local_rank}_lr/train', lr, self.batch_total)
        for key, var in stats.items():
            self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', var.item(), self.batch_total)
        for key, var in speed_stats.items():
            # speed stats are stored as strings, hence the eval
            self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', eval(var), self.batch_total)

    # if batch_idx == 2:
    #     break

    pbar.close()
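The running counter matters once training resumes from a checkpoint or the dataloader length changes between epochs: `epoch * len(dataloader) + batch_idx` can then repeat or skip global steps, while a persisted `batch_total` stays monotonic. A minimal sketch with `torch.utils.tensorboard` (names are illustrative):

    from torch.utils.tensorboard import SummaryWriter

    writer = SummaryWriter(log_dir="exp/tensorboard")
    batch_total = 0  # saved and restored together with the trainer state

    for epoch in range(2):
        for loss in [0.9, 0.7, 0.5]:
            batch_total += 1
            # one monotonically increasing x-axis across epochs and resumes
            writer.add_scalar("Loss/train", loss, batch_total)
    writer.close()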
In `_validate_epoch`, the progress description is built the same way as in training, tagged with the rank (the snippet is truncated here):

    def _validate_epoch(self, epoch):
        ...
        if (batch_idx + 1) % self.log_interval == 0 or (batch_idx + 1) == len(self.dataloader_val):
            pbar.update(self.log_interval)
            time_now = datetime.now()
            time_now = time_now.strftime("%Y-%m-%d %H:%M:%S")
            description = (
                f"{time_now}, "
                f"rank: {self.local_rank}, "
                f"validation epoch: {epoch}/{self.max_epoch}, "
                f"step: {batch_idx+1}/{len(self.dataloader_val)}, "