Merge branch 'main' of github.com:alibaba-damo-academy/FunASR
add
| | |
| | | # Get Started |
| | | To use this example, please execute the first stage of run.sh first to obtain the prepared data and pre-trained models: |
| | | ```shell |
| | | sh run.sh --stage 0 --stop_stage 0 |
| | | ``` |
| | | Then, you can execute unit_test.py to check the correctness of the code: |
| | | ```shell |
| | | python unit_test.py |
| | | # you will get the results: |
| | | [{'key': 'R8002_M8002_MS802-S0000_0000000_0001600', 'value': 'spk1 [(0.0, 8.88), (10.72, 11.92), (12.64, 15.2)]\nspk2 [(8.8, 9.76)]\nspk3 [(9.6, 10.96), (15.12, 15.68)]\nspk4 [(11.12, 12.72)]'}] |
| | | [{'key': 'R8002_M8002_MS802-S0000_0000000_0001600', 'value': 'spk1 [(0.0, 8.88), (10.72, 11.92), (12.64, 15.2)]\nspk2 [(8.8, 9.76)]\nspk3 [(9.6, 10.96), (15.12, 15.68)]\nspk4 [(11.12, 12.72)]'}] |
| | | [{'key': 'R8002_M8002_MS802-S0000_0000000_0001600', 'value': 'spk1 [(0.0, 8.88), (10.72, 11.92), (12.64, 15.2)]\nspk2 [(8.8, 9.76)]\nspk3 [(9.6, 10.88), (15.12, 15.68)]\nspk4 [(11.12, 12.72)]'}] |
| | | [{'key': 'test0', 'value': 'spk1 [(0.0, 8.88), (10.64, 15.2)]\nspk2 [(8.88, 9.84)]\nspk3 [(9.6, 11.04), (15.12, 15.68)]\nspk4 [(11.2, 11.76)]'}] |
| | | ``` |
| | | You can also execute run.sh to reproduce the diarization performance reported in [1]: |
| | | ```shell |
| | | sh run.sh --stage 1 --stop_stage 2 |
| | | ``` |
| | | |
| | | # Results |
| | | After executing "run.sh", you will get a DER of about 4.21%, which is reported in [1], Table 6, line "SOND Oracle Profile". |
| | | |
| | | # Reference |
| | | [1] Speaker Overlap-aware Neural Diarization for Multi-party Meeting Analysis, Zhihao Du, Shiliang Zhang, |
| | |
| | | import os |
| | | import random |
| | | import soundfile |
| | | import numpy |
| | | from functools import partial |
| | | |
| | | import torch |
| | | import torchaudio |
| | | import torch.distributed as dist |
| | | from kaldiio import ReadHelper |
| | | from torch.utils.data import IterableDataset |
| | |
| | | sample_dict["key"] = key |
| | | elif data_type == "sound": |
| | | key, path = item.strip().split() |
| | | mat, sampling_rate = soundfile.read(path) |
| | | waveform, sampling_rate = torchaudio.load(path) |
| | | waveform = waveform.numpy() |
| | | mat = waveform[0] |
| | | sample_dict[data_name] = mat |
| | | sample_dict["sampling_rate"] = sampling_rate |
| | | if data_name == "speech": |