import logging
import os

import numpy as np
import torch

from funasr.datasets.small_datasets.dataset import ESPnetDataset
from funasr.datasets.small_datasets.preprocessor import build_preprocess
from funasr.samplers.length_batch_sampler import LengthBatchSampler

# bs_list holds the size (number of samples) of each mini-batch produced by the sampler.
logging.info(
    f"[{mode}] mini-batch sizes summary: N-batch={len(bs_list)}, "
    f"mean={np.mean(bs_list):.1f}, min={np.min(bs_list)}, max={np.max(bs_list)}"
)

# For a tri-stage LR scheduler, the total number of optimizer updates is
# (mini-batches per epoch) * (number of epochs).
if args.scheduler == "tri_stage" and mode == "train":
    args.max_update = len(bs_list) * args.max_epoch
    logging.info(f"Max update: {args.max_update}")
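# Illustrative arithmetic for the tri-stage max_update computed above
# (example numbers, not from this codebase): if the sampler yields
# len(bs_list) == 1200 mini-batches per epoch and args.max_epoch == 50,
# then args.max_update = 1200 * 50 = 60000 optimizer steps.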
if args.distributed:
    world_size = torch.distributed.get_world_size()
    rank = torch.distributed.get_rank()
    # Every batch must contain at least one sample per rank; otherwise some
    # rank would receive an empty shard from the slicing below.
    for batch in batches:
        if len(batch) < world_size:
            raise RuntimeError(
                f"The batch size must be at least world_size: "
                f"{len(batch)} < {world_size}"
            )
    # Shard each batch across ranks with a stride of world_size.
    batches = [batch[rank::world_size] for batch in batches]
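# A minimal sketch of the strided sharding above (sample keys and
# world_size are made-up example values, not from this codebase):
#
#     batch = ["utt0", "utt1", "utt2", "utt3", "utt4", "utt5", "utt6"]
#     world_size = 3
#     shards = [batch[rank::world_size] for rank in range(world_size)]
#     # rank 0 -> ['utt0', 'utt3', 'utt6']
#     # rank 1 -> ['utt1', 'utt4']
#     # rank 2 -> ['utt2', 'utt5']
#
# The shards are disjoint and together cover the batch; when len(batch) is
# not divisible by world_size, the first len(batch) % world_size ranks
# receive one extra sample.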