游雁
2023-11-24 b5d3df75cf6462aa3bf42fd3c86fa2aa7f1c8a15
funasr/build_utils/build_trainer.py
@@ -25,7 +25,6 @@
 import torch
 import torch.nn
 import torch.optim
-from typeguard import check_argument_types
 from funasr.iterators.abs_iter_factory import AbsIterFactory
 from funasr.main_funcs.average_nbest_models import average_nbest_models
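The lines removed throughout this commit are typeguard's runtime argument checks. A minimal sketch of what they did, assuming typeguard 2.x (where `check_argument_types` is available); `scale` is a hypothetical function for illustration only:

```python
from typeguard import check_argument_types

def scale(x: float, factor: int) -> float:
    # Inspects the caller's frame and verifies each argument against its
    # annotation; returns True on success, so it composes with assert and
    # is stripped entirely under `python -O`.
    assert check_argument_types()
    return x * factor

scale(2.0, 3)    # passes
scale(2.0, "3")  # TypeError: type of argument "factor" must be int
```

Dropping the asserts trades this runtime safety net for one less dependency and slightly less per-call overhead.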
@@ -118,7 +117,6 @@
     def build_options(self, args: argparse.Namespace) -> TrainerOptions:
         """Build options consumed by train(), eval()"""
-        assert check_argument_types()
         return build_dataclass(TrainerOptions, args)
     @classmethod
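For context, `build_dataclass(TrainerOptions, args)` still does the real work here. Its implementation is not part of this patch; a rough sketch of the assumed behavior (copying only the Namespace attributes the dataclass declares):

```python
import argparse
import dataclasses

def build_dataclass(cls, args: argparse.Namespace):
    # Assumed behavior: pull from args exactly the fields the target
    # dataclass declares, failing loudly if one is missing.
    kwargs = {}
    for field in dataclasses.fields(cls):
        if not hasattr(args, field.name):
            raise ValueError(f"args does not have the attribute {field.name}")
        kwargs[field.name] = getattr(args, field.name)
    return cls(**kwargs)
```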
@@ -156,7 +154,6 @@
     def run(self) -> None:
         """Perform training. This method performs the main process of training."""
-        assert check_argument_types()
         # NOTE(kamo): Don't check the type more strictly as far trainer_options
         model = self.model
         optimizers = self.optimizers
@@ -249,14 +246,11 @@
         for iepoch in range(start_epoch, trainer_options.max_epoch + 1):
             if iepoch != start_epoch:
                 logging.info(
-                    "{}/{}epoch started. Estimated time to finish: {}".format(
+                    "{}/{}epoch started. Estimated time to finish: {} hours".format(
                         iepoch,
                         trainer_options.max_epoch,
-                        humanfriendly.format_timespan(
-                            (time.perf_counter() - start_time)
-                            / (iepoch - start_epoch)
-                            * (trainer_options.max_epoch - iepoch + 1)
-                        ),
+                        (time.perf_counter() - start_time) / 3600.0 / (iepoch - start_epoch) * (
+                                trainer_options.max_epoch - iepoch + 1),
                     )
                 )
             else:
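This hunk swaps `humanfriendly.format_timespan` for a raw hour count: average elapsed time per finished epoch, scaled by the epochs still to run. A standalone sketch of the same arithmetic (`eta_hours` is a hypothetical helper, not part of the patch):

```python
import time

def eta_hours(start_time: float, start_epoch: int, iepoch: int, max_epoch: int) -> float:
    # Elapsed wall time over completed epochs gives hours per epoch; the
    # caller only logs when iepoch != start_epoch, so no division by zero.
    elapsed_h = (time.perf_counter() - start_time) / 3600.0
    epochs_done = iepoch - start_epoch
    epochs_left = max_epoch - iepoch + 1  # the epoch just starting counts as remaining
    return elapsed_h / epochs_done * epochs_left
```

For example, 1.5 h spent on the first two epochs with eight epochs left yields 1.5 / 2 * 8 = 6.0, so the log now reads "Estimated time to finish: 6.0 hours" instead of humanfriendly's formatted timespan.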
@@ -522,7 +516,6 @@
             options: TrainerOptions,
             distributed_option: DistributedOption,
     ) -> Tuple[bool, bool]:
-        assert check_argument_types()
         grad_noise = options.grad_noise
         accum_grad = options.accum_grad
@@ -758,7 +751,6 @@
             options: TrainerOptions,
             distributed_option: DistributedOption,
     ) -> None:
-        assert check_argument_types()
         ngpu = options.ngpu
         # no_forward_run = options.no_forward_run
         distributed = distributed_option.distributed