嘉渊
2023-04-24 6427c834dfd97b1f05c6659cdc7ccf010bf82fe1
funasr/bin/vad_inference.py
@@ -1,6 +1,8 @@
import argparse
import logging
import os
import sys
import json
from pathlib import Path
from typing import Any
from typing import List
@@ -10,6 +12,7 @@
from typing import Union
from typing import Dict
import math
import numpy as np
import torch
from typeguard import check_argument_types
@@ -80,11 +83,13 @@
        self.device = device
        self.dtype = dtype
        self.frontend = frontend
+        self.batch_size = batch_size
    @torch.no_grad()
    def __call__(
-            self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None
-    ) -> List[List[int]]:
+            self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None,
+            in_cache: Dict[str, torch.Tensor] = dict()
+    ) -> Tuple[List[List[int]], Dict[str, torch.Tensor]]:
        """Inference
        Args:
@@ -100,106 +105,38 @@
            speech = torch.tensor(speech)
        if self.frontend is not None:
-            feats, feats_len = self.frontend.forward(speech, speech_lengths)
+            # Disable the frontend's max-length filter so long inputs pass through whole.
+            self.frontend.filter_length_max = math.inf
+            # Compute fbank features first, then apply LFR and CMVN separately,
+            # so the raw fbanks can also be returned to the caller.
+            fbanks, fbanks_len = self.frontend.forward_fbank(speech, speech_lengths)
+            feats, feats_len = self.frontend.forward_lfr_cmvn(fbanks, fbanks_len)
+            fbanks = to_device(fbanks, device=self.device)
            feats = to_device(feats, device=self.device)
            feats_len = feats_len.int()
        else:
            raise Exception("Need to extract feats first; please configure the frontend")
-        batch = {"feats": feats, "feats_lengths": feats_len, "waveform": speech}
-        # a. To device
-        batch = to_device(batch, device=self.device)
-        # b. Forward Encoder
-        segments = self.vad_model(**batch)
-        return segments
-#def inference(
-#        batch_size: int,
-#        ngpu: int,
-#        log_level: Union[int, str],
-#        data_path_and_name_and_type,
-#        vad_infer_config: Optional[str],
-#        vad_model_file: Optional[str],
-#        vad_cmvn_file: Optional[str] = None,
-#        raw_inputs: Union[np.ndarray, torch.Tensor] = None,
-#        key_file: Optional[str] = None,
-#        allow_variable_data_keys: bool = False,
-#        output_dir: Optional[str] = None,
-#        dtype: str = "float32",
-#        seed: int = 0,
-#        num_workers: int = 1,
-#        fs: Union[dict, int] = 16000,
-#        **kwargs,
-#):
-#    assert check_argument_types()
-#    if batch_size > 1:
-#        raise NotImplementedError("batch decoding is not implemented")
-#    if ngpu > 1:
-#        raise NotImplementedError("only single GPU decoding is supported")
-#
-#    logging.basicConfig(
-#        level=log_level,
-#        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
-#    )
-#
-#    if ngpu >= 1 and torch.cuda.is_available():
-#        device = "cuda"
-#    else:
-#        device = "cpu"
-#
-#    # 1. Set random-seed
-#    set_all_random_seed(seed)
-#
-#    # 2. Build speech2vadsegment
-#    speech2vadsegment_kwargs = dict(
-#        vad_infer_config=vad_infer_config,
-#        vad_model_file=vad_model_file,
-#        vad_cmvn_file=vad_cmvn_file,
-#        device=device,
-#        dtype=dtype,
-#    )
-#    logging.info("speech2vadsegment_kwargs: {}".format(speech2vadsegment_kwargs))
-#    speech2vadsegment = Speech2VadSegment(**speech2vadsegment_kwargs)
-#    # 3. Build data-iterator
-#    loader = VADTask.build_streaming_iterator(
-#        data_path_and_name_and_type,
-#        dtype=dtype,
-#        batch_size=batch_size,
-#        key_file=key_file,
-#        num_workers=num_workers,
-#        preprocess_fn=VADTask.build_preprocess_fn(speech2vadsegment.vad_infer_args, False),
-#        collate_fn=VADTask.build_collate_fn(speech2vadsegment.vad_infer_args, False),
-#        allow_variable_data_keys=allow_variable_data_keys,
-#        inference=True,
-#    )
-#
-#    finish_count = 0
-#    file_count = 1
-#    # 7. Start for-loop
-#    # FIXME(kamo): The output format should be discussed
-#    if output_dir is not None:
-#        writer = DatadirWriter(output_dir)
-#    else:
-#        writer = None
-#
-#    vad_results = []
-#    for keys, batch in loader:
-#        assert isinstance(batch, dict), type(batch)
-#        assert all(isinstance(s, str) for s in keys), keys
-#        _bs = len(next(iter(batch.values())))
-#        assert len(keys) == _bs, f"{len(keys)} != {_bs}"
-#        # batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
-#
-#        # do vad segment
-#        results = speech2vadsegment(**batch)
-#        for i, _ in enumerate(keys):
-#            item = {'key': keys[i], 'value': results[i]}
-#            vad_results.append(item)
-#
-#    return vad_results
+        # b. Forward Encoder streaming
+        t_offset = 0
+        # Process at most 6000 frames (~60 s at a 10 ms frame shift) per chunk.
+        step = min(feats_len.max(), 6000)
+        segments = [[] for _ in range(self.batch_size)]  # independent per-utterance lists ([[]] * n would alias one list)
+        # NOTE: range() evaluates its stride once, with t_offset still 0, so the
+        # stride is effectively min(step, feats_len); the last partial chunk is
+        # handled by the is_final branch below.
+        for t_offset in range(0, feats_len, min(step, feats_len - t_offset)):
+            if t_offset + step >= feats_len - 1:
+                # Shrink the final chunk to what remains and flag it as final.
+                step = feats_len - t_offset
+                is_final = True
+            else:
+                is_final = False
+            batch = {
+                "feats": feats[:, t_offset:t_offset + step, :],
+                # Slice the waveform span these frames cover: 160 samples per
+                # 10 ms frame at 16 kHz, plus the 400-sample (25 ms) window tail.
+                "waveform": speech[:, t_offset * 160:min(speech.shape[-1], (t_offset + step - 1) * 160 + 400)],
+                "is_final": is_final,
+                "in_cache": in_cache
+            }
+            # a. To device
+            batch = to_device(batch, device=self.device)
+            # The model returns this chunk's segments plus the updated streaming cache.
+            segments_part, in_cache = self.vad_model(**batch)
+            if segments_part:
+                for batch_num in range(0, self.batch_size):
+                    segments[batch_num] += segments_part[batch_num]
+        return fbanks, segments
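The waveform slice in the chunk loop above follows standard 16 kHz fbank geometry: a 10 ms frame shift is 160 samples and a 25 ms window is 400 samples, so frames [t_offset, t_offset + step) cover samples [t_offset * 160, (t_offset + step - 1) * 160 + 400), clipped to the signal length. A minimal sketch of the same bookkeeping; the helper name and example numbers are illustrative, not part of the commit:

def waveform_span(t_offset: int, step: int, n_samples: int) -> "tuple[int, int]":
    """Sample range covered by fbank frames [t_offset, t_offset + step)."""
    start = t_offset * 160                                   # 10 ms shift at 16 kHz
    end = min(n_samples, (t_offset + step - 1) * 160 + 400)  # 25 ms window tail
    return start, end

assert waveform_span(0, 100, 1_000_000) == (0, 16240)           # first ~1 s of audio
assert waveform_span(6000, 100, 970_000) == (960_000, 970_000)  # clipped at signal end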
def inference(
@@ -236,11 +173,12 @@
    )
    return inference_pipeline(data_path_and_name_and_type, raw_inputs)
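As the wrapper above shows, inference is a one-shot helper: it builds the pipeline via inference_modelscope and invokes it once. When decoding many inputs it is cheaper to build the closure once and reuse it, since each build reloads the VAD model. A hedged sketch, assuming inference_modelscope returns its inner _forward (as the wrapper implies); all config and model paths are placeholders, not from this commit:

pipeline = inference_modelscope(
    batch_size=1,
    ngpu=1,
    log_level="INFO",
    vad_infer_config="<vad_infer_config>.yaml",  # placeholder path
    vad_model_file="<vad_model_file>.pb",        # placeholder path
    vad_cmvn_file="<vad_cmvn_file>",             # placeholder path
)
for scp in ("wav1.scp", "wav2.scp"):             # placeholder inputs
    results = pipeline([(scp, "speech", "sound")], raw_inputs=None)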
def inference_modelscope(
        batch_size: int,
        ngpu: int,
        log_level: Union[int, str],
-        #data_path_and_name_and_type,
+        # data_path_and_name_and_type,
        vad_infer_config: Optional[str],
        vad_model_file: Optional[str],
        vad_cmvn_file: Optional[str] = None,
@@ -284,11 +222,17 @@
    speech2vadsegment = Speech2VadSegment(**speech2vadsegment_kwargs)
    def _forward(
-        data_path_and_name_and_type,
-        raw_inputs: Union[np.ndarray, torch.Tensor] = None,
-        output_dir_v2: Optional[str] = None,
+            data_path_and_name_and_type,
+            raw_inputs: Union[np.ndarray, torch.Tensor] = None,
+            output_dir_v2: Optional[str] = None,
+            fs: dict = None,
+            param_dict: dict = None
    ):
        # 3. Build data-iterator
+        # Accept an in-memory waveform when no file list is given.
+        if data_path_and_name_and_type is None and raw_inputs is not None:
+            if isinstance(raw_inputs, torch.Tensor):
+                raw_inputs = raw_inputs.numpy()
+            data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
        loader = VADTask.build_streaming_iterator(
            data_path_and_name_and_type,
            dtype=dtype,
@@ -319,14 +263,16 @@
            assert all(isinstance(s, str) for s in keys), keys
            _bs = len(next(iter(batch.values())))
            assert len(keys) == _bs, f"{len(keys)} != {_bs}"
            # batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
            # do vad segment
-            results = speech2vadsegment(**batch)
+            # __call__ now returns (fbanks, segments); keep only the segments.
+            _, results = speech2vadsegment(**batch)
            for i, _ in enumerate(keys):
                if "MODELSCOPE_ENVIRONMENT" in os.environ and os.environ["MODELSCOPE_ENVIRONMENT"] == "eas":
                    results[i] = json.dumps(results[i])
                item = {'key': keys[i], 'value': results[i]}
                vad_results.append(item)
                if writer is not None:
                    results[i] = json.loads(results[i])
                    ibest_writer["text"][keys[i]] = "{}".format(results[i])
        return vad_results
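For in-memory audio the same closure takes raw_inputs instead of a file list; _forward wraps it as [waveform, "speech", "waveform"] before building the iterator, as shown above. A hedged usage sketch, assuming 16 kHz mono float32 samples and the pipeline closure from the earlier sketch:

import numpy as np

waveform = np.zeros(16000, dtype=np.float32)  # one second of silence as a stand-in
results = pipeline(data_path_and_name_and_type=None, raw_inputs=waveform)
for item in results:
    # Each item pairs the utterance key with its VAD output,
    # e.g. a list of [begin_ms, end_ms] speech segments.
    print(item["key"], item["value"])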