From c8fe32c5bc0d7ec434d4cbce9f38bd0e1617c93a Mon Sep 17 00:00:00 2001
From: zhaomingwork <zhaomingwork@qq.com>
Date: Mon, 15 May 2023 15:00:16 +0800
Subject: [PATCH] asr_inference_paraformer: add hotword biasing, timestamp prediction, and a VAD+punctuation pipeline
---
funasr/bin/asr_inference_paraformer.py | 736 ++++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 557 insertions(+), 179 deletions(-)
diff --git a/funasr/bin/asr_inference_paraformer.py b/funasr/bin/asr_inference_paraformer.py
old mode 100755
new mode 100644
index 5673403..ecdb62a
--- a/funasr/bin/asr_inference_paraformer.py
+++ b/funasr/bin/asr_inference_paraformer.py
@@ -3,6 +3,11 @@
import logging
import sys
import time
+import copy
+import os
+import codecs
+import tempfile
+import requests
from pathlib import Path
from typing import Optional
from typing import Sequence
@@ -35,26 +40,23 @@
from funasr.utils.types import str_or_none
from funasr.utils import asr_utils, wav_utils, postprocess_utils
from funasr.models.frontend.wav_frontend import WavFrontend
-
-from modelscope.utils.logger import get_logger
-
-logger = get_logger()
-
-header_colors = '\033[95m'
-end_colors = '\033[0m'
-
-global_asr_language: str = 'zh-cn'
-global_sample_rate: Union[int, Dict[Any, int]] = {
- 'audio_fs': 16000,
- 'model_fs': 16000
-}
+from funasr.models.e2e_asr_paraformer import BiCifParaformer, ContextualParaformer
+from funasr.models.e2e_asr_contextual_paraformer import NeatContextualParaformer
+from funasr.export.models.e2e_asr_paraformer import Paraformer as Paraformer_export
+from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
+from funasr.bin.tp_inference import SpeechText2Timestamp
+from funasr.bin.vad_inference import Speech2VadSegment
+from funasr.bin.punctuation_infer import Text2Punc
+from funasr.utils.vad_utils import slice_padding_fbank
+from funasr.tasks.vad import VADTask
+from funasr.utils.timestamp_tools import time_stamp_sentence, ts_prediction_lfr6_standard
class Speech2Text:
"""Speech2Text class
Examples:
>>> import soundfile
- >>> speech2text = Speech2Text("asr_config.yml", "asr.pth")
+ >>> speech2text = Speech2Text("asr_config.yml", "asr.pb")
>>> audio, rate = soundfile.read("speech.wav")
>>> speech2text(audio)
[(text, token, token_int, hypothesis object), ...]
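+        Hotwords may be given as a space-separated string, a local .txt file path,
+        or an http(s) URL pointing to a text file (one hotword per line), via the
+        ``hotword_list_or_file`` argument:
+        >>> speech2text = Speech2Text("asr_config.yml", "asr.pb", hotword_list_or_file="hello world")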
@@ -65,6 +67,7 @@
self,
asr_train_config: Union[Path, str] = None,
asr_model_file: Union[Path, str] = None,
+ cmvn_file: Union[Path, str] = None,
lm_train_config: Union[Path, str] = None,
lm_file: Union[Path, str] = None,
token_type: str = None,
@@ -80,6 +83,7 @@
penalty: float = 0.0,
nbest: int = 1,
frontend_conf: dict = None,
+ hotword_list_or_file: str = None,
**kwargs,
):
assert check_argument_types()
@@ -87,19 +91,23 @@
# 1. Build ASR model
scorers = {}
asr_model, asr_train_args = ASRTask.build_model_from_file(
- asr_train_config, asr_model_file, device
+ asr_train_config, asr_model_file, cmvn_file, device
)
- if asr_model.frontend is None and frontend_conf is not None:
- frontend = WavFrontend(**frontend_conf)
- asr_model.frontend = frontend
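+ # build the fbank frontend from the training config, applying CMVN from the standalone cmvn_file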
+ frontend = None
+ if asr_train_args.frontend is not None and asr_train_args.frontend_conf is not None:
+ frontend = WavFrontend(cmvn_file=cmvn_file, **asr_train_args.frontend_conf)
+
logging.info("asr_model: {}".format(asr_model))
logging.info("asr_train_args: {}".format(asr_train_args))
asr_model.to(dtype=getattr(torch, dtype)).eval()
- ctc = CTCPrefixScorer(ctc=asr_model.ctc, eos=asr_model.eos)
+ if asr_model.ctc is not None:
+     ctc = CTCPrefixScorer(ctc=asr_model.ctc, eos=asr_model.eos)
+     scorers.update(ctc=ctc)
token_list = asr_model.token_list
scorers.update(
- ctc=ctc,
length_bonus=LengthBonus(len(token_list)),
)
@@ -141,7 +149,7 @@
for scorer in scorers.values():
if isinstance(scorer, torch.nn.Module):
scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
- logging.info(f"Beam_search: {beam_search}")
+
logging.info(f"Decoding device={device}, dtype={dtype}")
# 5. [Optional] Build Text converter: e.g. bpe-sym -> Text
@@ -166,20 +174,31 @@
self.asr_train_args = asr_train_args
self.converter = converter
self.tokenizer = tokenizer
- has_lm = lm_weight == 0.0 or lm_file is None
- if ctc_weight == 0.0 and has_lm:
+
+ # 6. [Optional] Build hotword list from str, local file or url
+ self.hotword_list = None
+ self.hotword_list = self.generate_hotwords_list(hotword_list_or_file)
+
+ is_use_lm = lm_weight != 0.0 and lm_file is not None
+ if (ctc_weight == 0.0 or asr_model.ctc is None) and not is_use_lm:
beam_search = None
self.beam_search = beam_search
+ logging.info(f"Beam_search: {self.beam_search}")
self.beam_search_transducer = beam_search_transducer
self.maxlenratio = maxlenratio
self.minlenratio = minlenratio
self.device = device
self.dtype = dtype
self.nbest = nbest
+ self.frontend = frontend
+ self.encoder_downsampling_factor = 1
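+ # data2vec and conv2d-input encoders subsample frames 4x; used below to recover true feature lengths for RTF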
+ if asr_train_args.encoder == "data2vec_encoder" or asr_train_args.encoder_conf["input_layer"] == "conv2d":
+ self.encoder_downsampling_factor = 4
@torch.no_grad()
def __call__(
- self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None
+ self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None,
+ begin_time: int = 0, end_time: int = None,
):
"""Inference
@@ -195,14 +214,16 @@
if isinstance(speech, np.ndarray):
speech = torch.tensor(speech)
- # data: (Nsamples,) -> (1, Nsamples)
- # lengths: (1,)
- # if len(speech.size()) < 3:
- # speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
- # speech_lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
- lfr_factor = max(1, (speech.size()[-1]//80)-1)
-
- batch = {"speech": speech, "speech_lengths": speech_lengths}
+ if self.frontend is not None:
+ feats, feats_len = self.frontend.forward(speech, speech_lengths)
+ feats = to_device(feats, device=self.device)
+ feats_len = feats_len.int()
+ self.asr_model.frontend = None
+ else:
+ feats = speech
+ feats_len = speech_lengths
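+ # infer the low-frame-rate (LFR) stacking factor from the feature dimension (multiples of 80-dim fbank)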
+ lfr_factor = max(1, (feats.size()[-1] // 80) - 1)
+ batch = {"speech": feats, "speech_lengths": feats_len}
# a. To device
batch = to_device(batch, device=self.device)
@@ -212,13 +233,26 @@
if isinstance(enc, tuple):
enc = enc[0]
# assert len(enc) == 1, len(enc)
- enc_len_batch_total = torch.sum(enc_len).item()
+ enc_len_batch_total = torch.sum(enc_len).item() * self.encoder_downsampling_factor
predictor_outs = self.asr_model.calc_predictor(enc, enc_len)
- pre_acoustic_embeds, pre_token_length = predictor_outs[0], predictor_outs[1]
+ pre_acoustic_embeds, pre_token_length, alphas, pre_peak_index = predictor_outs[0], predictor_outs[1], \
+ predictor_outs[2], predictor_outs[3]
pre_token_length = pre_token_length.round().long()
- decoder_outs = self.asr_model.cal_decoder_with_predictor(enc, enc_len, pre_acoustic_embeds, pre_token_length)
- decoder_out, ys_pad_lens = decoder_outs[0], decoder_outs[1]
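+ # predictor emitted no tokens (e.g. silence-only input): return no hypotheses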
+ if torch.max(pre_token_length) < 1:
+ return []
+ if not isinstance(self.asr_model, ContextualParaformer) and not isinstance(self.asr_model, NeatContextualParaformer):
+ if self.hotword_list:
+ logging.warning("Hotword is given but asr model is not a ContextualParaformer.")
+ decoder_outs = self.asr_model.cal_decoder_with_predictor(enc, enc_len, pre_acoustic_embeds, pre_token_length)
+ decoder_out, ys_pad_lens = decoder_outs[0], decoder_outs[1]
+ else:
+ decoder_outs = self.asr_model.cal_decoder_with_predictor(enc, enc_len, pre_acoustic_embeds, pre_token_length, hw_list=self.hotword_list)
+ decoder_out, ys_pad_lens = decoder_outs[0], decoder_outs[1]
+
+ if isinstance(self.asr_model, BiCifParaformer):
+     # second CIF pass: frame-level alphas/peaks used for timestamp prediction
+     _, _, us_alphas, us_peaks = self.asr_model.calc_predictor_timestamp(enc, enc_len, pre_token_length)
results = []
b, n, d = decoder_out.size()
@@ -229,7 +263,7 @@
nbest_hyps = self.beam_search(
x=x, am_scores=am_scores, maxlenratio=self.maxlenratio, minlenratio=self.minlenratio
)
-
+
nbest_hyps = nbest_hyps[: self.nbest]
else:
yseq = am_scores.argmax(dim=-1)
@@ -240,35 +274,96 @@
[self.asr_model.sos] + yseq.tolist() + [self.asr_model.eos], device=yseq.device
)
nbest_hyps = [Hypothesis(yseq=yseq, score=score)]
-
+
for hyp in nbest_hyps:
assert isinstance(hyp, (Hypothesis)), type(hyp)
-
+
# remove sos/eos and get results
last_pos = -1
if isinstance(hyp.yseq, list):
token_int = hyp.yseq[1:last_pos]
else:
token_int = hyp.yseq[1:last_pos].tolist()
-
+
# remove blank symbol id, which is assumed to be 0
- token_int = list(filter(lambda x: x != 0, token_int))
-
+ token_int = list(filter(lambda x: x != 0 and x != 2, token_int))
+
# Change integer-ids to tokens
token = self.converter.ids2tokens(token_int)
-
+
if self.tokenizer is not None:
text = self.tokenizer.tokens2text(token)
else:
text = None
-
- results.append((text, token, token_int, hyp, enc_len_batch_total, lfr_factor))
+ timestamp = []
+ if isinstance(self.asr_model, BiCifParaformer):
+ _, timestamp = ts_prediction_lfr6_standard(us_alphas[i][:enc_len[i]*3],
+ us_peaks[i][:enc_len[i]*3],
+ copy.copy(token),
+ vad_offset=begin_time)
+ results.append((text, token, token_int, hyp, timestamp, enc_len_batch_total, lfr_factor))
+
# assert check_return_type(results)
return results
+ def generate_hotwords_list(self, hotword_list_or_file):
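+     """Build a hotword id list from a plain string, a local .txt file, or an http(s) URL.
+     Each hotword is converted to token ids; <s> (sos) is appended as terminator."""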
+ # for None
+ if hotword_list_or_file is None:
+ hotword_list = None
+ # for local txt inputs
+ elif os.path.exists(hotword_list_or_file) and hotword_list_or_file.endswith('.txt'):
+ logging.info("Attempting to parse hotwords from local txt...")
+ hotword_list = []
+ hotword_str_list = []
+ with codecs.open(hotword_list_or_file, 'r') as fin:
+ for line in fin.readlines():
+ hw = line.strip()
+ hotword_str_list.append(hw)
+ hotword_list.append(self.converter.tokens2ids([i for i in hw]))
+ hotword_list.append([self.asr_model.sos])
+ hotword_str_list.append('<s>')
+ logging.info("Initialized hotword list from file: {}, hotword list: {}."
+ .format(hotword_list_or_file, hotword_str_list))
+ # for url, download and generate txt
+ elif hotword_list_or_file.startswith('http'):
+ logging.info("Attempting to parse hotwords from url...")
+ work_dir = tempfile.mkdtemp()
+ text_file_path = os.path.join(work_dir, os.path.basename(hotword_list_or_file))
+ local_file = requests.get(hotword_list_or_file)
+ with open(text_file_path, "wb") as fout:
+     fout.write(local_file.content)
+ hotword_list_or_file = text_file_path
+ hotword_list = []
+ hotword_str_list = []
+ with codecs.open(hotword_list_or_file, 'r') as fin:
+ for line in fin.readlines():
+ hw = line.strip()
+ hotword_str_list.append(hw)
+ hotword_list.append(self.converter.tokens2ids([i for i in hw]))
+ hotword_list.append([self.asr_model.sos])
+ hotword_str_list.append('<s>')
+ logging.info("Initialized hotword list from file: {}, hotword list: {}."
+ .format(hotword_list_or_file, hotword_str_list))
+ # for text str input
+ elif not hotword_list_or_file.endswith('.txt'):
+ logging.info("Attempting to parse hotwords as str...")
+ hotword_list = []
+ hotword_str_list = []
+ for hw in hotword_list_or_file.strip().split():
+ hotword_str_list.append(hw)
+ hotword_list.append(self.converter.tokens2ids([i for i in hw]))
+ hotword_list.append([self.asr_model.sos])
+ hotword_str_list.append('<s>')
+ logging.info("Hotword list: {}.".format(hotword_str_list))
+ else:
+ hotword_list = None
+ return hotword_list
-def inference(
+
+
+def inference_modelscope(
maxlenratio: float,
minlenratio: float,
batch_size: int,
@@ -278,10 +373,10 @@
lm_weight: float,
penalty: float,
log_level: Union[int, str],
- data_path_and_name_and_type,
+ # data_path_and_name_and_type,
asr_train_config: Optional[str],
asr_model_file: Optional[str],
- audio_lists: Union[List[Any], bytes] = None,
+ cmvn_file: Optional[str] = None,
lm_train_config: Optional[str] = None,
lm_file: Optional[str] = None,
token_type: Optional[str] = None,
@@ -289,20 +384,21 @@
word_lm_train_config: Optional[str] = None,
bpemodel: Optional[str] = None,
allow_variable_data_keys: bool = False,
- streaming: bool = False,
- output_dir: Optional[str] = None,
dtype: str = "float32",
seed: int = 0,
ngram_weight: float = 0.9,
nbest: int = 1,
num_workers: int = 1,
- frontend_conf: dict = None,
- fs: Union[dict, int] = 16000,
- lang: Optional[str] = None,
+ output_dir: Optional[str] = None,
+ timestamp_infer_config: Union[Path, str] = None,
+ timestamp_model_file: Union[Path, str] = None,
+ param_dict: dict = None,
**kwargs,
):
assert check_argument_types()
-
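+ # cap torch intra-op threads so CPU decoding does not oversubscribe cores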
+ ncpu = kwargs.get("ncpu", 1)
+ torch.set_num_threads(ncpu)
+
if word_lm_train_config is not None:
raise NotImplementedError("Word LM is not implemented")
if ngpu > 1:
@@ -312,51 +408,22 @@
level=log_level,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
-
- if ngpu >= 1:
+
+ export_mode = False
+ if param_dict is not None:
+ hotword_list_or_file = param_dict.get('hotword')
+ export_mode = param_dict.get("export_mode", False)
+ else:
+ hotword_list_or_file = None
+
+ if kwargs.get("device", None) == "cpu":
+ ngpu = 0
+ if ngpu >= 1 and torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
- hop_length: int = 160
- sr: int = 16000
- if isinstance(fs, int):
- sr = fs
- else:
- if 'model_fs' in fs and fs['model_fs'] is not None:
- sr = fs['model_fs']
- # data_path_and_name_and_type for modelscope: (data from audio_lists)
- # ['speech', 'sound', 'am.mvn']
- # data_path_and_name_and_type for funasr:
- # [('/mnt/data/jiangyu.xzy/exp/maas/mvn.1.scp', 'speech', 'kaldi_ark')]
- if isinstance(data_path_and_name_and_type[0], Tuple):
- features_type: str = data_path_and_name_and_type[0][1]
- elif isinstance(data_path_and_name_and_type[0], str):
- features_type: str = data_path_and_name_and_type[1]
- else:
- raise NotImplementedError("unknown features type:{0}".format(data_path_and_name_and_type))
- if features_type != 'sound':
- frontend_conf = None
- flag_modelscope = False
- else:
- flag_modelscope = True
- if frontend_conf is not None:
- if 'hop_length' in frontend_conf:
- hop_length = frontend_conf['hop_length']
+ batch_size = 1
- finish_count = 0
- file_count = 1
- if flag_modelscope and not isinstance(data_path_and_name_and_type[0], Tuple):
- data_path_and_name_and_type_new = [
- audio_lists, data_path_and_name_and_type[0], data_path_and_name_and_type[1]
- ]
- if isinstance(audio_lists, bytes):
- file_count = 1
- else:
- file_count = len(audio_lists)
- if len(data_path_and_name_and_type) >= 3 and frontend_conf is not None:
- mvn_file = data_path_and_name_and_type[2]
- mvn_data = wav_utils.extract_CMVN_featrures(mvn_file)
- frontend_conf['mvn_data'] = mvn_data
# 1. Set random-seed
set_all_random_seed(seed)
@@ -364,6 +431,7 @@
speech2text_kwargs = dict(
asr_train_config=asr_train_config,
asr_model_file=asr_model_file,
+ cmvn_file=cmvn_file,
lm_train_config=lm_train_config,
lm_file=lm_file,
token_type=token_type,
@@ -378,28 +446,46 @@
ngram_weight=ngram_weight,
penalty=penalty,
nbest=nbest,
- frontend_conf=frontend_conf,
+ hotword_list_or_file=hotword_list_or_file,
)
+
speech2text = Speech2Text(**speech2text_kwargs)
- # 3. Build data-iterator
- if flag_modelscope:
- loader = ASRTask.build_streaming_iterator_modelscope(
- data_path_and_name_and_type_new,
- dtype=dtype,
- batch_size=batch_size,
- key_file=key_file,
- num_workers=num_workers,
- preprocess_fn=ASRTask.build_preprocess_fn(speech2text.asr_train_args, False),
- collate_fn=ASRTask.build_collate_fn(speech2text.asr_train_args, False),
- allow_variable_data_keys=allow_variable_data_keys,
- inference=True,
- sample_rate=fs
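+ # optionally build a standalone timestamp predictor for ASR models without built-in CIF timestamps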
+ if timestamp_model_file is not None:
+ speechtext2timestamp = SpeechText2Timestamp(
+ timestamp_cmvn_file=cmvn_file,
+ timestamp_model_file=timestamp_model_file,
+ timestamp_infer_config=timestamp_infer_config,
)
else:
+ speechtext2timestamp = None
+
+ def _forward(
+ data_path_and_name_and_type,
+ raw_inputs: Union[np.ndarray, torch.Tensor] = None,
+ output_dir_v2: Optional[str] = None,
+ fs: dict = None,
+ param_dict: dict = None,
+ **kwargs,
+ ):
+
+ hotword_list_or_file = None
+ if param_dict is not None:
+ hotword_list_or_file = param_dict.get('hotword')
+ if 'hotword' in kwargs and kwargs['hotword'] is not None:
+ hotword_list_or_file = kwargs['hotword']
+ if hotword_list_or_file is not None or 'hotword' in kwargs:
+ speech2text.hotword_list = speech2text.generate_hotwords_list(hotword_list_or_file)
+
+ # 3. Build data-iterator
+ if data_path_and_name_and_type is None and raw_inputs is not None:
+ if isinstance(raw_inputs, torch.Tensor):
+ raw_inputs = raw_inputs.numpy()
+ data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
loader = ASRTask.build_streaming_iterator(
data_path_and_name_and_type,
dtype=dtype,
+ fs=fs,
batch_size=batch_size,
key_file=key_file,
num_workers=num_workers,
@@ -409,76 +495,355 @@
inference=True,
)
- forward_time_total = 0.0
- length_total = 0.0
- # 7 .Start for-loop
- # FIXME(kamo): The output format should be discussed about
- asr_result_list = []
+ if param_dict is not None:
+ use_timestamp = param_dict.get('use_timestamp', True)
+ else:
+ use_timestamp = True
+
+ forward_time_total = 0.0
+ length_total = 0.0
+ finish_count = 0
+ file_count = 1
+ # 7. Start the decoding loop
+ # FIXME(kamo): The output format should be discussed
+ asr_result_list = []
+ output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
+ if output_path is not None:
+ writer = DatadirWriter(output_path)
+ else:
+ writer = None
+
+ for keys, batch in loader:
+ assert isinstance(batch, dict), type(batch)
+ assert all(isinstance(s, str) for s in keys), keys
+ _bs = len(next(iter(batch.values())))
+ assert len(keys) == _bs, f"{len(keys)} != {_bs}"
+ # batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}
+
+ logging.info("decoding, utt_id: {}".format(keys))
+ # N-best list of (text, token, token_int, hyp_object)
+
+ time_beg = time.time()
+ results = speech2text(**batch)
+ if len(results) < 1:
+     hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
+     # placeholder keeps the 7-field result layout (text, token, token_int, hyp, timestamp, length, lfr)
+     results = [[" ", ["sil"], [2], hyp, [], 10, 6]] * nbest
+ time_end = time.time()
+ forward_time = time_end - time_beg
+ lfr_factor = results[0][-1]
+ length = results[0][-2]
+ forward_time_total += forward_time
+ length_total += length
+ rtf_cur = "decoding, feature length: {}, forward_time: {:.4f}, rtf: {:.4f}".format(length, forward_time, 100 * forward_time / (length * lfr_factor))
+ logging.info(rtf_cur)
+
+ for batch_id in range(_bs):
+ result = [results[batch_id][:-2]]
+
+ key = keys[batch_id]
+ for n, result in zip(range(1, nbest + 1), result):
+ text, token, token_int, hyp = result[0], result[1], result[2], result[3]
+ timestamp = result[4] if len(result[4]) > 0 else None
+ # conduct timestamp prediction here
+ # timestamp inference requires token length
+ # thus following inference cannot be conducted in batch
+ if timestamp is None and speechtext2timestamp:
+ ts_batch = {}
+ ts_batch['speech'] = batch['speech'][batch_id].unsqueeze(0)
+ ts_batch['speech_lengths'] = torch.tensor([batch['speech_lengths'][batch_id]])
+ ts_batch['text_lengths'] = torch.tensor([len(token)])
+ us_alphas, us_peaks = speechtext2timestamp(**ts_batch)
+ ts_str, timestamp = ts_prediction_lfr6_standard(us_alphas[0], us_peaks[0], token, force_time_shift=-3.0)
+ # Create a directory: outdir/{n}best_recog
+ if writer is not None:
+ ibest_writer = writer[f"{n}best_recog"]
+
+ # Write the result to each file
+ ibest_writer["token"][key] = " ".join(token)
+ # ibest_writer["token_int"][key] = " ".join(map(str, token_int))
+ ibest_writer["score"][key] = str(hyp.score)
+ ibest_writer["rtf"][key] = rtf_cur
+
+ if text is not None:
+ if use_timestamp and timestamp is not None:
+ postprocessed_result = postprocess_utils.sentence_postprocess(token, timestamp)
+ else:
+ postprocessed_result = postprocess_utils.sentence_postprocess(token)
+ timestamp_postprocessed = ""
+ if len(postprocessed_result) == 3:
+ text_postprocessed, timestamp_postprocessed, word_lists = postprocessed_result[0], \
+ postprocessed_result[1], \
+ postprocessed_result[2]
+ else:
+ text_postprocessed, word_lists = postprocessed_result[0], postprocessed_result[1]
+ item = {'key': key, 'value': text_postprocessed}
+ if timestamp_postprocessed != "":
+ item['timestamp'] = timestamp_postprocessed
+ asr_result_list.append(item)
+ finish_count += 1
+ # asr_utils.print_progress(finish_count / file_count)
+ if writer is not None:
+ ibest_writer["text"][key] = " ".join(word_lists)
+
+ logging.info("decoding, utt: {}, predictions: {}".format(key, text))
+ rtf_avg = "decoding, feature length total: {}, forward_time total: {:.4f}, rtf avg: {:.4f}".format(length_total, forward_time_total, 100 * forward_time_total / (length_total * lfr_factor))
+ logging.info(rtf_avg)
+ if writer is not None:
+ ibest_writer["rtf"]["rtf_avf"] = rtf_avg
+ return asr_result_list
+
+ return _forward
+
+
+def inference_modelscope_vad_punc(
+ maxlenratio: float,
+ minlenratio: float,
+ batch_size: int,
+ beam_size: int,
+ ngpu: int,
+ ctc_weight: float,
+ lm_weight: float,
+ penalty: float,
+ log_level: Union[int, str],
+ # data_path_and_name_and_type,
+ asr_train_config: Optional[str],
+ asr_model_file: Optional[str],
+ cmvn_file: Optional[str] = None,
+ lm_train_config: Optional[str] = None,
+ lm_file: Optional[str] = None,
+ token_type: Optional[str] = None,
+ key_file: Optional[str] = None,
+ word_lm_train_config: Optional[str] = None,
+ bpemodel: Optional[str] = None,
+ allow_variable_data_keys: bool = False,
+ output_dir: Optional[str] = None,
+ dtype: str = "float32",
+ seed: int = 0,
+ ngram_weight: float = 0.9,
+ nbest: int = 1,
+ num_workers: int = 1,
+ vad_infer_config: Optional[str] = None,
+ vad_model_file: Optional[str] = None,
+ vad_cmvn_file: Optional[str] = None,
+ time_stamp_writer: bool = True,
+ punc_infer_config: Optional[str] = None,
+ punc_model_file: Optional[str] = None,
+ outputs_dict: Optional[bool] = True,
+ param_dict: dict = None,
+ **kwargs,
+):
+ assert check_argument_types()
+ ncpu = kwargs.get("ncpu", 1)
+ torch.set_num_threads(ncpu)
+
+ if word_lm_train_config is not None:
+ raise NotImplementedError("Word LM is not implemented")
+ if ngpu > 1:
+ raise NotImplementedError("only single GPU decoding is supported")
+
+ logging.basicConfig(
+ level=log_level,
+ format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
+ )
+
+ if param_dict is not None:
+ hotword_list_or_file = param_dict.get('hotword')
+ else:
+ hotword_list_or_file = None
+
+ if ngpu >= 1 and torch.cuda.is_available():
+ device = "cuda"
+ else:
+ device = "cpu"
+
+ # 1. Set random-seed
+ set_all_random_seed(seed)
+
+ # 2. Build speech2vadsegment
+ speech2vadsegment_kwargs = dict(
+ vad_infer_config=vad_infer_config,
+ vad_model_file=vad_model_file,
+ vad_cmvn_file=vad_cmvn_file,
+ device=device,
+ dtype=dtype,
+ )
+ # logging.info("speech2vadsegment_kwargs: {}".format(speech2vadsegment_kwargs))
+ speech2vadsegment = Speech2VadSegment(**speech2vadsegment_kwargs)
+
+ # 3. Build speech2text
+ speech2text_kwargs = dict(
+ asr_train_config=asr_train_config,
+ asr_model_file=asr_model_file,
+ cmvn_file=cmvn_file,
+ lm_train_config=lm_train_config,
+ lm_file=lm_file,
+ token_type=token_type,
+ bpemodel=bpemodel,
+ device=device,
+ maxlenratio=maxlenratio,
+ minlenratio=minlenratio,
+ dtype=dtype,
+ beam_size=beam_size,
+ ctc_weight=ctc_weight,
+ lm_weight=lm_weight,
+ ngram_weight=ngram_weight,
+ penalty=penalty,
+ nbest=nbest,
+ hotword_list_or_file=hotword_list_or_file,
+ )
+ speech2text = Speech2Text(**speech2text_kwargs)
+ text2punc = None
+ if punc_model_file is not None:
+ text2punc = Text2Punc(punc_infer_config, punc_model_file, device=device, dtype=dtype)
+
if output_dir is not None:
writer = DatadirWriter(output_dir)
- else:
- writer = None
-
- for keys, batch in loader:
- assert isinstance(batch, dict), type(batch)
- assert all(isinstance(s, str) for s in keys), keys
- _bs = len(next(iter(batch.values())))
- assert len(keys) == _bs, f"{len(keys)} != {_bs}"
- # batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}
-
- logging.info("decoding, utt_id: {}".format(keys))
- # N-best list of (text, token, token_int, hyp_object)
-
- time_beg = time.time()
- results = speech2text(**batch)
- time_end = time.time()
- forward_time = time_end - time_beg
- lfr_factor = results[0][-1]
- length = results[0][-2]
- forward_time_total += forward_time
- length_total += length
- logging.info(
- "decoding, feature length: {}, forward_time: {:.4f}, rtf: {:.4f}".
- format(length, forward_time, 100 * forward_time / (length*lfr_factor)))
+ ibest_writer = writer[f"1best_recog"]
+ ibest_writer["token_list"][""] = " ".join(speech2text.asr_train_args.token_list)
+
+ def _forward(data_path_and_name_and_type,
+ raw_inputs: Union[np.ndarray, torch.Tensor] = None,
+ output_dir_v2: Optional[str] = None,
+ fs: dict = None,
+ param_dict: dict = None,
+ **kwargs,
+ ):
- for batch_id in range(_bs):
- result = [results[batch_id][:-2]]
+ hotword_list_or_file = None
+ if param_dict is not None:
+ hotword_list_or_file = param_dict.get('hotword')
+
+ if 'hotword' in kwargs:
+ hotword_list_or_file = kwargs['hotword']
+
+ if speech2text.hotword_list is None:
+ speech2text.hotword_list = speech2text.generate_hotwords_list(hotword_list_or_file)
+
+ # 3. Build data-iterator
+ if data_path_and_name_and_type is None and raw_inputs is not None:
+ if isinstance(raw_inputs, torch.Tensor):
+ raw_inputs = raw_inputs.numpy()
+ data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
+ loader = ASRTask.build_streaming_iterator(
+ data_path_and_name_and_type,
+ dtype=dtype,
+ fs=fs,
+ batch_size=1,
+ key_file=key_file,
+ num_workers=num_workers,
+ preprocess_fn=VADTask.build_preprocess_fn(speech2vadsegment.vad_infer_args, False),
+ collate_fn=VADTask.build_collate_fn(speech2vadsegment.vad_infer_args, False),
+ allow_variable_data_keys=allow_variable_data_keys,
+ inference=True,
+ )
+
+ if param_dict is not None:
+ use_timestamp = param_dict.get('use_timestamp', True)
+ else:
+ use_timestamp = True
+
+ finish_count = 0
+ file_count = 1
+ lfr_factor = 6  # LFR-6 frontend: six input frames per output frame
+ # 7. Start the decoding loop
+ asr_result_list = []
+ output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
+ writer = None
+ if output_path is not None:
+ writer = DatadirWriter(output_path)
+ ibest_writer = writer[f"1best_recog"]
+
+ for keys, batch in loader:
+ assert isinstance(batch, dict), type(batch)
+ assert all(isinstance(s, str) for s in keys), keys
+ _bs = len(next(iter(batch.values())))
+ assert len(keys) == _bs, f"{len(keys)} != {_bs}"
+
+ vad_results = speech2vadsegment(**batch)
+ _, vadsegments = vad_results[0], vad_results[1][0]
+
+ speech, speech_lengths = batch["speech"], batch["speech_lengths"]
+
+ n = len(vadsegments)
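+ # sort VAD segments by duration so same-length segments batch together with little padding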
+ data_with_index = [(vadsegments[i], i) for i in range(n)]
+ sorted_data = sorted(data_with_index, key=lambda x: x[0][1] - x[0][0])
+ results_sorted = []
+ for j, beg_idx in enumerate(range(0, n, batch_size)):
+ end_idx = min(n, beg_idx + batch_size)
+ speech_j, speech_lengths_j = slice_padding_fbank(speech, speech_lengths, sorted_data[beg_idx:end_idx])
+
+ batch = {"speech": speech_j, "speech_lengths": speech_lengths_j}
+ batch = to_device(batch, device=device)
+ results = speech2text(**batch)
+
+ if len(results) < 1:
+ results = [["", [], [], [], [], [], []]]
+ results_sorted.extend(results)
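+ # restore the original segment order after length-sorted batch decoding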
+ restored_data = [0] * n
+ for j in range(n):
+ index = sorted_data[j][1]
+ restored_data[index] = results_sorted[j]
+ result = ["", [], [], [], [], [], []]
+ for j in range(n):
+ result[0] += restored_data[j][0]
+ result[1] += restored_data[j][1]
+ result[2] += restored_data[j][2]
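+ # offset per-segment timestamps by the VAD segment start so they are absolute in the original audio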
+ if len(restored_data[j][4]) > 0:
+ for t in restored_data[j][4]:
+ t[0] += vadsegments[j][0]
+ t[1] += vadsegments[j][0]
+ result[4] += restored_data[j][4]
+ # result = [result[k]+restored_data[j][k] for k in range(len(result[:-2]))]
+
+ key = keys[0]
+ # result = result_segments[0]
+ text, token, token_int = result[0], result[1], result[2]
+ time_stamp = result[4] if len(result[4]) > 0 else None
+
+ if use_timestamp and time_stamp is not None:
+ postprocessed_result = postprocess_utils.sentence_postprocess(token, time_stamp)
+ else:
+ postprocessed_result = postprocess_utils.sentence_postprocess(token)
+ text_postprocessed = ""
+ time_stamp_postprocessed = ""
+ text_postprocessed_punc = postprocessed_result
+ if len(postprocessed_result) == 3:
+ text_postprocessed, time_stamp_postprocessed, word_lists = postprocessed_result[0], \
+ postprocessed_result[1], \
+ postprocessed_result[2]
+ else:
+ text_postprocessed, word_lists = postprocessed_result[0], postprocessed_result[1]
+
+ text_postprocessed_punc = text_postprocessed
+ punc_id_list = []
+ if len(word_lists) > 0 and text2punc is not None:
+ text_postprocessed_punc, punc_id_list = text2punc(word_lists, 20)
+
+ item = {'key': key, 'value': text_postprocessed_punc}
+ if text_postprocessed != "":
+ item['text_postprocessed'] = text_postprocessed
+ if time_stamp_postprocessed != "":
+ item['time_stamp'] = time_stamp_postprocessed
+
+ item['sentences'] = time_stamp_sentence(punc_id_list, time_stamp_postprocessed, text_postprocessed)
+
+ asr_result_list.append(item)
+ finish_count += 1
+ # asr_utils.print_progress(finish_count / file_count)
+ if writer is not None:
+ # Write the result to each file
+ ibest_writer["token"][key] = " ".join(token)
+ ibest_writer["token_int"][key] = " ".join(map(str, token_int))
+ ibest_writer["vad"][key] = "{}".format(vadsegments)
+ ibest_writer["text"][key] = " ".join(word_lists)
+ ibest_writer["text_with_punc"][key] = text_postprocessed_punc
+ if time_stamp_postprocessed is not None:
+ ibest_writer["time_stamp"][key] = "{}".format(time_stamp_postprocessed)
+
+ logging.info("decoding, utt: {}, predictions: {}".format(key, text_postprocessed_punc))
+ return asr_result_list
- key = keys[batch_id]
- for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), result):
- # Create a directory: outdir/{n}best_recog
- if writer is not None:
- ibest_writer = writer[f"{n}best_recog"]
-
- # Write the result to each file
- ibest_writer["token"][key] = " ".join(token)
- ibest_writer["token_int"][key] = " ".join(map(str, token_int))
- ibest_writer["score"][key] = str(hyp.score)
-
- if text is not None:
- text_postprocessed = postprocess_utils.sentence_postprocess(token)
- item = {'key': key, 'value': text_postprocessed}
- asr_result_list.append(item)
- finish_count += 1
- asr_utils.print_progress(finish_count / file_count)
- if writer is not None:
- ibest_writer["text"][key] = text
-
- logging.info("decoding, utt: {}, predictions: {}".format(key, text))
-
- logging.info("decoding, feature length total: {}, forward_time total: {:.4f}, rtf avg: {:.4f}".
- format(length_total, forward_time_total, 100 * forward_time_total / (length_total*lfr_factor)))
- return asr_result_list
-
-
-def set_parameters(language: str = None,
- sample_rate: Union[int, Dict[Any, int]] = None):
- if language is not None:
- global global_asr_language
- global_asr_language = language
- if sample_rate is not None:
- global global_sample_rate
- global_sample_rate = sample_rate
+ return _forward
def get_parser():
@@ -517,12 +882,17 @@
default=1,
help="The number of workers used for DataLoader",
)
-
+ parser.add_argument(
+ "--hotword",
+ type=str_or_none,
+ default=None,
+ help="hotword file path or hotwords seperated by space"
+ )
group = parser.add_argument_group("Input data related")
group.add_argument(
"--data_path_and_name_and_type",
type=str2triple_str,
- required=True,
+ required=False,
action="append",
)
group.add_argument("--key_file", type=str_or_none)
@@ -538,6 +908,11 @@
"--asr_model_file",
type=str,
help="ASR model parameter file",
+ )
+ group.add_argument(
+ "--cmvn_file",
+ type=str,
+ help="Global cmvn file",
)
group.add_argument(
"--lm_train_config",
@@ -613,7 +988,7 @@
default=None,
help="",
)
- group.add_argument("--audio_lists", type=list, default=None)
+ group.add_argument("--raw_inputs", type=list, default=None)
# example=[{'key':'EdevDEWdIYQ_0021','file':'/mnt/data/jiangyu.xzy/test_data/speech_io/SPEECHIO_ASR_ZH00007_zhibodaihuo/wav/EdevDEWdIYQ_0021.wav'}])
group = parser.add_argument_group("Text converter related")
@@ -640,9 +1015,12 @@
print(get_commandline_args(), file=sys.stderr)
parser = get_parser()
args = parser.parse_args(cmd)
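+ # route the --hotword CLI flag through param_dict, mirroring the modelscope pipeline interface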
+ param_dict = {'hotword': args.hotword}
kwargs = vars(args)
kwargs.pop("config", None)
- inference(**kwargs)
+ kwargs['param_dict'] = param_dict
+ inference_pipeline = inference_modelscope(**kwargs)
+ return inference_pipeline(kwargs["data_path_and_name_and_type"], param_dict=param_dict)
if __name__ == "__main__":
--
Gitblit v1.9.1