From b18f7d121f2f17df8bf2d0c2bbb223bc5ddbcc0f Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 25 May 2023 16:11:22 +0800
Subject: [PATCH] docs
---
funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py b/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py
index f9232af..9954daa 100644
--- a/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py
+++ b/funasr/runtime/python/libtorch/funasr_torch/paraformer_bin.py
@@ -23,7 +23,6 @@
batch_size: int = 1,
device_id: Union[str, int] = "-1",
plot_timestamp_to: str = "",
- pred_bias: int = 1,
quantize: bool = False,
intra_op_num_threads: int = 1,
):
@@ -46,8 +45,12 @@
)
self.ort_infer = torch.jit.load(model_file)
self.batch_size = batch_size
+ self.device_id = device_id
self.plot_timestamp_to = plot_timestamp_to
- self.pred_bias = pred_bias
+ if "predictor_bias" in config['model_conf'].keys():
+ self.pred_bias = config['model_conf']['predictor_bias']
+ else:
+ self.pred_bias = 0
def __call__(self, wav_content: Union[str, np.ndarray, List[str]], **kwargs) -> List:
waveform_list = self.load_data(wav_content, self.frontend.opts.frame_opts.samp_freq)
@@ -58,11 +61,13 @@
end_idx = min(waveform_nums, beg_idx + self.batch_size)
feats, feats_len = self.extract_feat(waveform_list[beg_idx:end_idx])
try:
- if int(device_id) != -1:
- feats = feats.cuda()
- feats_len = feats_len.cuda()
- outputs = self.ort_infer(feats, feats_len)
- am_scores, valid_token_lens = outputs[0], outputs[1]
+ with torch.no_grad():
+ if int(self.device_id) == -1:
+ outputs = self.ort_infer(feats, feats_len)
+ am_scores, valid_token_lens = outputs[0], outputs[1]
+ else:
+ outputs = self.ort_infer(feats.cuda(), feats_len.cuda())
+ am_scores, valid_token_lens = outputs[0].cpu(), outputs[1].cpu()
if len(outputs) == 4:
# for BiCifParaformer Inference
us_alphas, us_peaks = outputs[2], outputs[3]
--
Gitblit v1.9.1