From b5d3df75cf6462aa3bf42fd3c86fa2aa7f1c8a15 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 24 Nov 2023 00:54:44 +0800
Subject: [PATCH] setup jamo
---
funasr/bin/asr_infer.py | 33 ++++++++++++++++-----------------
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/funasr/bin/asr_infer.py b/funasr/bin/asr_infer.py
index 4648fb3..a1cede1 100644
--- a/funasr/bin/asr_infer.py
+++ b/funasr/bin/asr_infer.py
@@ -34,8 +34,8 @@
from funasr.modules.scorers.ctc import CTCPrefixScorer
from funasr.modules.scorers.length_bonus import LengthBonus
from funasr.build_utils.build_asr_model import frontend_choices
-from funasr.text.build_tokenizer import build_tokenizer
-from funasr.text.token_id_converter import TokenIDConverter
+from funasr.tokenizer.build_tokenizer import build_tokenizer
+from funasr.tokenizer.token_id_converter import TokenIDConverter
from funasr.torch_utils.device_funcs import to_device
from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
@@ -44,9 +44,9 @@
"""Speech2Text class
Examples:
- >>> import soundfile
+ >>> import librosa
>>> speech2text = Speech2Text("asr_config.yml", "asr.pb")
- >>> audio, rate = soundfile.read("speech.wav")
+ >>> audio, rate = librosa.load("speech.wav")
>>> speech2text(audio)
[(text, token, token_int, hypothesis object), ...]
@@ -251,9 +251,9 @@
"""Speech2Text class
Examples:
- >>> import soundfile
+ >>> import librosa
>>> speech2text = Speech2TextParaformer("asr_config.yml", "asr.pb")
- >>> audio, rate = soundfile.read("speech.wav")
+ >>> audio, rate = librosa.load("speech.wav")
>>> speech2text(audio)
[(text, token, token_int, hypothesis object), ...]
@@ -625,9 +625,9 @@
"""Speech2Text class
Examples:
- >>> import soundfile
+ >>> import librosa
>>> speech2text = Speech2TextParaformerOnline("asr_config.yml", "asr.pth")
- >>> audio, rate = soundfile.read("speech.wav")
+ >>> audio, rate = librosa.load("speech.wav")
>>> speech2text(audio)
[(text, token, token_int, hypothesis object), ...]
@@ -876,9 +876,9 @@
"""Speech2Text class
Examples:
- >>> import soundfile
+ >>> import librosa
>>> speech2text = Speech2TextUniASR("asr_config.yml", "asr.pb")
- >>> audio, rate = soundfile.read("speech.wav")
+ >>> audio, rate = librosa.load("speech.wav")
>>> speech2text(audio)
[(text, token, token_int, hypothesis object), ...]
@@ -1106,9 +1106,9 @@
"""Speech2Text class
Examples:
- >>> import soundfile
+ >>> import librosa
>>> speech2text = Speech2TextMFCCA("asr_config.yml", "asr.pb")
- >>> audio, rate = soundfile.read("speech.wav")
+ >>> audio, rate = librosa.load("speech.wav")
>>> speech2text(audio)
[(text, token, token_int, hypothesis object), ...]
@@ -1605,7 +1605,6 @@
feats_lengths = to_device(feats_lengths, device=self.device)
enc_out, _, _ = self.asr_model.encoder(feats, feats_lengths)
-
nbest_hyps = self.beam_search(enc_out[0])
return nbest_hyps
@@ -1638,9 +1637,9 @@
"""Speech2Text class
Examples:
- >>> import soundfile
+ >>> import librosa
>>> speech2text = Speech2TextSAASR("asr_config.yml", "asr.pb")
- >>> audio, rate = soundfile.read("speech.wav")
+ >>> audio, rate = librosa.load("speech.wav")
>>> speech2text(audio)
[(text, token, token_int, hypothesis object), ...]
@@ -1886,9 +1885,9 @@
"""Speech2Text class
Examples:
- >>> import soundfile
+ >>> import librosa
>>> speech2text = Speech2Text("asr_config.yml", "asr.pb")
- >>> audio, rate = soundfile.read("speech.wav")
+ >>> audio, rate = librosa.load("speech.wav")
>>> speech2text(audio)
[(text, token, token_int, hypothesis object), ...]
--
Gitblit v1.9.1