speech_asr
2023-02-15 bf1a3ff4d1d96b8b1f399a67ee8ee8d985cc99aa
funasr/runtime/python/onnxruntime/paraformer/rapid_paraformer/paraformer_onnx.py
@@ -9,10 +9,11 @@
import librosa
import numpy as np
from .utils import (CharTokenizer, Hypothesis, ONNXRuntimeError,
                    OrtInferSession, TokenIDConverter, WavFrontend, get_logger,
from utils.utils import (CharTokenizer, Hypothesis, ONNXRuntimeError,
                    OrtInferSession, TokenIDConverter, get_logger,
                    read_yaml)
from .postprocess_utils import sentence_postprocess
from utils.postprocess_utils import sentence_postprocess
from utils.frontend import WavFrontend
logging = get_logger()
@@ -65,7 +66,7 @@
                  wav_content: Union[str, np.ndarray, List[str]]) -> List:
        def load_wav(path: str) -> np.ndarray:
            waveform, _ = librosa.load(path, sr=None)
            return waveform[None, ...]
            return waveform
        if isinstance(wav_content, np.ndarray):
            return [wav_content]
@@ -139,13 +140,4 @@
        # text = self.tokenizer.tokens2text(token)
        return text
if __name__ == '__main__':
    # Demo entry point: run the ONNX Paraformer ASR model on the bundled
    # example utterance and print the recognized result.
    # (Removed an unused `project_dir` local that was computed but never read.)
    #
    # NOTE(review): model_dir is a hard-coded, machine-specific ModelScope
    # cache path — replace with a CLI argument or environment variable
    # before shipping.
    model_dir = "/home/zhifu.gzf/.cache/modelscope/hub/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
    model = Paraformer(model_dir)
    # The example WAV ships inside the downloaded model directory.
    wav_file = os.path.join(model_dir, 'example/asr_example.wav')
    result = model(wav_file)
    print(result)