majic31
2024-12-24 23e7ddebccd3b05cf7ef89809bcfe565ad6dfa1f
examples/industrial_data_pretraining/sense_voice/demo.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# Copyright FunASR (https://github.com/FunAudioLLM/SenseVoice). All Rights Reserved.
#  MIT License  (https://opensource.org/licenses/MIT)
from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess
# Model to load: the published ModelScope id. A developer-local checkout path
# was previously assigned here and immediately overwritten; keep it only as a
# commented-out convenience for offline testing.
# model_dir = "/Users/zhifu/Downloads/modelscope_models/SenseVoiceSmall"
model_dir = "iic/SenseVoiceSmall"
model = AutoModel(
@@ -19,40 +18,57 @@
# en: transcribe the bundled English sample with automatic language detection.
# NOTE: the diff residue had each changed kwarg twice (old + new line), which
# is a SyntaxError ("keyword argument repeated"); only the new values are kept.
res = model.generate(
    input=f"{model.model_path}/example/en.mp3",
    cache={},
    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
    use_itn=True,  # inverse text normalization (digits, punctuation)
    batch_size_s=60,
    merge_vad=True,  # merge short VAD-split segments before recognition
    merge_length_s=15,
)
text = rich_transcription_postprocess(res[0]["text"])
print(text)
# en with timestamp: same English sample, additionally requesting per-token
# timestamps in the result. Duplicated old/new diff lines removed (repeated
# keyword arguments are a SyntaxError).
res = model.generate(
    input=f"{model.model_path}/example/en.mp3",
    cache={},
    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
    use_itn=True,
    batch_size_s=60,
    merge_vad=True,  # merge short VAD-split segments before recognition
    merge_length_s=15,
    output_timestamp=True,
)
print(res)
text = rich_transcription_postprocess(res[0]["text"])
print(text)
# NOTE(review): debug early-exit left in the demo. `raise "exit"` is a
# TypeError in Python 3 (exceptions must derive from BaseException);
# SystemExit stops the script here as apparently intended.
raise SystemExit("exit")
# zh: transcribe the bundled Chinese sample with automatic language detection.
# The duplicated `language=` line (diff residue; repeated keyword argument is
# a SyntaxError) has been removed.
res = model.generate(
    input=f"{model.model_path}/example/zh.mp3",
    cache={},
    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
    use_itn=True,
    batch_size_s=60,
    merge_vad=True,  # merge short VAD-split segments before recognition
    merge_length_s=15,
)
text = rich_transcription_postprocess(res[0]["text"])
print(text)
# zh with timestamp: Chinese sample again, this time also asking the model
# for per-token timestamps; the raw result is printed before postprocessing.
zh_ts_kwargs = dict(
    input=f"{model.model_path}/example/zh.mp3",
    cache={},
    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
    use_itn=True,
    batch_size_s=60,
    merge_vad=True,  #
    merge_length_s=15,
    output_timestamp=True,
)
res = model.generate(**zh_ts_kwargs)
print(res)
text = rich_transcription_postprocess(res[0]["text"])
print(text)
@@ -60,7 +76,7 @@
res = model.generate(
    input=f"{model.model_path}/example/yue.mp3",
    cache={},
    language="auto",  # "zn", "en", "yue", "ja", "ko", "nospeech"
    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
    use_itn=True,
    batch_size_s=60,
    merge_vad=True,  #
@@ -73,7 +89,7 @@
res = model.generate(
    input=f"{model.model_path}/example/ja.mp3",
    cache={},
    language="auto",  # "zn", "en", "yue", "ja", "ko", "nospeech"
    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
    use_itn=True,
    batch_size_s=60,
    merge_vad=True,  #
@@ -87,7 +103,7 @@
res = model.generate(
    input=f"{model.model_path}/example/ko.mp3",
    cache={},
    language="auto",  # "zn", "en", "yue", "ja", "ko", "nospeech"
    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
    use_itn=True,
    batch_size_s=60,
    merge_vad=True,  #