zhifu gao
2023-03-13 a4d87a7fff1eccd192b9a3637ecb185b009c7977
Merge pull request #217 from alibaba-damo-academy/dev_wjm

Dev wjm
2个文件已修改
5个文件已添加
190 ■■■■■ 已修改文件
egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vadrealtime-vocab272727/infer.py 4 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
tests/test_asr_inference_pipeline.py 4 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
tests/test_asr_vad_punc_inference_pipeline.py 32 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
tests/test_lm_pipeline.py 25 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
tests/test_punctuation_pipeline.py 43 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
tests/test_sv_inference_pipeline.py 47 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
tests/test_vad_inference_pipeline.py 35 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vadrealtime-vocab272727/infer.py
@@ -5,7 +5,7 @@
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
inference_pipline = pipeline(
inference_pipeline = pipeline(
    task=Tasks.punctuation,
    model='damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727',
    model_revision="v1.0.0",
@@ -17,7 +17,7 @@
cache_out = []
rec_result_all="outputs:"
for vad in vads:
    rec_result = inference_pipline(text_in=vad, cache=cache_out)
    rec_result = inference_pipeline(text_in=vad, cache=cache_out)
    #print(rec_result)
    cache_out = rec_result['cache']
    rec_result_all += rec_result['text']
tests/test_asr_inference_pipeline.py
@@ -451,8 +451,8 @@
    def test_uniasr_2pass_zhcn_16k_common_vocab8358_offline(self):
        inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline')
            task=Tasks.auto_speech_recognition,
            model='damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline')
        rec_result = inference_pipeline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav',
            param_dict={"decoding_model": "offline"})
tests/test_asr_vad_punc_inference_pipeline.py
New file
@@ -0,0 +1,32 @@
import unittest
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
logger = get_logger()
class TestParaformerInferencePipelines(unittest.TestCase):
    """Smoke tests for the Paraformer ASR + VAD + punctuation pipeline."""

    def test_funasr_path(self):
        """Log which funasr installation is in use, to diagnose environment issues."""
        import os

        import funasr
        location = "run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__)
        logger.info(location)

    def test_inference_pipeline(self):
        """Run end-to-end recognition on a hosted sample clip and log the result."""
        pipeline_config = dict(
            task=Tasks.auto_speech_recognition,
            model='damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
            model_revision="v1.2.1",
            vad_model='damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',
            vad_model_revision="v1.1.8",
            punc_model='damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
            punc_model_revision="v1.1.6",
            ngpu=1,
        )
        recognizer = pipeline(**pipeline_config)
        result = recognizer(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
        logger.info("asr_vad_punc inference result: {0}".format(result))


if __name__ == '__main__':
    unittest.main()
tests/test_lm_pipeline.py
New file
@@ -0,0 +1,25 @@
import unittest
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
logger = get_logger()
class TestTransformerInferencePipelines(unittest.TestCase):
    """Smoke tests for the transformer language-model scoring pipeline."""

    def test_funasr_path(self):
        """Log which funasr installation is in use, to diagnose environment issues."""
        import os

        import funasr
        logger.info(
            "run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))

    def test_inference_pipeline(self):
        """Score a short mixed Chinese/English text and log the result."""
        scorer = pipeline(
            task=Tasks.language_score_prediction,
            model='damo/speech_transformer_lm_zh-cn-common-vocab8404-pytorch',
        )
        result = scorer(text_in="hello 大 家 好 呀")
        logger.info("lm inference result: {0}".format(result))


if __name__ == '__main__':
    unittest.main()
tests/test_punctuation_pipeline.py
New file
@@ -0,0 +1,43 @@
import unittest
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
logger = get_logger()
class TestTransformerInferencePipelines(unittest.TestCase):
    """Smoke tests for the CT-Transformer punctuation restoration pipelines."""

    def test_funasr_path(self):
        """Log which funasr installation is in use, to diagnose environment issues."""
        import os

        import funasr
        logger.info(
            "run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))

    def test_inference_pipeline(self):
        """Punctuate text read from an example file and log the result."""
        punctuator = pipeline(
            task=Tasks.punctuation,
            model='damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
            model_revision="v1.1.7",
        )
        sample_path = "./egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/data/punc_example.txt"
        result = punctuator(text_in=sample_path)
        logger.info("punctuation inference result: {0}".format(result))

    def test_vadrealtime_inference_pipeline(self):
        """Feed VAD-style text segments through the realtime model, carrying the cache between calls."""
        punctuator = pipeline(
            task=Tasks.punctuation,
            model='damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727',
            model_revision="v1.0.0",
        )
        # '|' marks the VAD segment boundaries in this synthetic transcript.
        inputs = "跨境河流是养育沿岸|人民的生命之源长期以来为帮助下游地区防灾减灾中方技术人员|在上游地区极为恶劣的自然条件下克服巨大困难甚至冒着生命危险|向印方提供汛期水文资料处理紧急事件中方重视印方在跨境河流问题上的关切|愿意进一步完善双方联合工作机制|凡是|中方能做的我们|都会去做而且会做得更好我请印度朋友们放心中国在上游的|任何开发利用都会经过科学|规划和论证兼顾上下游的利益"
        cache = []
        pieces = ["outputs:"]
        for segment in inputs.split("|"):
            result = punctuator(text_in=segment, cache=cache)
            # Carry the model cache forward so punctuation context spans segments.
            cache = result['cache']
            pieces.append(result['text'])
        logger.info("punctuation inference result: {0}".format("".join(pieces)))


if __name__ == '__main__':
    unittest.main()
tests/test_sv_inference_pipeline.py
New file
@@ -0,0 +1,47 @@
import unittest
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
logger = get_logger()
class TestXVectorInferencePipelines(unittest.TestCase):
    def test_funasr_path(self):
        import funasr
        import os
        logger.info("run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))
    def test_inference_pipeline(self):
        inference_sv_pipline = pipeline(
            task=Tasks.speaker_verification,
            model='damo/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch'
        )
        # 提取不同句子的说话人嵌入码
        rec_result = inference_sv_pipline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_enroll.wav')
        enroll = rec_result["spk_embedding"]
        rec_result = inference_sv_pipline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_same.wav')
        same = rec_result["spk_embedding"]
        rec_result = inference_sv_pipline(
            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_different.wav')
        different = rec_result["spk_embedding"]
        # 对相同的说话人计算余弦相似度
        sv_threshold = 0.9465
        same_cos = np.sum(enroll * same) / (np.linalg.norm(enroll) * np.linalg.norm(same))
        same_cos = max(same_cos - sv_threshold, 0.0) / (1.0 - sv_threshold) * 100.0
        logger.info("Similarity: {}".format(same_cos))
        # 对不同的说话人计算余弦相似度
        diff_cos = np.sum(enroll * different) / (np.linalg.norm(enroll) * np.linalg.norm(different))
        diff_cos = max(diff_cos - sv_threshold, 0.0) / (1.0 - sv_threshold) * 100.0
        logger.info("Similarity: {}".format(diff_cos))
if __name__ == '__main__':
    unittest.main()
tests/test_vad_inference_pipeline.py
New file
@@ -0,0 +1,35 @@
import unittest
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
logger = get_logger()
class TestFSMNInferencePipelines(unittest.TestCase):
    """Smoke tests for the FSMN voice-activity-detection pipelines."""

    def _run_vad(self, model_id, audio_url):
        """Build a VAD pipeline for *model_id*, run it on *audio_url*, and log the result."""
        detector = pipeline(
            task=Tasks.voice_activity_detection,
            model=model_id,
        )
        result = detector(audio_in=audio_url)
        logger.info("vad inference result: {0}".format(result))

    def test_funasr_path(self):
        """Log which funasr installation is in use, to diagnose environment issues."""
        import os

        import funasr
        logger.info(
            "run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))

    def test_8k(self):
        """Exercise the 8 kHz FSMN VAD model."""
        self._run_vad(
            "damo/speech_fsmn_vad_zh-cn-8k-common",
            'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example_8k.wav')

    def test_16k(self):
        """Exercise the 16 kHz FSMN VAD model."""
        self._run_vad(
            "damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
            'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav')


if __name__ == '__main__':
    unittest.main()