Xuning Tan
2025-10-01 8c6d1642f5fbf1d55edb324e35e9fa6e89da25a1
examples/industrial_data_pretraining/paraformer/export.py
@@ -8,21 +8,22 @@
# method1: export directly from a ModelScope model id (downloaded/cached on first use).
from funasr import AutoModel

wav_file = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav"

# NOTE(review): the original script first constructed a plain paraformer-large
# model and immediately rebound `model` to the contextual variant below, so the
# first (expensive) AutoModel construction was dead code — it has been removed.
model = AutoModel(
    model="iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404",
)

# Export twice: ONNX (the wav file presumably supplies example inputs for
# tracing — confirm against funasr docs), then TorchScript. Both exports run
# for their side effects; only the TorchScript result is printed because the
# second call rebinds `res`.
res = model.export(input=wav_file, type="onnx", quantize=False)
res = model.export(type="torchscript", quantize=False)
# res = model.export(type="bladedisc", input=f"{model.model_path}/example/asr_example.wav")
print(res)
# method2: export from a local checkpoint directory instead of a model id.
from funasr import AutoModel

wav_file = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav"

# NOTE(review): this path is machine-specific (one developer's ModelScope
# cache) — replace it with your own local model directory before running.
model = AutoModel(
    model="/Users/zhifu/.cache/modelscope/hub/iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
)

# Export the locally loaded model to ONNX and print the result paths.
res = model.export(input=wav_file, type="onnx", quantize=False)
print(res)