| | |
| | | device="cpu", |
| | | ) |
| | | |
# Export the model constructed above to both ONNX and TorchScript formats.
# NOTE: each export() call rebinds `res`; only the last result is printed.
res = model.export(type="onnx", quantize=False)
res = model.export(type="torchscripts", quantize=False)
print(res)


# method2, inference from local path
from funasr import AutoModel

# Load Paraformer from a local ModelScope cache directory on CPU.
# assumes the path exists on this machine — TODO confirm before running
model = AutoModel(
    model="/Users/zhifu/.cache/modelscope/hub/iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
    device="cpu",
)

# Export the locally-loaded model to ONNX (no quantization).
res = model.export(type="onnx", quantize=False)
print(res)
| | |
| | | ) |
| | | elif type == 'torchscripts': |
| | | device = 'cuda' if torch.cuda.is_available() else 'cpu' |
| | | print("Exporting torchscripts on device {}".format(device)) |
| | | _torchscripts( |
| | | m, |
| | | path=export_dir, |
| | |
| | | START_END_THRESHOLD = 5 |
| | | MAX_TOKEN_DURATION = 30 |
| | | TIME_RATE = 10.0 * 6 / 1000 / 3 # 3 times upsampled |
| | | cif_peak = us_cif_peak.reshape(-1) |
| | | cif_peak = us_cif_peak.reshape(-1).cpu() |
| | | num_frames = cif_peak.shape[-1] |
| | | if char_list[-1] == "</s>": |
| | | char_list = char_list[:-1] |