Dev gzf (#1474)
* qwenaudio qwenaudiochat
* qwenaudio qwenaudiochat
* whisper
* whisper
* llm
* llm
* llm
* llm
* llm
* llm
* llm
* llm
* export onnx
* export onnx
* export onnx
* dingding
* dingding
* llm
* doc
* onnx
* onnx
* onnx
* onnx
* onnx
* onnx
* v1.0.15
* qwenaudio

res = model.export(quantize=False)
```

### Test ONNX
```python
# pip3 install -U funasr-onnx
from funasr_onnx import Paraformer
model_dir = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
model = Paraformer(model_dir, batch_size=1, quantize=True)

wav_path = ['~/.cache/modelscope/hub/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav']

result = model(wav_path)
print(result)
```

More examples can be found in the [demo](runtime/python/onnxruntime).

## Deployment Service
FunASR supports deploying pre-trained or further fine-tuned models as a service. Currently, it supports the following types of service deployment (a minimal client usage sketch follows the list):
- File transcription service, Mandarin, CPU version, done
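
As a rough usage sketch (not part of this diff): once the file transcription service is running, a recording can be sent to it with the WebSocket client shipped with the runtime. The script path, host, port, and flags below are assumptions and should be checked against the runtime deployment docs.

```shell
# Hypothetical client invocation; the script path, host, port, and flags are assumptions --
# see the runtime deployment docs for the exact server setup and client interface.
python3 runtime/python/websocket/funasr_wss_client.py \
    --host "127.0.0.1" --port 10095 \
    --mode offline \
    --audio_in "asr_example.wav"
```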

```shell
funasr-export ++model=paraformer ++quantize=false
```

### Export via the Python command
### Export from Python
```python
from funasr import AutoModel

model = AutoModel(model="paraformer")

res = model.export(quantize=False)
```

### Test ONNX
```python
# pip3 install -U funasr-onnx
from funasr_onnx import Paraformer
model_dir = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
model = Paraformer(model_dir, batch_size=1, quantize=True)

wav_path = ['~/.cache/modelscope/hub/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav']

result = model(wav_path)
print(result)
```

For more examples, please refer to the [demo](runtime/python/onnxruntime).

<a name="服务部署"></a>
## Service Deployment

if "file_path_metas" in conf_json:
    add_file_root_path(model_or_path, conf_json["file_path_metas"], cfg)
cfg.update(kwargs)
if "config" in cfg:
    config = OmegaConf.load(cfg["config"])
    kwargs = OmegaConf.merge(config, cfg)
    kwargs["model"] = config["model"]

"\npip3 install -U funasr -i https://mirror.sjtu.edu.cn/pypi/web/simple"

model = AutoModel(model=model_dir)
model_dir = model.export(type="onnx", quantize=quantize)
model_dir = model.export(quantize=quantize)

config_file = os.path.join(model_dir, 'punc.yaml')
config_file = os.path.join(model_dir, 'config.yaml')
config = read_yaml(config_file)
token_list = os.path.join(model_dir, 'tokens.json')
with open(token_list, 'r', encoding='utf-8') as f:
    token_list = json.load(f)

self.converter = TokenIDConverter(token_list)
self.ort_infer = OrtInferSession(model_file, device_id, intra_op_num_threads=intra_op_num_threads)
self.batch_size = 1
self.punc_list = config['punc_list']
self.punc_list = config["model_conf"]['punc_list']
self.period = 0
for i in range(len(self.punc_list)):
    if self.punc_list[i] == ",":
        self.punc_list[i] = "，"
    elif self.punc_list[i] == "?":
        self.punc_list[i] = "？"
    elif self.punc_list[i] == "。":
        self.period = i
if "seg_jieba" in config:
    self.seg_jieba = True
self.jieba_usr_dict_path = os.path.join(model_dir, 'jieba_usr_dict')
if os.path.exists(self.jieba_usr_dict_path):
    self.seg_jieba = True
    self.code_mix_split_words_jieba = code_mix_split_words_jieba(self.jieba_usr_dict_path)
else:
    self.seg_jieba = False

    intra_op_num_threads: int = 4,
    cache_dir: str = None
):
    super(CT_Transformer_VadRealtime, self).__init__(model_dir, batch_size, device_id, quantize, intra_op_num_threads, cache_dir=cache_dir)
    super().__init__(model_dir, batch_size, device_id, quantize, intra_op_num_threads, cache_dir=cache_dir)

def __call__(self, text: str, param_dict: map, split_size=20):
    cache_key = "cache"


MODULE_NAME = 'funasr_onnx'
VERSION_NUM = '0.2.5'
VERSION_NUM = '0.3.0'

setuptools.setup(
    name=MODULE_NAME,