From 91eb42a390e143087eea261ea57a86b5fc8d32e8 Mon Sep 17 00:00:00 2001
From: 夜雨飘零 <yeyupiaoling@foxmail.com>
Date: 星期三, 13 九月 2023 09:30:38 +0800
Subject: [PATCH] add python http code (#940)

---
 funasr/runtime/python/http/client.py        |   34 ++++++++
 funasr/runtime/python/http/README.md        |   47 +++++++++++
 funasr/runtime/python/http/requirements.txt |    6 +
 funasr/runtime/python/http/server.py        |  107 ++++++++++++++++++++++++++
 4 files changed, 194 insertions(+), 0 deletions(-)

diff --git a/funasr/runtime/python/http/README.md b/funasr/runtime/python/http/README.md
new file mode 100644
index 0000000..5b3fbb3
--- /dev/null
+++ b/funasr/runtime/python/http/README.md
@@ -0,0 +1,47 @@
+# Service with http-python
+
+## Server
+
+1. Install requirements
+
+```shell
+cd funasr/runtime/python/http
+pip install -r requirements.txt
+```
+
+2. Start server
+
+```shell
+python server.py --port 8000
+```
+
+More parameters:
+```shell
+python server.py \
+--host [host ip] \
+--port [server port] \
+--asr_model [asr model_name] \
+--punc_model [punc model_name] \
+--ngpu [0 or 1] \
+--ncpu [1 or 4] \
+--certfile [path of certfile for ssl] \
+--keyfile [path of keyfile for ssl] \
+--temp_dir [upload file temp dir] 
+```
+
+## Client
+
+```shell
+# get test audio file
+wget https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav
+python client.py --host=127.0.0.1 --port=8000 --audio_path=asr_example_zh.wav
+```
+
+More parameters:
+```shell
+python client.py \
+--host [server ip] \
+--port [server port] \
+--add_pun [add punctuation to the result, 0 or 1] \
+--audio_path [path of the audio file to recognize] 
+```
diff --git a/funasr/runtime/python/http/client.py b/funasr/runtime/python/http/client.py
new file mode 100644
index 0000000..09e9eea
--- /dev/null
+++ b/funasr/runtime/python/http/client.py
@@ -0,0 +1,34 @@
import argparse

import requests

# ---------------------------------------------------------------------------
# Command-line options for the FunASR HTTP demo client.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--host",
                    type=str,
                    default="127.0.0.1",
                    required=False,
                    help="server ip")
parser.add_argument("--port",
                    type=int,
                    default=8000,
                    required=False,
                    help="server port")
parser.add_argument("--add_pun",
                    type=int,
                    default=1,
                    required=False,
                    help="add punctuation to the result (0 or 1)")
parser.add_argument("--audio_path",
                    type=str,
                    default='asr_example_zh.wav',
                    required=False,
                    help="path of the audio file to recognize")
args = parser.parse_args()


url = f'http://{args.host}:{args.port}/recognition'
data = {'add_pun': args.add_pun}

# Bug fix: the original opened the audio file inline in the `files` tuple and
# never closed it; use a context manager so the handle is released even when
# the request raises.
with open(args.audio_path, 'rb') as audio_file:
    files = [('audio', ('file', audio_file, 'application/octet-stream'))]
    response = requests.post(url, data=data, files=files)
print(response.text)
diff --git a/funasr/runtime/python/http/requirements.txt b/funasr/runtime/python/http/requirements.txt
new file mode 100644
index 0000000..bf55b9e
--- /dev/null
+++ b/funasr/runtime/python/http/requirements.txt
@@ -0,0 +1,6 @@
+modelscope>=1.8.4
+fastapi>=0.95.1
+ffmpeg-python
+aiofiles
+uvicorn
+requests
\ No newline at end of file
diff --git a/funasr/runtime/python/http/server.py b/funasr/runtime/python/http/server.py
new file mode 100644
index 0000000..283cf0a
--- /dev/null
+++ b/funasr/runtime/python/http/server.py
@@ -0,0 +1,107 @@
import argparse
import logging
import os
import random
import time

import aiofiles
import ffmpeg
import uvicorn
from fastapi import FastAPI, File, UploadFile, Body
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger

# Silence modelscope's internal logger so only this script's output is shown.
logger = get_logger(log_level=logging.CRITICAL)
logger.setLevel(logging.CRITICAL)

# ---------------------------------------------------------------------------
# Command-line configuration for the HTTP ASR service.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0", required=False,
                    help="host ip, localhost, 0.0.0.0")
parser.add_argument("--port", type=int, default=8000, required=False,
                    help="server port")
parser.add_argument("--asr_model", type=str,
                    default="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
                    help="model from modelscope")
parser.add_argument("--punc_model", type=str,
                    default="damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727",
                    help="model from modelscope")
parser.add_argument("--ngpu", type=int, default=1,
                    help="0 for cpu, 1 for gpu")
parser.add_argument("--ncpu", type=int, default=4,
                    help="cpu cores")
parser.add_argument("--certfile", type=str, default=None, required=False,
                    help="certfile for ssl")
parser.add_argument("--keyfile", type=str, default=None, required=False,
                    help="keyfile for ssl")
parser.add_argument("--temp_dir", type=str, default="temp_dir/", required=False,
                    help="temp dir")
args = parser.parse_args()

# Uploaded audio is staged on disk in this directory before decoding.
os.makedirs(args.temp_dir, exist_ok=True)

print("model loading")
# Speech-recognition pipeline — always loaded.
inference_pipeline_asr = pipeline(task=Tasks.auto_speech_recognition,
                                  model=args.asr_model,
                                  ngpu=args.ngpu,
                                  ncpu=args.ncpu,
                                  model_revision=None)
print(f'loaded asr models.')

# Punctuation pipeline is optional: start the server with --punc_model ""
# to skip loading it (inference_pipeline_punc is then None).
if args.punc_model != "":
    inference_pipeline_punc = pipeline(task=Tasks.punctuation,
                                       model=args.punc_model,
                                       model_revision="v1.0.2",
                                       ngpu=args.ngpu,
                                       ncpu=args.ncpu)
    print(f'loaded pun models.')
else:
    inference_pipeline_punc = None

app = FastAPI(title="FunASR")
+
+
@app.post("/recognition")
async def api_recognition(audio: UploadFile = File(..., description="audio file"),
                          add_pun: int = Body(1, description="add punctuation", embed=True)):
    """Recognize one uploaded audio file and return its transcript.

    The upload is staged to a uniquely named temp file, transcoded by ffmpeg
    to 16 kHz mono s16le PCM, then run through the ASR pipeline and — when
    requested and available — the punctuation pipeline.

    Returns a dict: {"results": <transcript text>, "code": 0}.
    """
    # Keep the client's extension so ffmpeg can sniff the container format.
    suffix = audio.filename.split('.')[-1]
    audio_path = f'{args.temp_dir}/{int(time.time() * 1000)}_{random.randint(100, 999)}.{suffix}'
    try:
        async with aiofiles.open(audio_path, 'wb') as out_file:
            content = await audio.read()
            await out_file.write(content)
        # Decode/resample to the raw PCM the ASR model expects.
        audio_bytes, _ = (
            ffmpeg.input(audio_path, threads=0)
            .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=16000)
            .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
        )
        rec_result = inference_pipeline_asr(audio_in=audio_bytes, param_dict={})
        # Bug fix: the original crashed with a TypeError when the server was
        # started with --punc_model "" (inference_pipeline_punc is None) but a
        # client still sent add_pun=1; fall back to the raw transcript instead.
        if add_pun and inference_pipeline_punc is not None:
            rec_result = inference_pipeline_punc(text_in=rec_result['text'],
                                                 param_dict={'cache': list()})
        return {"results": rec_result['text'], "code": 0}
    finally:
        # Bug fix: temp uploads were never deleted and accumulated without
        # bound in temp_dir; remove the staged file even when decoding or
        # recognition raises.
        if os.path.exists(audio_path):
            os.remove(audio_path)
+
+
if __name__ == '__main__':
    # Launch the app under uvicorn. When --certfile/--keyfile are left at
    # their None defaults, uvicorn serves plain HTTP (no SSL).
    uvicorn.run(app,
                host=args.host,
                port=args.port,
                ssl_keyfile=args.keyfile,
                ssl_certfile=args.certfile)

--
Gitblit v1.9.1