From 30aa982bf29ceefaf52c0013c12c19adc57dea0e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期四, 27 四月 2023 21:11:04 +0800
Subject: [PATCH] docs
---
egs_modelscope/tp/TEMPLATE/README.md | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/egs_modelscope/tp/TEMPLATE/README.md b/egs_modelscope/tp/TEMPLATE/README.md
index 8d75581..d33d4e6 100644
--- a/egs_modelscope/tp/TEMPLATE/README.md
+++ b/egs_modelscope/tp/TEMPLATE/README.md
@@ -8,12 +8,12 @@
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
-inference_pipline = pipeline(
+inference_pipeline = pipeline(
task=Tasks.speech_timestamp,
model='damo/speech_timestamp_prediction-v1-16k-offline',
output_dir=None)
-rec_result = inference_pipline(
+rec_result = inference_pipeline(
audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_timestamps.wav',
    text_in='一 个 东 太 平 洋 国 家 为 什 么 跑 到 西 太 平 洋 来 了 呢',)
print(rec_result)
@@ -23,15 +23,15 @@
-#### API-reference
-##### Define pipeline
+### API-reference
+#### Define pipeline
- `task`: `Tasks.speech_timestamp`
- `model`: model name in [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_models.html#pretrained-models-on-modelscope), or model path in local disk
- `ngpu`: `1` (Default), decoding on GPU. If ngpu=0, decoding on CPU
- `ncpu`: `1` (Default), sets the number of threads used for intraop parallelism on CPU
- `output_dir`: `None` (Default), the output path of results if set
- `batch_size`: `1` (Default), batch size when decoding
-##### Infer pipeline
+#### Infer pipeline
- `audio_in`: the input speech to predict, which could be:
- wav_path, `e.g.`: asr_example.wav (wav in local or url),
- wav.scp, kaldi style wav list (`wav_id wav_path`), `e.g.`:
--
Gitblit v1.9.1