From 28a19dbc4e85d3b8a4ec2ef7483bba64d422b43f Mon Sep 17 00:00:00 2001
From: aky15 <ankeyu.aky@11.17.44.249>
Date: Wed, 12 Apr 2023 18:03:06 +0800
Subject: [PATCH] Merge remote-tracking branch 'origin/main' into dev_aky

---
 funasr/bin/asr_inference_launch.py |  146 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 134 insertions(+), 12 deletions(-)

diff --git a/funasr/bin/asr_inference_launch.py b/funasr/bin/asr_inference_launch.py
old mode 100755
new mode 100644
index 9d328ad..53eee64
--- a/funasr/bin/asr_inference_launch.py
+++ b/funasr/bin/asr_inference_launch.py
@@ -6,6 +6,7 @@
 import logging
 import os
 import sys
+from typing import Union, Dict, Any
 
 from funasr.utils import config_argparse
 from funasr.utils.cli_utils import get_commandline_args
@@ -75,6 +76,21 @@
 
     group = parser.add_argument_group("The model configuration related")
     group.add_argument(
+        "--vad_infer_config",
+        type=str,
+        help="VAD infer configuration",
+    )
+    group.add_argument(
+        "--vad_model_file",
+        type=str,
+        help="VAD model parameter file",
+    )
+    group.add_argument(
+        "--cmvn_file",
+        type=str,
+        help="Global CMVN file",
+    )
+    group.add_argument(
         "--asr_train_config",
         type=str,
         help="ASR training configuration",
@@ -115,6 +131,11 @@
         help="Pretrained model tag. If specify this option, *_train_config and "
              "*_file will be overwritten",
     )
+    group.add_argument(
+        "--beam_search_config",
+        default={},
+        help="The keyword arguments for transducer beam search.",
+    )
 
     group = parser.add_argument_group("Beam-search related")
     group.add_argument(
@@ -146,12 +167,47 @@
     group.add_argument(
         "--ctc_weight",
         type=float,
-        default=0.5,
+        default=0.0,
         help="CTC weight in joint decoding",
     )
     group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
     group.add_argument("--ngram_weight", type=float, default=0.9, help="ngram weight")
     group.add_argument("--streaming", type=str2bool, default=False)
+    group.add_argument("--simu_streaming", type=str2bool, default=False)
+    group.add_argument("--chunk_size", type=int, default=16)
+    group.add_argument("--left_context", type=int, default=16)
+    group.add_argument("--right_context", type=int, default=0)
+    group.add_argument(
+        "--display_partial_hypotheses",
+        type=bool,
+        default=False,
+        help="Whether to display partial hypotheses during chunk-by-chunk inference.",
+    )    
+   
+    group = parser.add_argument_group("Dynamic quantization related")
+    group.add_argument(
+        "--quantize_asr_model",
+        type=bool,
+        default=False,
+        help="Apply dynamic quantization to ASR model.",
+    )
+    group.add_argument(
+        "--quantize_modules",
+        nargs="*",
+        default=None,
+        help="""Module names to apply dynamic quantization on.
+        The module names are provided as a list, where each name is separated
+        by a comma (e.g.: --quantize-config=[Linear,LSTM,GRU]).
+        Each specified name should be an attribute of 'torch.nn', e.g.:
+        torch.nn.Linear, torch.nn.LSTM, torch.nn.GRU, ...""",
+    )
+    group.add_argument(
+        "--quantize_dtype",
+        type=str,
+        default="qint8",
+        choices=["float16", "qint8"],
+        help="Dtype for dynamic quantization.",
+    )    
 
     group = parser.add_argument_group("Text converter related")
     group.add_argument(
@@ -181,6 +237,82 @@
     return parser
 
 
+
+def inference_launch(**kwargs):
+    if 'mode' in kwargs:
+        mode = kwargs['mode']
+    else:
+        logging.info("Unknown decoding mode.")
+        return None
+    if mode == "asr":
+        from funasr.bin.asr_inference import inference_modelscope
+        return inference_modelscope(**kwargs)
+    elif mode == "uniasr":
+        from funasr.bin.asr_inference_uniasr import inference_modelscope
+        return inference_modelscope(**kwargs)
+    elif mode == "uniasr_vad":
+        from funasr.bin.asr_inference_uniasr_vad import inference_modelscope
+        return inference_modelscope(**kwargs)
+    elif mode == "paraformer":
+        from funasr.bin.asr_inference_paraformer import inference_modelscope
+        return inference_modelscope(**kwargs)
+    elif mode == "paraformer_streaming":
+        from funasr.bin.asr_inference_paraformer_streaming import inference_modelscope
+        return inference_modelscope(**kwargs)
+    elif mode == "paraformer_vad":
+        from funasr.bin.asr_inference_paraformer_vad import inference_modelscope
+        return inference_modelscope(**kwargs)
+    elif mode == "paraformer_punc":
+        logging.info("Unknown decoding mode: {}".format(mode))
+        return None
+    elif mode == "paraformer_vad_punc":
+        from funasr.bin.asr_inference_paraformer_vad_punc import inference_modelscope
+        return inference_modelscope(**kwargs)
+    elif mode == "vad":
+        from funasr.bin.vad_inference import inference_modelscope
+        return inference_modelscope(**kwargs)
+    elif mode == "mfcca":
+        from funasr.bin.asr_inference_mfcca import inference_modelscope
+        return inference_modelscope(**kwargs)
+    elif mode == "rnnt":
+        from funasr.bin.asr_inference_rnnt import inference_modelscope
+        return inference_modelscope(**kwargs)
+    else:
+        logging.info("Unknown decoding mode: {}".format(mode))
+        return None
+
+def inference_launch_funasr(**kwargs):
+    if 'mode' in kwargs:
+        mode = kwargs['mode']
+    else:
+        logging.info("Unknown decoding mode.")
+        return None
+    if mode == "asr":
+        from funasr.bin.asr_inference import inference
+        return inference(**kwargs)
+    elif mode == "uniasr":
+        from funasr.bin.asr_inference_uniasr import inference
+        return inference(**kwargs)
+    elif mode == "paraformer":
+        from funasr.bin.asr_inference_paraformer import inference
+        return inference(**kwargs)
+    elif mode == "paraformer_vad_punc":
+        from funasr.bin.asr_inference_paraformer_vad_punc import inference
+        return inference(**kwargs)
+    elif mode == "vad":
+        from funasr.bin.vad_inference import inference
+        return inference(**kwargs)
+    elif mode == "mfcca":
+        from funasr.bin.asr_inference_mfcca import inference_modelscope
+        return inference_modelscope(**kwargs)
+    elif mode == "rnnt":
+        from funasr.bin.asr_inference_rnnt import inference
+        return inference(**kwargs)
+    else:
+        logging.info("Unknown decoding mode: {}".format(mode))
+        return None
+
+
 def main(cmd=None):
     print(get_commandline_args(), file=sys.stderr)
     parser = get_parser()
@@ -208,17 +340,7 @@
         os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
         os.environ["CUDA_VISIBLE_DEVICES"] = gpuid
 
-    if args.mode == "asr":
-        from funasr.bin.asr_inference import inference
-        inference(**kwargs)
-    elif args.mode == "uniasr":
-        from funasr.bin.asr_inference_uniasr import inference
-        inference(**kwargs)
-    elif args.mode == "paraformer":
-        from funasr.bin.asr_inference_paraformer import inference
-        inference(**kwargs)
-    else:
-        logging.info("Unknown decoding mode: {}".format(args.mode))
+    inference_launch_funasr(**kwargs)
 
 
 if __name__ == "__main__":

--
Gitblit v1.9.1