From 33d3d2084403fd34b79c835d2f2fe04f6cd8f738 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期三, 13 九月 2023 09:33:54 +0800
Subject: [PATCH] Merge branch 'main' of github.com:alibaba-damo-academy/FunASR add

---
 funasr/bin/punc_infer.py |   93 +++++++++++++++++++++++++++-------------------
 1 files changed, 54 insertions(+), 39 deletions(-)

diff --git a/funasr/bin/punc_infer.py b/funasr/bin/punc_infer.py
index 41c4da3..9efeb5b 100644
--- a/funasr/bin/punc_infer.py
+++ b/funasr/bin/punc_infer.py
@@ -1,42 +1,33 @@
 #!/usr/bin/env python3
-import argparse
-import logging
-from pathlib import Path
-import sys
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
 from typing import Optional
-from typing import Sequence
-from typing import Tuple
 from typing import Union
-from typing import Any
-from typing import List
 
 import numpy as np
 import torch
-from typeguard import check_argument_types
+import os
 
+from funasr.build_utils.build_model_from_file import build_model_from_file
 from funasr.datasets.preprocessor import CodeMixTokenizerCommonPreprocessor
-from funasr.utils.cli_utils import get_commandline_args
-from funasr.tasks.punctuation import PunctuationTask
+from funasr.datasets.preprocessor import split_to_mini_sentence
 from funasr.torch_utils.device_funcs import to_device
 from funasr.torch_utils.forward_adaptor import ForwardAdaptor
-from funasr.torch_utils.set_all_random_seed import set_all_random_seed
-from funasr.utils import config_argparse
-from funasr.utils.types import str2triple_str
-from funasr.utils.types import str_or_none
-from funasr.datasets.preprocessor import split_to_mini_sentence
 
 
 class Text2Punc:
 
     def __init__(
-        self,
-        train_config: Optional[str],
-        model_file: Optional[str],
-        device: str = "cpu",
-        dtype: str = "float32",
+            self,
+            train_config: Optional[str],
+            model_file: Optional[str],
+            device: str = "cpu",
+            dtype: str = "float32",
     ):
         #  Build Model
-        model, train_args = PunctuationTask.build_model_from_file(train_config, model_file, device)
+        model, train_args = build_model_from_file(train_config, model_file, None, device, task_name="punc")
         self.device = device
         # Wrape model to make model.nll() data-parallel
         self.wrapped_model = ForwardAdaptor(model, "inference")
@@ -51,6 +42,11 @@
                 self.punc_list[i] = "？"
             elif self.punc_list[i] == "。":
                 self.period = i
+        self.seg_dict_file = None
+        self.seg_jieba = False
+        if "seg_jieba" in train_args:
+            self.seg_jieba = train_args.seg_jieba
+            self.seg_dict_file = os.path.dirname(model_file)+"/"+ "jieba_usr_dict"
         self.preprocessor = CodeMixTokenizerCommonPreprocessor(
             train=False,
             token_type=train_args.token_type,
@@ -60,6 +56,8 @@
             g2p_type=train_args.g2p,
             text_name="text",
             non_linguistic_symbols=train_args.non_linguistic_symbols,
+            seg_jieba=self.seg_jieba,
+            seg_dict_file=self.seg_dict_file
         )
 
     @torch.no_grad()
@@ -119,12 +117,25 @@
             new_mini_sentence_punc += [int(x) for x in punctuations_np]
             words_with_punc = []
             for i in range(len(mini_sentence)):
+                if (i==0 or self.punc_list[punctuations[i-1]] == "。" or self.punc_list[punctuations[i-1]] == "？") and len(mini_sentence[i][0].encode()) == 1:
+                    mini_sentence[i] = mini_sentence[i].capitalize()
+                if i == 0:
+                    if len(mini_sentence[i][0].encode()) == 1:
+                        mini_sentence[i] = " " + mini_sentence[i]
                 if i > 0:
                     if len(mini_sentence[i][0].encode()) == 1 and len(mini_sentence[i - 1][0].encode()) == 1:
                         mini_sentence[i] = " " + mini_sentence[i]
                 words_with_punc.append(mini_sentence[i])
                 if self.punc_list[punctuations[i]] != "_":
-                    words_with_punc.append(self.punc_list[punctuations[i]])
+                    punc_res = self.punc_list[punctuations[i]]
+                    if len(mini_sentence[i][0].encode()) == 1:
+                        if punc_res == "，":
+                            punc_res = ","
+                        elif punc_res == "。":
+                            punc_res = "."
+                        elif punc_res == "？":
+                            punc_res = "?"
+                    words_with_punc.append(punc_res)
             new_mini_sentence += "".join(words_with_punc)
             # Add Period for the end of the sentence
             new_mini_sentence_out = new_mini_sentence
@@ -133,23 +144,29 @@
                 if new_mini_sentence[-1] == "，" or new_mini_sentence[-1] == "、":
                     new_mini_sentence_out = new_mini_sentence[:-1] + "。"
                     new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
-                elif new_mini_sentence[-1] != "。" and new_mini_sentence[-1] != "？":
+                elif new_mini_sentence[-1] == ",":
+                    new_mini_sentence_out = new_mini_sentence[:-1] + "."
+                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
+                elif new_mini_sentence[-1] != "。" and new_mini_sentence[-1] != "？" and len(new_mini_sentence[-1].encode())==0:
+                    new_mini_sentence_out = new_mini_sentence + "。"
+                    new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
+                elif new_mini_sentence[-1] != "." and new_mini_sentence[-1] != "?" and len(new_mini_sentence[-1].encode())==1:
+                    new_mini_sentence_out = new_mini_sentence + "."
                     new_mini_sentence_punc_out = new_mini_sentence_punc[:-1] + [self.period]
         return new_mini_sentence_out, new_mini_sentence_punc_out
 
 
 class Text2PuncVADRealtime:
-    
+
     def __init__(
-        self,
-        train_config: Optional[str],
-        model_file: Optional[str],
-        device: str = "cpu",
-        dtype: str = "float32",
+            self,
+            train_config: Optional[str],
+            model_file: Optional[str],
+            device: str = "cpu",
+            dtype: str = "float32",
     ):
         #  Build Model
-        model, train_args = PunctuationTask.build_model_from_file(train_config, model_file, device)
+        model, train_args = build_model_from_file(train_config, model_file, None, device, task_name="punc")
         self.device = device
         # Wrape model to make model.nll() data-parallel
         self.wrapped_model = ForwardAdaptor(model, "inference")
@@ -174,7 +191,7 @@
             text_name="text",
             non_linguistic_symbols=train_args.non_linguistic_symbols,
         )
-    
+
     @torch.no_grad()
     def __call__(self, text: Union[list, str], cache: list, split_size=20):
         if cache is not None and len(cache) > 0:
@@ -211,7 +228,7 @@
             if indices.size()[0] != 1:
                 punctuations = torch.squeeze(indices)
             assert punctuations.size()[0] == len(mini_sentence)
-            
+
             # Search for the last Period/QuestionMark as cache
             if mini_sentence_i < len(mini_sentences) - 1:
                 sentenceEnd = -1
@@ -222,7 +239,7 @@
                         break
                     if last_comma_index < 0 and self.punc_list[punctuations[i]] == "，":
                         last_comma_index = i
-                
+
                 if sentenceEnd < 0 and len(mini_sentence) > cache_pop_trigger_limit and last_comma_index >= 0:
                     # The sentence it too long, cut off at a comma.
                     sentenceEnd = last_comma_index
@@ -231,11 +248,11 @@
                 cache_sent_id = mini_sentence_id[sentenceEnd + 1:]
                 mini_sentence = mini_sentence[0:sentenceEnd + 1]
                 punctuations = punctuations[0:sentenceEnd + 1]
-            
+
             punctuations_np = punctuations.cpu().numpy()
             sentence_punc_list += [self.punc_list[int(x)] for x in punctuations_np]
             sentence_words_list += mini_sentence
-        
+
         assert len(sentence_punc_list) == len(sentence_words_list)
         words_with_punc = []
         sentence_punc_list_out = []
@@ -252,7 +269,7 @@
                 if sentence_punc_list[i] != "_":
                     words_with_punc.append(sentence_punc_list[i])
         sentence_out = "".join(words_with_punc)
-        
+
         sentenceEnd = -1
         for i in range(len(sentence_punc_list) - 2, 1, -1):
             if sentence_punc_list[i] == "。" or sentence_punc_list[i] == "？":
@@ -263,5 +280,3 @@
             sentence_out = sentence_out[:-1]
             sentence_punc_list_out[-1] = "_"
         return sentence_out, sentence_punc_list_out, cache_out
-
-

--
Gitblit v1.9.1