From 3a4281f4959534b1bf5d01acf0085f4f8e6f2ec8 Mon Sep 17 00:00:00 2001
From: wuhongsheng <664116298@qq.com>
Date: Fri, 05 Jul 2024 00:55:32 +0800
Subject: [PATCH] 优化speakid和语句匹配逻辑,部分解决speakid不从0递增问题 (#1870)

---
 fun_text_processing/text_normalization/ru/verbalizers/ordinal.py |    4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/fun_text_processing/text_normalization/ru/verbalizers/ordinal.py b/fun_text_processing/text_normalization/ru/verbalizers/ordinal.py
index ad6c914..564d361 100644
--- a/fun_text_processing/text_normalization/ru/verbalizers/ordinal.py
+++ b/fun_text_processing/text_normalization/ru/verbalizers/ordinal.py
@@ -1,5 +1,3 @@
-
-
 import pynini
 from fun_text_processing.text_normalization.en.graph_utils import DAMO_NOT_QUOTE, GraphFst
 from pynini.lib import pynutil
@@ -19,6 +17,6 @@
         super().__init__(name="ordinal", kind="verbalize", deterministic=deterministic)
 
         value = pynini.closure(DAMO_NOT_QUOTE)
-        graph = pynutil.delete("integer: \"") + value + pynutil.delete("\"")
+        graph = pynutil.delete('integer: "') + value + pynutil.delete('"')
         delete_tokens = self.delete_tokens(graph)
         self.fst = delete_tokens.optimize()

--
Gitblit v1.9.1