From 28ccfbfc51068a663a80764e14074df5edf2b5ba Mon Sep 17 00:00:00 2001
From: kongdeqiang <kongdeqiang960204@163.com>
Date: Fri, 13 Mar 2026 17:41:41 +0800
Subject: [PATCH] Reformat run_evaluate.py: double-quoted strings and Black-style line wrapping
---
fun_text_processing/text_normalization/run_evaluate.py | 49 +++++++++++++++++++++++++++++--------------------
1 file changed, 29 insertions(+), 20 deletions(-)
diff --git a/fun_text_processing/text_normalization/run_evaluate.py b/fun_text_processing/text_normalization/run_evaluate.py
index b206940..470a961 100644
--- a/fun_text_processing/text_normalization/run_evaluate.py
+++ b/fun_text_processing/text_normalization/run_evaluate.py
@@ -1,5 +1,3 @@
-
-
from argparse import ArgumentParser
from fun_text_processing.text_normalization.data_loader_utils import (
@@ -12,18 +10,22 @@
from fun_text_processing.text_normalization.normalize import Normalizer
-'''
+"""
Runs Evaluation on data in the format of : <semiotic class>\t<unnormalized text>\t<`self` if trivial class or normalized text>
like the Google text normalization data https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish
-'''
+"""
def parse_args():
parser = ArgumentParser()
parser.add_argument("--input", help="input file path", type=str)
- parser.add_argument("--lang", help="language", choices=['en'], default="en", type=str)
+ parser.add_argument("--lang", help="language", choices=["en"], default="en", type=str)
parser.add_argument(
- "--input_case", help="input capitalization", choices=["lower_cased", "cased"], default="cased", type=str
+ "--input_case",
+ help="input capitalization",
+ choices=["lower_cased", "cased"],
+ default="cased",
+ type=str,
)
parser.add_argument(
"--cat",
@@ -33,7 +35,9 @@
default=None,
choices=known_types,
)
- parser.add_argument("--filter", action='store_true', help="clean data for normalization purposes")
+ parser.add_argument(
+ "--filter", action="store_true", help="clean data for normalization purposes"
+ )
return parser.parse_args()
@@ -41,7 +45,7 @@
# Example usage:
# python run_evaluate.py --input=<INPUT> --cat=<CATEGORY> --filter
args = parse_args()
- if args.lang == 'en':
+ if args.lang == "en":
from fun_text_processing.text_normalization.en.clean_eval_data import filter_loaded_data
file_path = args.input
normalizer = Normalizer(input_case=args.input_case, lang=args.lang)
@@ -76,30 +80,35 @@
preds=tokens_prediction, labels=tokens_normalized, input=tokens_un_normalized
)
print(" - Accuracy: " + str(token_accuracy[token_type]))
- token_count_per_type = {token_type: len(tokens_per_type[token_type][0]) for token_type in tokens_per_type}
+ token_count_per_type = {
+ token_type: len(tokens_per_type[token_type][0]) for token_type in tokens_per_type
+ }
token_weighted_accuracy = [
- token_count_per_type[token_type] * accuracy for token_type, accuracy in token_accuracy.items()
+ token_count_per_type[token_type] * accuracy
+ for token_type, accuracy in token_accuracy.items()
]
print("- Accuracy: " + str(sum(token_weighted_accuracy) / sum(token_count_per_type.values())))
- print(" - Total: " + str(sum(token_count_per_type.values())), '\n')
+ print(" - Total: " + str(sum(token_count_per_type.values())), "\n")
- print(" - Total: " + str(sum(token_count_per_type.values())), '\n')
+ print(" - Total: " + str(sum(token_count_per_type.values())), "\n")
for token_type in token_accuracy:
if token_type not in known_types:
raise ValueError("Unexpected token type: " + token_type)
if args.category is None:
- c1 = ['Class', 'sent level'] + known_types
- c2 = ['Num Tokens', len(sentences_normalized)] + [
- token_count_per_type[known_type] if known_type in tokens_per_type else '0' for known_type in known_types
+ c1 = ["Class", "sent level"] + known_types
+ c2 = ["Num Tokens", len(sentences_normalized)] + [
+ token_count_per_type[known_type] if known_type in tokens_per_type else "0"
+ for known_type in known_types
]
- c3 = ['Normalization', sentences_accuracy] + [
- token_accuracy[known_type] if known_type in token_accuracy else '0' for known_type in known_types
+ c3 = ["Normalization", sentences_accuracy] + [
+ token_accuracy[known_type] if known_type in token_accuracy else "0"
+ for known_type in known_types
]
for i in range(len(c1)):
- print(f'{str(c1[i]):10s} | {str(c2[i]):10s} | {str(c3[i]):5s}')
+ print(f"{str(c1[i]):10s} | {str(c2[i]):10s} | {str(c3[i]):5s}")
else:
- print(f'numbers\t{token_count_per_type[args.category]}')
- print(f'Normalization\t{token_accuracy[args.category]}')
+ print(f"numbers\t{token_count_per_type[args.category]}")
+ print(f"Normalization\t{token_accuracy[args.category]}")
--
Gitblit v1.9.1
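
Editor's note, not part of the patch: a minimal sketch of the tab-separated evaluation input format described in the module docstring above (<semiotic class>\t<unnormalized text>\t<`self` if trivial class or normalized text>). The sample lines are hypothetical illustrations, not taken from the Google text normalization dataset.

    # Illustrative sketch only; the sample lines below are hypothetical.
    samples = [
        "PLAIN\thello\tself",    # trivial class: the token passes through unchanged
        "CARDINAL\t7\tseven",    # non-trivial class: normalized form in the third field
    ]
    for line in samples:
        semiotic_class, unnormalized, normalized = line.split("\t")
        if normalized == "self":
            normalized = unnormalized
        print(f"{semiotic_class}: {unnormalized} -> {normalized}")

With data in this layout, an evaluation run would follow the example usage already shown in the script: python run_evaluate.py --input=<INPUT> --cat=<CATEGORY> --filter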