From 28ccfbfc51068a663a80764e14074df5edf2b5ba Mon Sep 17 00:00:00 2001
From: kongdeqiang <kongdeqiang960204@163.com>
Date: Fri, 13 Mar 2026 17:41:41 +0800
Subject: [PATCH] Clean up and reformat run_evaluate.py
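
Clean up fun_text_processing/inverse_text_normalization/run_evaluate.py:
switch to double-quoted strings with black-style line wrapping, make
--input a required argument, iterate tokens_per_type via .items(), use
dict.get() when building the summary table, and comment the
sentence-level, token-level, and summary evaluation steps.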
---
fun_text_processing/inverse_text_normalization/run_evaluate.py | 58 ++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 34 insertions(+), 24 deletions(-)
diff --git a/fun_text_processing/inverse_text_normalization/run_evaluate.py b/fun_text_processing/inverse_text_normalization/run_evaluate.py
index 7fb4a55..bea92fa 100644
--- a/fun_text_processing/inverse_text_normalization/run_evaluate.py
+++ b/fun_text_processing/inverse_text_normalization/run_evaluate.py
@@ -9,18 +9,20 @@
training_data_to_tokens,
)
-
-'''
+"""
Runs evaluation on data in the format: <semiotic class>\t<unnormalized text>\t<`self` if trivial class or normalized text>
like the Google text normalization data https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish
-'''
-
+"""
def parse_args():
parser = ArgumentParser()
- parser.add_argument("--input", help="input file path", type=str)
+ parser.add_argument("--input", help="input file path", type=str, required=True)
parser.add_argument(
- "--lang", help="language", choices=['en', 'id', 'ja', 'de', 'es', 'pt', 'ru', 'fr', 'vi', 'ko', 'zh', 'fil'], default="en", type=str
+ "--lang",
+ help="language",
+ choices=["en", "id", "ja", "de", "es", "pt", "ru", "fr", "vi", "ko", "zh", "fil"],
+ default="en",
+ type=str,
)
parser.add_argument(
"--cat",
@@ -30,16 +32,18 @@
default=None,
choices=known_types,
)
- parser.add_argument("--filter", action='store_true', help="clean data for inverse normalization purposes")
+ parser.add_argument(
+ "--filter", action="store_true", help="clean data for inverse normalization purposes"
+ )
return parser.parse_args()
-
if __name__ == "__main__":
# Example usage:
# python run_evaluate.py --input=<INPUT> --cat=<CATEGORY> --filter
args = parse_args()
- if args.lang == 'en':
+ if args.lang == "en":
from fun_text_processing.inverse_text_normalization.en.clean_eval_data import filter_loaded_data
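+        # NOTE: filter_loaded_data is only imported when --lang=en, so the
+        # --filter option currently assumes English data.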
+
file_path = args.input
inverse_normalizer = InverseNormalizer()
@@ -49,6 +53,7 @@
if args.filter:
training_data = filter_loaded_data(training_data)
+ # Evaluate at sentence level if no specific category is provided
if args.category is None:
print("Sentence level evaluation...")
sentences_un_normalized, sentences_normalized, _ = training_data_to_sentences(training_data)
@@ -60,40 +65,45 @@
)
print("- Accuracy: " + str(sentences_accuracy))
+ # Evaluate at token level
print("Token level evaluation...")
tokens_per_type = training_data_to_tokens(training_data, category=args.category)
token_accuracy = {}
- for token_type in tokens_per_type:
+ for token_type, (tokens_un_normalized, tokens_normalized) in tokens_per_type.items():
print("- Token type: " + token_type)
- tokens_un_normalized, tokens_normalized = tokens_per_type[token_type]
print(" - Data: " + str(len(tokens_normalized)) + " tokens")
tokens_prediction = inverse_normalizer.inverse_normalize_list(tokens_normalized)
print(" - Denormalized. Evaluating...")
- token_accuracy[token_type] = evaluate(tokens_prediction, tokens_un_normalized, input=tokens_normalized)
+ token_accuracy[token_type] = evaluate(
+ tokens_prediction, tokens_un_normalized, input=tokens_normalized
+ )
print(" - Accuracy: " + str(token_accuracy[token_type]))
- token_count_per_type = {token_type: len(tokens_per_type[token_type][0]) for token_type in tokens_per_type}
+
+ # Calculate weighted token accuracy
+    token_count_per_type = {
+        token_type: len(tokens) for token_type, (tokens, _) in tokens_per_type.items()
+    }
token_weighted_accuracy = [
- token_count_per_type[token_type] * accuracy for token_type, accuracy in token_accuracy.items()
+ token_count_per_type[token_type] * accuracy
+ for token_type, accuracy in token_accuracy.items()
]
print("- Accuracy: " + str(sum(token_weighted_accuracy) / sum(token_count_per_type.values())))
- print(" - Total: " + str(sum(token_count_per_type.values())), '\n')
+ print(" - Total: " + str(sum(token_count_per_type.values())), "\n")
for token_type in token_accuracy:
if token_type not in known_types:
raise ValueError("Unexpected token type: " + token_type)
+ # Output table summarizing evaluation results if no specific category is provided
if args.category is None:
- c1 = ['Class', 'sent level'] + known_types
- c2 = ['Num Tokens', len(sentences_normalized)] + [
- token_count_per_type[known_type] if known_type in tokens_per_type else '0' for known_type in known_types
+ c1 = ["Class", "sent level"] + known_types
+    c2 = ["Num Tokens", str(len(sentences_normalized))] + [
+ str(token_count_per_type.get(known_type, 0)) for known_type in known_types
]
- c3 = ["Denormalization", sentences_accuracy] + [
- token_accuracy[known_type] if known_type in token_accuracy else '0' for known_type in known_types
+ c3 = ["Denormalization", str(sentences_accuracy)] + [
+        str(token_accuracy.get(known_type, 0)) for known_type in known_types
]
-
for i in range(len(c1)):
- print(f'{str(c1[i]):10s} | {str(c2[i]):10s} | {str(c3[i]):5s}')
+ print(f"{c1[i]:10s} | {c2[i]:10s} | {c3[i]:5s}")
else:
- print(f'numbers\t{token_count_per_type[args.category]}')
- print(f'Denormalization\t{token_accuracy[args.category]}')
+ print(f"numbers\t{token_count_per_type[args.category]}")
+ print(f"Denormalization\t{token_accuracy[args.category]}")
--
Gitblit v1.9.1