zhifu gao
2024-05-14 2f27b165559cd53afab52047309ebe4ac838ebb8
fun_text_processing/inverse_text_normalization/run_evaluate.py
@@ -10,17 +10,21 @@
 )
-'''
+"""
 Runs Evaluation on data in the format of: <semiotic class>\t<unnormalized text>\t<`self` if trivial class or normalized text>
 like the Google text normalization data https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish
-'''
+"""
 
 
 def parse_args():
     parser = ArgumentParser()
     parser.add_argument("--input", help="input file path", type=str)
     parser.add_argument(
-        "--lang", help="language", choices=['en', 'id', 'ja', 'de', 'es', 'pt', 'ru', 'fr', 'vi', 'ko', 'zh', 'fil'], default="en", type=str
+        "--lang",
+        help="language",
+        choices=["en", "id", "ja", "de", "es", "pt", "ru", "fr", "vi", "ko", "zh", "fil"],
+        default="en",
+        type=str,
     )
     parser.add_argument(
         "--cat",
@@ -30,7 +34,9 @@
         default=None,
         choices=known_types,
     )
-    parser.add_argument("--filter", action='store_true', help="clean data for inverse normalization purposes")
+    parser.add_argument(
+        "--filter", action="store_true", help="clean data for inverse normalization purposes"
+    )
     return parser.parse_args()
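For reference, the file passed via --input is tab-separated in the format described by the module docstring: semiotic class, written (unnormalized) form, then the spoken (normalized) form or a self sentinel for trivial classes, mirroring the Google text normalization data. A hypothetical snippet (values illustrative; the exact sentinel spelling follows the Kaggle data's convention and is not confirmed by this patch):

    PLAIN	hello	<self>
    CARDINAL	23	twenty three
    DATE	2012	twenty twelve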
@@ -38,8 +44,10 @@
     # Example usage:
     # python run_evaluate.py --input=<INPUT> --cat=<CATEGORY> --filter
     args = parse_args()
-    if args.lang == 'en':
-        from fun_text_processing.inverse_text_normalization.en.clean_eval_data import filter_loaded_data
+    if args.lang == "en":
+        from fun_text_processing.inverse_text_normalization.en.clean_eval_data import (
+            filter_loaded_data,
+        )
     file_path = args.input
     inverse_normalizer = InverseNormalizer()
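The main block builds a single InverseNormalizer and reuses it for every token list. A minimal sketch of the core call, assuming only the constructor and inverse_normalize_list API visible in this diff (the import path below is a guess from the repo layout, not confirmed by the patch):

    # Hypothetical import path -- the defining module is not shown in this diff.
    from fun_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer

    inverse_normalizer = InverseNormalizer()
    spoken = ["twenty three", "may third twenty twelve"]
    written = inverse_normalizer.inverse_normalize_list(spoken)
    # Expected shape: one written-form string per input, e.g. ["23", "may 3 2012"]
    # (outputs are illustrative; actual formatting depends on the language grammars).
    print(written)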
@@ -69,31 +77,38 @@
         print("  - Data: " + str(len(tokens_normalized)) + " tokens")
         tokens_prediction = inverse_normalizer.inverse_normalize_list(tokens_normalized)
         print("  - Denormalized. Evaluating...")
-        token_accuracy[token_type] = evaluate(tokens_prediction, tokens_un_normalized, input=tokens_normalized)
+        token_accuracy[token_type] = evaluate(
+            tokens_prediction, tokens_un_normalized, input=tokens_normalized
+        )
         print("  - Accuracy: " + str(token_accuracy[token_type]))
-    token_count_per_type = {token_type: len(tokens_per_type[token_type][0]) for token_type in tokens_per_type}
+    token_count_per_type = {
+        token_type: len(tokens_per_type[token_type][0]) for token_type in tokens_per_type
+    }
     token_weighted_accuracy = [
-        token_count_per_type[token_type] * accuracy for token_type, accuracy in token_accuracy.items()
+        token_count_per_type[token_type] * accuracy
+        for token_type, accuracy in token_accuracy.items()
     ]
     print("- Accuracy: " + str(sum(token_weighted_accuracy) / sum(token_count_per_type.values())))
-    print(" - Total: " + str(sum(token_count_per_type.values())), '\n')
+    print(" - Total: " + str(sum(token_count_per_type.values())), "\n")
     for token_type in token_accuracy:
         if token_type not in known_types:
             raise ValueError("Unexpected token type: " + token_type)
     if args.category is None:
-        c1 = ['Class', 'sent level'] + known_types
-        c2 = ['Num Tokens', len(sentences_normalized)] + [
-            token_count_per_type[known_type] if known_type in tokens_per_type else '0' for known_type in known_types
+        c1 = ["Class", "sent level"] + known_types
+        c2 = ["Num Tokens", len(sentences_normalized)] + [
+            token_count_per_type[known_type] if known_type in tokens_per_type else "0"
+            for known_type in known_types
         ]
         c3 = ["Denormalization", sentences_accuracy] + [
-            token_accuracy[known_type] if known_type in token_accuracy else '0' for known_type in known_types
+            token_accuracy[known_type] if known_type in token_accuracy else "0"
+            for known_type in known_types
         ]
         for i in range(len(c1)):
-            print(f'{str(c1[i]):10s} | {str(c2[i]):10s} | {str(c3[i]):5s}')
+            print(f"{str(c1[i]):10s} | {str(c2[i]):10s} | {str(c3[i]):5s}")
     else:
-        print(f'numbers\t{token_count_per_type[args.category]}')
-        print(f'Denormalization\t{token_accuracy[args.category]}')
+        print(f"numbers\t{token_count_per_type[args.category]}")
+        print(f"Denormalization\t{token_accuracy[args.category]}")
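To make the aggregation at the end of the diff concrete: the overall accuracy is a token-count-weighted mean of the per-class accuracies, sum(count_c * acc_c) / sum(count_c). A self-contained sketch with made-up numbers:

    # Counts and accuracies are invented purely to illustrate the weighting
    # logic used in run_evaluate.py above.
    token_count_per_type = {"CARDINAL": 80, "DATE": 20}
    token_accuracy = {"CARDINAL": 0.95, "DATE": 0.80}

    token_weighted_accuracy = [
        token_count_per_type[t] * acc for t, acc in token_accuracy.items()
    ]
    # (80 * 0.95 + 20 * 0.80) / 100 = 0.92
    print(sum(token_weighted_accuracy) / sum(token_count_per_type.values()))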