zhifu gao
2024-04-26 1cdb3cc28d4d89a576cc06e5cd8eb80da1f3a3aa
funasr/datasets/large_datasets/utils/filter.py
@@ -1,18 +1,27 @@
#!/usr/bin/env python
def filter(
    data, speech_length_min=100, speech_length_max=15000, token_length_min=0, token_length_max=200
):
    """Return True if the sample ``data`` passes the length filters.

    A sample may carry speech, text, or both; the applicable checks are
    combined with ``and`` (both must pass when both modalities are present).

    Args:
        data: dict with optional keys
            - "speech": array-like whose ``shape[0]`` is the number of frames/samples
            - "sampling_rate": if present, speech length is converted to milliseconds
            - "text": sized token sequence (its ``len`` is the token count)
        speech_length_min / speech_length_max: exclusive bounds on speech length
            (milliseconds when "sampling_rate" is given, raw frame count otherwise).
        token_length_min / token_length_max: exclusive bounds on token count.

    Returns:
        bool: True when every applicable length lies strictly inside its bounds.

    Raises:
        AssertionError: if ``data`` contains neither "speech" nor "text".
    """
    assert "speech" in data or "text" in data

    def _speech_ok():
        # Milliseconds when the sampling rate is known, else the raw frame count.
        if "sampling_rate" in data:
            speech_length = (data["speech"].shape[0] / data["sampling_rate"]) * 1000.0
        else:
            speech_length = data["speech"].shape[0]
        return speech_length_min < speech_length < speech_length_max

    def _text_ok():
        num_tokens = len(data["text"])
        return token_length_min < num_tokens < token_length_max

    if "speech" in data and "text" in data:
        return _speech_ok() and _text_ok()
    if "speech" in data:
        return _speech_ok()
    return _text_ok()