#!/usr/bin/env python


def filter(data,
           speech_length_min=100,
           speech_length_max=15000,
           token_length_min=0,
           token_length_max=200):
    """Return True if a dataset example passes length filtering.

    Args:
        data: dict describing one example. Must contain at least one of
            the keys "speech" (an array-like with a ``.shape[0]`` first
            dimension, e.g. a numpy array of samples or frames) or
            "text" (a sized token sequence). An optional "sampling_rate"
            key converts the speech length from samples to milliseconds.
        speech_length_min: exclusive lower bound on speech length
            (milliseconds when "sampling_rate" is present, otherwise raw
            frame/sample count).
        speech_length_max: exclusive upper bound on speech length.
        token_length_min: exclusive lower bound on token count.
        token_length_max: exclusive upper bound on token count.

    Returns:
        bool: True when every length present in ``data`` lies strictly
        within its (min, max) bounds; a missing modality is not checked.

    Raises:
        AssertionError: if neither "speech" nor "text" is present.
            (Kept as ``assert`` for compatibility with existing callers;
            NOTE(review): disappears under ``python -O``.)
    """
    # NOTE(review): this function intentionally keeps the name ``filter``
    # (shadowing the builtin) because dataset-pipeline callers import it
    # by this name; renaming would break the public interface.
    assert "speech" in data or "text" in data

    # Each modality defaults to "passes" when absent, so the two checks
    # below reproduce the original if/elif/else truth table without
    # duplicating the speech-length computation.
    speech_ok = True
    token_ok = True

    if "speech" in data:
        if "sampling_rate" in data:
            # Convert sample count to duration in milliseconds.
            speech_length = (data["speech"].shape[0] / data["sampling_rate"]) * 1000.
        else:
            # No sampling rate: bounds are interpreted as raw frame counts.
            speech_length = data["speech"].shape[0]
        speech_ok = speech_length_min < speech_length < speech_length_max

    if "text" in data:
        num_tokens = len(data['text'])
        token_ok = token_length_min < num_tokens < token_length_max

    return speech_ok and token_ok