From 9b4e9cc8a0311e5243d69b73ed073e7ea441982e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期三, 27 三月 2024 16:05:29 +0800
Subject: [PATCH] train update

---
 funasr/datasets/large_datasets/utils/filter.py |   26 ++++++++++++++++++++++++++
 1 files changed, 26 insertions(+), 0 deletions(-)

diff --git a/funasr/datasets/large_datasets/utils/filter.py b/funasr/datasets/large_datasets/utils/filter.py
new file mode 100644
index 0000000..1260a47
--- /dev/null
+++ b/funasr/datasets/large_datasets/utils/filter.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+
+def _speech_length(data):
+    # Speech length in milliseconds when a sampling rate is available,
+    # otherwise the raw first-axis size of the speech array.
+    if "sampling_rate" in data:
+        return (data["speech"].shape[0] / data["sampling_rate"]) * 1000.
+    return data["speech"].shape[0]
+
+
+def filter(data,
+           speech_length_min=100,
+           speech_length_max=15000,
+           token_length_min=0,
+           token_length_max=200):
+    """Return True when the sample's speech/text lengths fall strictly
+    inside the configured (min, max) bounds; skip checks for missing keys.
+
+    NOTE: the name shadows the builtin ``filter``; kept as-is because it
+    is the public entry point callers reference.
+
+    data: dict with "speech" (array-like with .shape) and/or "text"
+    (sized sequence); optional "sampling_rate" in Hz.
+    """
+    assert "speech" in data or "text" in data
+
+    speech_ok = True
+    if "speech" in data:
+        speech_length = _speech_length(data)
+        speech_ok = speech_length_min < speech_length < speech_length_max
+    text_ok = True
+    if "text" in data:
+        num_tokens = len(data["text"])
+        text_ok = token_length_min < num_tokens < token_length_max
+    return speech_ok and text_ok

--
Gitblit v1.9.1