From 1cdb3cc28d4d89a576cc06e5cd8eb80da1f3a3aa Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Fri, 26 Apr 2024 11:27:39 +0800
Subject: [PATCH] Dev gzf exp (#1665)

---
 funasr/datasets/large_datasets/utils/filter.py |   35 ++++++++++++++++++++++-------------
 1 file changed, 22 insertions(+), 13 deletions(-)

diff --git a/funasr/datasets/large_datasets/utils/filter.py b/funasr/datasets/large_datasets/utils/filter.py
index 91ba4be..adc8fa0 100644
--- a/funasr/datasets/large_datasets/utils/filter.py
+++ b/funasr/datasets/large_datasets/utils/filter.py
@@ -1,18 +1,27 @@
 #!/usr/bin/env python
 
 
-def filter(data,
-           speech_length_min=100,
-           speech_length_max=15000,
-           token_length_min=0,
-           token_length_max=200):
-    assert "speech" in data
-    assert "text" in data
+def filter(
+    data, speech_length_min=100, speech_length_max=15000, token_length_min=0, token_length_max=200
+):
+    assert "speech" in data or "text" in data
 
-    if "sampling_rate" in data:
-        speech_length = (data["speech"].shape[0] / data["sampling_rate"]) * 1000.
+    if "speech" in data and "text" in data:
+        if "sampling_rate" in data:
+            speech_length = (data["speech"].shape[0] / data["sampling_rate"]) * 1000.0
+        else:
+            speech_length = data["speech"].shape[0]
+        num_tokens = len(data["text"])
+        return (
+            speech_length_min < speech_length < speech_length_max
+            and token_length_min < num_tokens < token_length_max
+        )
+    elif "speech" in data:
+        if "sampling_rate" in data:
+            speech_length = (data["speech"].shape[0] / data["sampling_rate"]) * 1000.0
+        else:
+            speech_length = data["speech"].shape[0]
+        return speech_length_min < speech_length < speech_length_max
     else:
-        speech_length = data["speech"].shape[0]
-    num_tokens = len(data['text'])
-
-    return speech_length_min < speech_length < speech_length_max and token_length_min < num_tokens < token_length_max
+        num_tokens = len(data["text"])
+        return token_length_min < num_tokens < token_length_max

--
Gitblit v1.9.1