From adcee8828ef5d78b575043954deb662a35e318f7 Mon Sep 17 00:00:00 2001
From: huangmingming <huangmingming@deepscience.cn>
Date: Mon, 30 Jan 2023 16:02:54 +0800
Subject: [PATCH] update the minimum size of audio

---
 funasr/datasets/large_datasets/utils/filter.py |   15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/funasr/datasets/large_datasets/utils/filter.py b/funasr/datasets/large_datasets/utils/filter.py
index 5dc911f..91ba4be 100644
--- a/funasr/datasets/large_datasets/utils/filter.py
+++ b/funasr/datasets/large_datasets/utils/filter.py
@@ -2,14 +2,17 @@
 
 
 def filter(data,
-           min_length=10,
-           max_length=10000,
-           min_token_length=0,
-           max_token_length=200):
+           speech_length_min=100,
+           speech_length_max=15000,
+           token_length_min=0,
+           token_length_max=200):
     assert "speech" in data
     assert "text" in data
 
-    num_frames = data["speech"].shape[0]
+    if "sampling_rate" in data:
+        speech_length = (data["speech"].shape[0] / data["sampling_rate"]) * 1000.
+    else:
+        speech_length = data["speech"].shape[0]
     num_tokens = len(data['text'])
 
-    return min_length < num_frames < max_length and min_token_length < num_tokens < max_token_length
\ No newline at end of file
+    return speech_length_min < speech_length < speech_length_max and token_length_min < num_tokens < token_length_max

--
Gitblit v1.9.1