From 6e5f075b1d9f189dd4e5400a0a228c670aa4696e Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Thu, 09 Feb 2023 14:15:18 +0800
Subject: [PATCH] Merge pull request #80 from alibaba-damo-academy/dev

---
 funasr/datasets/large_datasets/dataset.py |    9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/funasr/datasets/large_datasets/dataset.py b/funasr/datasets/large_datasets/dataset.py
index 81c1361..55b0678 100644
--- a/funasr/datasets/large_datasets/dataset.py
+++ b/funasr/datasets/large_datasets/dataset.py
@@ -102,6 +102,8 @@
                 elif data_type == "text" or data_type == "sound":
                     text_reader = open(data_file, "r")
                     reader_list.append(text_reader)
+                elif data_type == "none":
+                    continue
                 else:
                     raise TypeError("Data type {} is not supported".format(data_type))
 
@@ -156,9 +158,10 @@
     filter_fn = partial(filter, **filter_conf)
     dataset = FilterIterDataPipe(dataset, fn=filter_fn)
 
-    vocab = {'vocab': dict, 'seg_dict': seg_dict}
-    tokenize_fn = partial(tokenize, **vocab)
-    dataset = MapperIterDataPipe(dataset, fn=tokenize_fn)
+    if "text" in data_names:
+        vocab = {'vocab': dict, 'seg_dict': seg_dict}
+        tokenize_fn = partial(tokenize, **vocab)
+        dataset = MapperIterDataPipe(dataset, fn=tokenize_fn)
 
     if shuffle:
         buffer_conf = conf.get('shuffle_conf', {})

--
Gitblit v1.9.1