From ded881802c82190681a1f37caf1edefb16e67491 Mon Sep 17 00:00:00 2001
From: jmwang66 <wangjiaming.wjm@alibaba-inc.com>
Date: Tue, 07 Feb 2023 10:17:52 +0800
Subject: [PATCH] update data2vec pretrain

---
 funasr/datasets/large_datasets/dataset.py         |    7 ++++---
 funasr/datasets/large_datasets/datapipes/batch.py |    1 +
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/funasr/datasets/large_datasets/datapipes/batch.py b/funasr/datasets/large_datasets/datapipes/batch.py
index c980ae3..8ec43e9 100644
--- a/funasr/datasets/large_datasets/datapipes/batch.py
+++ b/funasr/datasets/large_datasets/datapipes/batch.py
@@ -46,6 +46,7 @@
         batch = []
         bucket = []
         max_lengths = 0
+        min_lengths = 999999
         batch_lengths = 0
 
         if self.batch_mode == "clipping":
diff --git a/funasr/datasets/large_datasets/dataset.py b/funasr/datasets/large_datasets/dataset.py
index 2d3ffd5..55b0678 100644
--- a/funasr/datasets/large_datasets/dataset.py
+++ b/funasr/datasets/large_datasets/dataset.py
@@ -158,9 +158,10 @@
     filter_fn = partial(filter, **filter_conf)
     dataset = FilterIterDataPipe(dataset, fn=filter_fn)
 
-    vocab = {'vocab': dict, 'seg_dict': seg_dict}
-    tokenize_fn = partial(tokenize, **vocab)
-    dataset = MapperIterDataPipe(dataset, fn=tokenize_fn)
+    if "text" in data_names:
+        vocab = {'vocab': dict, 'seg_dict': seg_dict}
+        tokenize_fn = partial(tokenize, **vocab)
+        dataset = MapperIterDataPipe(dataset, fn=tokenize_fn)
 
     if shuffle:
         buffer_conf = conf.get('shuffle_conf', {})

--
Gitblit v1.9.1