From 28ccfbfc51068a663a80764e14074df5edf2b5ba Mon Sep 17 00:00:00 2001
From: kongdeqiang <kongdeqiang960204@163.com>
Date: Fri, 13 Mar 2026 17:41:41 +0800
Subject: [PATCH] 提交

---
 funasr/datasets/dataloader_entry.py |   39 ++++++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 17 deletions(-)

diff --git a/funasr/datasets/dataloader_entry.py b/funasr/datasets/dataloader_entry.py
index 9595805..055e4c8 100644
--- a/funasr/datasets/dataloader_entry.py
+++ b/funasr/datasets/dataloader_entry.py
@@ -14,14 +14,14 @@
         frontend=frontend,
         tokenizer=tokenizer,
         is_training=True,
-        **kwargs.get("dataset_conf")
+        **kwargs.get("dataset_conf"),
     )
     dataset_val = dataset_class(
         kwargs.get("valid_data_set_list"),
         frontend=frontend,
         tokenizer=tokenizer,
         is_training=False,
-        **kwargs.get("dataset_conf")
+        **kwargs.get("dataset_conf"),
     )
 
     # dataloader
@@ -49,34 +49,37 @@
     def __init__(self, frontend=None, tokenizer=None, **kwargs):
         # dataset
         logging.info("Build dataloader")
+
         dataset_class = tables.dataset_classes.get(kwargs.get("dataset", "AudioDataset"))
-        dataset_tr = dataset_class(
-            kwargs.get("train_data_set_list"),
-            frontend=frontend,
-            tokenizer=tokenizer,
-            is_training=True,
-            **kwargs.get("dataset_conf")
-        )
+        dataset_tr = None
+        # split dataset
+        self.data_split_num = kwargs["dataset_conf"].get("data_split_num", 1)
+        if self.data_split_num == 1:
+            dataset_tr = dataset_class(
+                kwargs.get("train_data_set_list"),
+                frontend=frontend,
+                tokenizer=tokenizer,
+                is_training=True,
+                **kwargs.get("dataset_conf"),
+            )
         dataset_val = dataset_class(
             kwargs.get("valid_data_set_list"),
             frontend=frontend,
             tokenizer=tokenizer,
             is_training=False,
-            **kwargs.get("dataset_conf")
+            **kwargs.get("dataset_conf"),
         )
 
         self.dataset_tr = dataset_tr
         self.dataset_val = dataset_val
         self.kwargs = kwargs
 
-        # split dataset
-        self.data_split_num = kwargs["dataset_conf"].get("data_split_num", 1)
         self.dataset_class = dataset_class
         self.frontend = frontend
         self.tokenizer = tokenizer
         self.kwargs = kwargs
 
-    def build_iter(self, epoch=0, data_split_i=0, **kwargs):
+    def build_iter(self, epoch=0, data_split_i=0, start_step=0, **kwargs):
 
         # reload dataset slice
         if self.data_split_num > 1:
@@ -87,7 +90,7 @@
                 tokenizer=self.tokenizer,
                 is_training=True,
                 **self.kwargs.get("dataset_conf"),
-                data_split_i=data_split_i
+                data_split_i=data_split_i,
             )
 
         # dataloader
@@ -95,7 +98,9 @@
         batch_sampler_val = None
         if batch_sampler is not None:
             batch_sampler_class = tables.batch_sampler_classes.get(batch_sampler)
-            batch_sampler = batch_sampler_class(self.dataset_tr, **self.kwargs.get("dataset_conf"))
+            batch_sampler = batch_sampler_class(
+                self.dataset_tr, start_step=start_step, **self.kwargs.get("dataset_conf")
+            )
             batch_sampler_val = batch_sampler_class(
                 self.dataset_val, is_training=False, **self.kwargs.get("dataset_conf")
             )
@@ -121,14 +126,14 @@
         frontend=frontend,
         tokenizer=tokenizer,
         is_training=True,
-        **kwargs.get("dataset_conf")
+        **kwargs.get("dataset_conf"),
     )
     dataset_val = dataset_class(
         kwargs.get("valid_data_set_list"),
         frontend=frontend,
         tokenizer=tokenizer,
         is_training=False,
-        **kwargs.get("dataset_conf")
+        **kwargs.get("dataset_conf"),
     )
 
     return dataset_tr, dataset_val

--
Gitblit v1.9.1