From ae013cf597db1c523c9fac21b7e83db62304ae2d Mon Sep 17 00:00:00 2001
From: zhaomingwork <61895407+zhaomingwork@users.noreply.github.com>
Date: Thu, 08 May 2025 23:52:09 +0800
Subject: [PATCH] fix bug for core dump in http, use libboost as parse (#2509)

---
 funasr/datasets/audio_datasets/samplers.py |    9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/funasr/datasets/audio_datasets/samplers.py b/funasr/datasets/audio_datasets/samplers.py
index bddf186..f7057de 100644
--- a/funasr/datasets/audio_datasets/samplers.py
+++ b/funasr/datasets/audio_datasets/samplers.py
@@ -392,17 +392,14 @@
             )
             batch = []
             max_len_in_batch = 0
-            count = 0
+            count = 1
             for idx in buffer:
                 original_sample_length = self.dataset.get_source_len(idx)
                 if original_sample_length > self.max_token_length:
                     continue
                 sample_length = 1 if self.batch_type == "example" else original_sample_length
                 potential_batch_length = max(max_len_in_batch, sample_length) * (len(batch) + 1)
-                if (
-                    potential_batch_length <= self.batch_size
-                    and count <= self.batch_size_sample_max
-                ):
+                if potential_batch_length <= self.batch_size and count < self.batch_size_sample_max:
                     batch.append(idx)
                     max_len_in_batch = max(max_len_in_batch, sample_length)
                     count += 1
@@ -410,7 +407,7 @@
                     buffer_batches.append(batch)
                     batch = [idx]
                     max_len_in_batch = sample_length
-                    count = 0
+                    count = 1
             if batch:
                 buffer_batches.append(batch)
 

--
Gitblit v1.9.1