From 55c09aeaa25b4bb88a50e09ba68fa6ff00a6d676 Mon Sep 17 00:00:00 2001
From: shixian.shi <shixian.shi@alibaba-inc.com>
Date: Mon, 15 Jan 2024 20:10:39 +0800
Subject: [PATCH] update readme, fix seaco bug
---
funasr/bin/train.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/funasr/bin/train.py b/funasr/bin/train.py
index b1f0d06..878eb24 100644
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -25,7 +25,9 @@
@hydra.main(config_name=None, version_base=None)
def main_hydra(kwargs: DictConfig):
- import pdb; pdb.set_trace()
+ if kwargs.get("debug", False):
+ import pdb; pdb.set_trace()
+
assert "model" in kwargs
if "model_conf" not in kwargs:
logging.info("download models from model hub: {}".format(kwargs.get("model_hub", "ms")))
@@ -62,14 +64,14 @@
tokenizer = kwargs.get("tokenizer", None)
if tokenizer is not None:
- tokenizer_class = tables.tokenizer_classes.get(tokenizer.lower())
+ tokenizer_class = tables.tokenizer_classes.get(tokenizer)
tokenizer = tokenizer_class(**kwargs["tokenizer_conf"])
kwargs["tokenizer"] = tokenizer
# build frontend if frontend is none None
frontend = kwargs.get("frontend", None)
if frontend is not None:
- frontend_class = tables.frontend_classes.get(frontend.lower())
+ frontend_class = tables.frontend_classes.get(frontend)
frontend = frontend_class(**kwargs["frontend_conf"])
kwargs["frontend"] = frontend
kwargs["input_size"] = frontend.output_size()
@@ -77,7 +79,7 @@
# import pdb;
# pdb.set_trace()
# build model
- model_class = tables.model_classes.get(kwargs["model"].lower())
+ model_class = tables.model_classes.get(kwargs["model"])
model = model_class(**kwargs, **kwargs["model_conf"], vocab_size=len(tokenizer.token_list))
@@ -139,12 +141,12 @@
# import pdb;
# pdb.set_trace()
# dataset
- dataset_class = tables.dataset_classes.get(kwargs.get("dataset", "AudioDataset").lower())
+ dataset_class = tables.dataset_classes.get(kwargs.get("dataset", "AudioDataset"))
dataset_tr = dataset_class(kwargs.get("train_data_set_list"), frontend=frontend, tokenizer=tokenizer, **kwargs.get("dataset_conf"))
# dataloader
batch_sampler = kwargs["dataset_conf"].get("batch_sampler", "DynamicBatchLocalShuffleSampler")
- batch_sampler_class = tables.batch_sampler_classes.get(batch_sampler.lower())
+ batch_sampler_class = tables.batch_sampler_classes.get(batch_sampler)
if batch_sampler is not None:
batch_sampler = batch_sampler_class(dataset_tr, **kwargs.get("dataset_conf"))
dataloader_tr = torch.utils.data.DataLoader(dataset_tr,
--
Gitblit v1.9.1