From 4e2fe544ae37174a3e09dfcdbbdae5abfe711e53 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期三, 05 七月 2023 16:57:21 +0800
Subject: [PATCH] funasr sdk

---
 funasr/build_utils/build_model_from_file.py |   25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/funasr/build_utils/build_model_from_file.py b/funasr/build_utils/build_model_from_file.py
index 53eafc1..26542cd 100644
--- a/funasr/build_utils/build_model_from_file.py
+++ b/funasr/build_utils/build_model_from_file.py
@@ -6,7 +6,6 @@
 
 import torch
 import yaml
-from typeguard import check_argument_types
 
 from funasr.build_utils.build_model import build_model
 from funasr.models.base_model import FunASRModel
@@ -30,7 +29,6 @@
         device: Device type, "cpu", "cuda", or "cuda:N".
 
     """
-    assert check_argument_types()
     if config_file is None:
         assert model_file is not None, (
             "The argument 'model_file' must be provided "
@@ -74,7 +72,10 @@
             model_dict = torch.load(model_file, map_location=device)
     if task_name == "diar" and mode == "sond":
         model_dict = fileter_model_dict(model_dict, model.state_dict())
-    model.load_state_dict(model_dict)
+    if task_name == "vad":
+        model.encoder.load_state_dict(model_dict)
+    else:
+        model.load_state_dict(model_dict)
     if model_name_pth is not None and not os.path.exists(model_name_pth):
         torch.save(model_dict, model_name_pth)
         logging.info("model_file is saved to pth: {}".format(model_name_pth))
@@ -87,7 +88,7 @@
         ckpt,
         mode,
 ):
-    assert mode == "paraformer" or mode == "uniasr" or mode == "sond" or mode == "sv"
+    assert mode == "paraformer" or mode == "uniasr" or mode == "sond" or mode == "sv" or mode == "tp"
     logging.info("start convert tf model to torch model")
     from funasr.modules.streaming_utils.load_fr_tf import load_tf_dict
     var_dict_tf = load_tf_dict(ckpt)
@@ -148,7 +149,7 @@
         if model.decoder is not None:
             var_dict_torch_update_local = model.decoder.convert_tf2torch(var_dict_tf, var_dict_torch)
             var_dict_torch_update.update(var_dict_torch_update_local)
-    else:
+    elif "mode" == "sv":
         # speech encoder
         var_dict_torch_update_local = model.encoder.convert_tf2torch(var_dict_tf, var_dict_torch)
         var_dict_torch_update.update(var_dict_torch_update_local)
@@ -158,7 +159,19 @@
         # decoder
         var_dict_torch_update_local = model.decoder.convert_tf2torch(var_dict_tf, var_dict_torch)
         var_dict_torch_update.update(var_dict_torch_update_local)
-
+    else:
+        # encoder
+        var_dict_torch_update_local = model.encoder.convert_tf2torch(var_dict_tf, var_dict_torch)
+        var_dict_torch_update.update(var_dict_torch_update_local)
+        # predictor
+        var_dict_torch_update_local = model.predictor.convert_tf2torch(var_dict_tf, var_dict_torch)
+        var_dict_torch_update.update(var_dict_torch_update_local)
+        # decoder
+        var_dict_torch_update_local = model.decoder.convert_tf2torch(var_dict_tf, var_dict_torch)
+        var_dict_torch_update.update(var_dict_torch_update_local)
+        # bias_encoder
+        var_dict_torch_update_local = model.clas_convert_tf2torch(var_dict_tf, var_dict_torch)
+        var_dict_torch_update.update(var_dict_torch_update_local)
         return var_dict_torch_update
 
     return var_dict_torch_update

--
Gitblit v1.9.1