From faa8ad377aa728818635074a74ee9176fed88fc5 Mon Sep 17 00:00:00 2001
From: 志浩 <neo.dzh@alibaba-inc.com>
Date: Thu, 09 Mar 2023 16:28:47 +0800
Subject: [PATCH] modify unit test for speech_diarization_sond-en-us-callhome-8k-n16k4-pytorch
---
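Note below the cut (not part of the commit message): this patch appends
ResNet34SpL2RegDiar, a ResNet34-based speaker encoder with statistic pooling
for the SOND diarization recipe, plus a TF-to-torch checkpoint converter.
A minimal usage sketch follows; the 80-dim feature size and the
(batch, frames, feats) input layout are assumptions of this note, not
something the patch itself fixes:

    import torch
    from funasr.models.encoder.resnet34_encoder import ResNet34SpL2RegDiar

    encoder = ResNet34SpL2RegDiar(input_size=80)   # feature dim: assumed
    encoder.eval()                                 # freeze BatchNorm statistics
    xs_pad = torch.randn(2, 1000, 80)              # (batch, frames, feats): assumed layout
    ilens = torch.tensor([1000, 800])
    emb, olens, _ = encoder(xs_pad, ilens)         # emb taken from the default "resnet1_dense" node
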
funasr/models/encoder/resnet34_encoder.py | 227 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 227 insertions(+), 0 deletions(-)
diff --git a/funasr/models/encoder/resnet34_encoder.py b/funasr/models/encoder/resnet34_encoder.py
index 930f7e0..91c5dc0 100644
--- a/funasr/models/encoder/resnet34_encoder.py
+++ b/funasr/models/encoder/resnet34_encoder.py
@@ -612,3 +612,230 @@
logging.warning("{} is missed from tf checkpoint".format(name))
return var_dict_torch_update
+
+
+class ResNet34SpL2RegDiar(ResNet34_SP_L2Reg):
+ def __init__(
+ self,
+ input_size,
+ embedding_node="resnet1_dense",
+ use_head_conv=True,
+ batchnorm_momentum=0.5,
+ use_head_maxpool=False,
+ num_nodes_pooling_layer=256,
+ layers_in_block=(3, 4, 6, 3),
+ filters_in_block=(32, 64, 128, 256),
+ num_nodes_resnet1=256,
+ num_nodes_last_layer=256,
+ pooling_type="window_shift",
+ pool_size=20,
+ stride=1,
+ tf2torch_tensor_name_prefix_torch="encoder",
+ tf2torch_tensor_name_prefix_tf="seq2seq/speech_encoder"
+ ):
+ super(ResNet34SpL2RegDiar, self).__init__(
+ input_size,
+ use_head_conv=use_head_conv,
+ batchnorm_momentum=batchnorm_momentum,
+ use_head_maxpool=use_head_maxpool,
+ num_nodes_pooling_layer=num_nodes_pooling_layer,
+ layers_in_block=layers_in_block,
+ filters_in_block=filters_in_block,
+ )
+
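+        # The parent builds the conv front-end and residual blocks; the settings
+        # below configure the diarization-specific pooling and embedding heads.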
+ self.embedding_node = embedding_node
+ self.num_nodes_resnet1 = num_nodes_resnet1
+ self.num_nodes_last_layer = num_nodes_last_layer
+ self.pooling_type = pooling_type
+ self.pool_size = pool_size
+ self.stride = stride
+ self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch
+ self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf
+
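+        # Two embedding heads on top of the pooled statistics; pooling concatenates
+        # mean and std, hence the doubled input width of resnet1_dense.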
+ self.resnet1_dense = torch.nn.Linear(num_nodes_pooling_layer * 2, num_nodes_resnet1)
+ self.resnet1_bn = torch.nn.BatchNorm1d(num_nodes_resnet1, eps=1e-3, momentum=batchnorm_momentum)
+
+ self.resnet2_dense = torch.nn.Linear(num_nodes_resnet1, num_nodes_last_layer)
+ self.resnet2_bn = torch.nn.BatchNorm1d(num_nodes_last_layer, eps=1e-3, momentum=batchnorm_momentum)
+
+ def output_size(self) -> int:
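+        # Width of the tensor returned by forward(), which depends on embedding_node.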
+ if self.embedding_node.startswith("resnet1"):
+ return self.num_nodes_resnet1
+ elif self.embedding_node.startswith("resnet2"):
+ return self.num_nodes_last_layer
+
+ return self.num_nodes_pooling_layer
+
+ def forward(
+ self,
+ xs_pad: torch.Tensor,
+ ilens: torch.Tensor,
+ prev_states: torch.Tensor = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
+
+ endpoints = OrderedDict()
+ res_out, ilens = super().forward(xs_pad, ilens)
+ endpoints["resnet0_bn"] = res_out
+ if self.pooling_type == "frame_gsp":
+ features = statistic_pooling(res_out, ilens, (2, ))
+ else:
+ features, ilens = windowed_statistic_pooling(res_out, ilens, (2, ), self.pool_size, self.stride)
+ features = features.transpose(1, 2)
+ endpoints["pooling"] = features
+
+ features = self.resnet1_dense(features)
+ endpoints["resnet1_dense"] = features
+ features = F.relu(features)
+ endpoints["resnet1_relu"] = features
+ features = self.resnet1_bn(features.transpose(1, 2)).transpose(1, 2)
+ endpoints["resnet1_bn"] = features
+
+ features = self.resnet2_dense(features)
+ endpoints["resnet2_dense"] = features
+ features = F.relu(features)
+ endpoints["resnet2_relu"] = features
+ features = self.resnet2_bn(features.transpose(1, 2)).transpose(1, 2)
+ endpoints["resnet2_bn"] = features
+
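+        # Expose whichever intermediate endpoint embedding_node selects; the third
+        # return value (hidden state) is unused by this encoder.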
+ return endpoints[self.embedding_node], ilens, None
+
+ def gen_tf2torch_map_dict(self):
+ tensor_name_prefix_torch = self.tf2torch_tensor_name_prefix_torch
+ tensor_name_prefix_tf = self.tf2torch_tensor_name_prefix_tf
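+        # Placeholder written into the BatchNorm num_batches_tracked buffers,
+        # which have no counterpart in the TF checkpoint.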
+ train_steps = 720000
+ map_dict_local = {
+ # torch: conv1d.weight in "out_channel in_channel kernel_size"
+ # tf : conv1d.weight in "kernel_size in_channel out_channel"
+ # torch: linear.weight in "out_channel in_channel"
+ # tf : dense.weight in "in_channel out_channel"
+ "{}.pre_conv.weight".format(tensor_name_prefix_torch):
+ {"name": "{}/pre_conv/kernel".format(tensor_name_prefix_tf),
+ "squeeze": None,
+ "transpose": (3, 2, 0, 1),
+ },
+ "{}.pre_conv_bn.bias".format(tensor_name_prefix_torch):
+ {"name": "{}/pre_conv_bn/beta".format(tensor_name_prefix_tf),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.pre_conv_bn.weight".format(tensor_name_prefix_torch):
+ {"name": "{}/pre_conv_bn/gamma".format(tensor_name_prefix_tf),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.pre_conv_bn.running_mean".format(tensor_name_prefix_torch):
+ {"name": "{}/pre_conv_bn/moving_mean".format(tensor_name_prefix_tf),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.pre_conv_bn.running_var".format(tensor_name_prefix_torch):
+ {"name": "{}/pre_conv_bn/moving_variance".format(tensor_name_prefix_tf),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.pre_conv_bn.num_batches_tracked".format(tensor_name_prefix_torch): train_steps
+ }
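+        # Dense/BN stacks resnet0..resnet2; in the TF graph resnet0_dense is a
+        # 4-D conv kernel, hence the different transpose for layer_idx == 0.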
+ for layer_idx in range(3):
+ map_dict_local.update({
+ "{}.resnet{}_dense.weight".format(tensor_name_prefix_torch, layer_idx):
+ {"name": "{}/resnet{}_dense/kernel".format(tensor_name_prefix_tf, layer_idx),
+ "squeeze": None,
+ "transpose": (3, 2, 0, 1) if layer_idx == 0 else (1, 0),
+ },
+ "{}.resnet{}_dense.bias".format(tensor_name_prefix_torch, layer_idx):
+ {"name": "{}/resnet{}_dense/bias".format(tensor_name_prefix_tf, layer_idx),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.resnet{}_bn.weight".format(tensor_name_prefix_torch, layer_idx):
+ {"name": "{}/resnet{}_bn/gamma".format(tensor_name_prefix_tf, layer_idx),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.resnet{}_bn.bias".format(tensor_name_prefix_torch, layer_idx):
+ {"name": "{}/resnet{}_bn/beta".format(tensor_name_prefix_tf, layer_idx),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.resnet{}_bn.running_mean".format(tensor_name_prefix_torch, layer_idx):
+ {"name": "{}/resnet{}_bn/moving_mean".format(tensor_name_prefix_tf, layer_idx),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.resnet{}_bn.running_var".format(tensor_name_prefix_torch, layer_idx):
+ {"name": "{}/resnet{}_bn/moving_variance".format(tensor_name_prefix_tf, layer_idx),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.resnet{}_bn.num_batches_tracked".format(tensor_name_prefix_torch, layer_idx): train_steps
+ })
+
+ for block_idx in range(len(self.layers_in_block)):
+ for layer_idx in range(self.layers_in_block[block_idx]):
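+                # conv1/conv2 form the residual pair and "_sc" the shortcut conv;
+                # map entries for layers without a shortcut are harmless, since
+                # convert_tf2torch iterates over the torch variables instead.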
+ for i in ["1", "2", "_sc"]:
+ map_dict_local.update({
+ "{}.block_{}.layer_{}.conv{}.weight".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+ {"name": "{}/block_{}/layer_{}/conv{}/kernel".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+ "squeeze": None,
+ "transpose": (3, 2, 0, 1),
+ },
+ "{}.block_{}.layer_{}.bn{}.weight".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+ {"name": "{}/block_{}/layer_{}/bn{}/gamma".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.block_{}.layer_{}.bn{}.bias".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+ {"name": "{}/block_{}/layer_{}/bn{}/beta".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.block_{}.layer_{}.bn{}.running_mean".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+ {"name": "{}/block_{}/layer_{}/bn{}/moving_mean".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.block_{}.layer_{}.bn{}.running_var".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+ {"name": "{}/block_{}/layer_{}/bn{}/moving_variance".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+ "squeeze": None,
+ "transpose": None,
+ },
+ "{}.block_{}.layer_{}.bn{}.num_batches_tracked".format(tensor_name_prefix_torch, block_idx, layer_idx, i): train_steps,
+ })
+
+ return map_dict_local
+
+ def convert_tf2torch(self,
+ var_dict_tf,
+ var_dict_torch,
+ ):
+
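+        # Translate TF checkpoint tensors into the torch state-dict layout,
+        # applying the per-tensor squeeze/transpose rules from the map.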
+ map_dict = self.gen_tf2torch_map_dict()
+
+ var_dict_torch_update = dict()
+ for name in sorted(var_dict_torch.keys(), reverse=False):
+ if name.startswith(self.tf2torch_tensor_name_prefix_torch):
+ if name in map_dict:
+ if "num_batches_tracked" not in name:
+ name_tf = map_dict[name]["name"]
+ data_tf = var_dict_tf[name_tf]
+ if map_dict[name]["squeeze"] is not None:
+ data_tf = np.squeeze(data_tf, axis=map_dict[name]["squeeze"])
+ if map_dict[name]["transpose"] is not None:
+ data_tf = np.transpose(data_tf, map_dict[name]["transpose"])
+ data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
+ assert var_dict_torch[name].size() == data_tf.size(), \
+ "{}, {}, {} != {}".format(name, name_tf,
+ var_dict_torch[name].size(), data_tf.size())
+ var_dict_torch_update[name] = data_tf
+ logging.info("torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
+ name, data_tf.size(), name_tf, var_dict_tf[name_tf].shape
+ ))
+ else:
+                        # num_batches_tracked is a scalar int64 buffer; build it with
+                        # torch.tensor so the constant becomes a 0-dim tensor.
+                        var_dict_torch_update[name] = torch.tensor(map_dict[name], dtype=torch.int64).to("cpu")
+ logging.info("torch tensor: {}, manually assigning to: {}".format(
+ name, map_dict[name]
+ ))
+ else:
+ logging.warning("{} is missed from tf checkpoint".format(name))
+
+ return var_dict_torch_update
--
Gitblit v1.9.1