From 60aef2aa96750e86158d83e52429750cf4b626e3 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Fri, 10 Feb 2023 19:32:39 +0800
Subject: [PATCH] Merge pull request #95 from alibaba-damo-academy/dev_dzh

---
 funasr/models/encoder/resnet34_encoder.py |  477 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 474 insertions(+), 3 deletions(-)

diff --git a/funasr/models/encoder/resnet34_encoder.py b/funasr/models/encoder/resnet34_encoder.py
index 66e446c..952ce15 100644
--- a/funasr/models/encoder/resnet34_encoder.py
+++ b/funasr/models/encoder/resnet34_encoder.py
@@ -1,7 +1,11 @@
 import torch
 from torch.nn import functional as F
 from funasr.models.encoder.abs_encoder import AbsEncoder
-from typing import Tuple
+from typing import Tuple, Optional
+from funasr.models.pooling.statistic_pooling import statistic_pooling, windowed_statistic_pooling
+from collections import OrderedDict
+import logging
+import numpy as np
 
 
 class BasicLayer(torch.nn.Module):
@@ -116,10 +120,18 @@
         self.resnet0_dense = torch.nn.Conv2d(filters_in_block[-1], num_nodes_pooling_layer, 1)
         self.resnet0_bn = torch.nn.BatchNorm2d(num_nodes_pooling_layer, eps=1e-3, momentum=batchnorm_momentum)
 
+        self.time_ds_ratio = 8
+
     def output_size(self) -> int:
         return self.num_nodes_pooling_layer
 
-    def forward(self, xs_pad: torch.Tensor, ilens: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+    def forward(
+            self,
+            xs_pad: torch.Tensor,
+            ilens: torch.Tensor,
+            prev_states: torch.Tensor = None
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+
         features = xs_pad
         assert features.size(-1) == self.input_size, \
             "Dimension of features {} doesn't match the input_size {}.".format(features.size(-1), self.input_size)
@@ -141,4 +153,463 @@
         features = F.relu(features)
         features = self.resnet0_bn(features)
 
-        return features, ilens // 8
+        return features, resnet_out_lens
+
+# Note: For training, this implementation is not equivalent to the tf version because of the kernel_regularizer in tf.layers.
+# TODO: implement kernel_regularizer in torch via a manual loss term or weight_decay in the optimizer.
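+# A minimal sketch of the weight_decay route (hypothetical values, not wired into
+# this patch); with plain SGD, weight_decay=l2_scale is the gradient of an L2
+# penalty 0.5 * l2_scale * ||w||^2 on every parameter:
+#
+#     l2_scale = 1e-4  # assumed; should match the tf kernel_regularizer scale
+#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1, weight_decay=l2_scale)
+#
+# Caveat: weight_decay also decays biases and BN parameters, whereas tf's
+# kernel_regularizer is usually applied only to conv/dense kernels, so parameter
+# groups would be needed for an exact match.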
+class ResNet34_SP_L2Reg(AbsEncoder):
+    def __init__(
+            self,
+            input_size,
+            use_head_conv=True,
+            batchnorm_momentum=0.5,
+            use_head_maxpool=False,
+            num_nodes_pooling_layer=256,
+            layers_in_block=(3, 4, 6, 3),
+            filters_in_block=(32, 64, 128, 256),
+            tf2torch_tensor_name_prefix_torch="encoder",
+            tf2torch_tensor_name_prefix_tf="EAND/speech_encoder",
+            tf_train_steps=720000,
+    ):
+        super(ResNet34_SP_L2Reg, self).__init__()
+
+        self.use_head_conv = use_head_conv
+        self.use_head_maxpool = use_head_maxpool
+        self.num_nodes_pooling_layer = num_nodes_pooling_layer
+        self.layers_in_block = layers_in_block
+        self.filters_in_block = filters_in_block
+        self.input_size = input_size
+        self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch
+        self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf
+        self.tf_train_steps = tf_train_steps
+
+        pre_filters = filters_in_block[0]
+        if use_head_conv:
+            self.pre_conv = torch.nn.Conv2d(1, pre_filters, 3, 1, 1, bias=False, padding_mode="zeros")
+            self.pre_conv_bn = torch.nn.BatchNorm2d(pre_filters, eps=1e-3, momentum=batchnorm_momentum)
+
+        if use_head_maxpool:
+            self.head_maxpool = torch.nn.MaxPool2d(3, 1, padding=1)
+
+        for i in range(len(layers_in_block)):
+            if i == 0:
+                in_filters = pre_filters if self.use_head_conv else 1
+            else:
+                in_filters = filters_in_block[i-1]
+
+            block = BasicBlock(in_filters,
+                               filters=filters_in_block[i],
+                               num_layer=layers_in_block[i],
+                               stride=1 if i == 0 else 2,
+                               bn_momentum=batchnorm_momentum)
+            self.add_module("block_{}".format(i), block)
+
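+        # the three stride-2 residual stages downsample time and frequency by
+        # 2**3 = 8 each, so the flattened per-frame width fed to the 1x1 Conv1d
+        # below is filters_in_block[-1] * input_size // 8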
+        self.resnet0_dense = torch.nn.Conv1d(filters_in_block[-1] * input_size // 8, num_nodes_pooling_layer, 1)
+        self.resnet0_bn = torch.nn.BatchNorm1d(num_nodes_pooling_layer, eps=1e-3, momentum=batchnorm_momentum)
+
+        self.time_ds_ratio = 8
+
+    def output_size(self) -> int:
+        return self.num_nodes_pooling_layer
+
+    def forward(
+            self,
+            xs_pad: torch.Tensor,
+            ilens: torch.Tensor,
+            prev_states: torch.Tensor = None
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+
+        features = xs_pad
+        assert features.size(-1) == self.input_size, \
+            "Dimension of features {} doesn't match the input_size {}.".format(features.size(-1), self.input_size)
+        features = torch.unsqueeze(features, dim=1)
+        if self.use_head_conv:
+            features = self.pre_conv(features)
+            features = self.pre_conv_bn(features)
+            features = F.relu(features)
+
+        if self.use_head_maxpool:
+            features = self.head_maxpool(features)
+
+        resnet_outs, resnet_out_lens = features, ilens
+        for i in range(len(self.layers_in_block)):
+            block = self._modules["block_{}".format(i)]
+            resnet_outs, resnet_out_lens = block(resnet_outs, resnet_out_lens)
+
+        # resnet_outs: (B, C, T, F) -> permute to (B, F, C, T) -> flatten frequency
+        # and channel into one axis, (B, F*C, T), for the 1x1 Conv1d
+        bb, cc, tt, ff = resnet_outs.shape
+        resnet_outs = torch.reshape(resnet_outs.permute(0, 3, 1, 2), [bb, ff*cc, tt])
+        features = self.resnet0_dense(resnet_outs)
+        features = F.relu(features)
+        features = self.resnet0_bn(features)
+
+        return features, resnet_out_lens
+
+    def gen_tf2torch_map_dict(self):
+        tensor_name_prefix_torch = self.tf2torch_tensor_name_prefix_torch
+        tensor_name_prefix_tf = self.tf2torch_tensor_name_prefix_tf
+        train_steps = self.tf_train_steps
+        map_dict_local = {
+            # torch: conv2d.weight in "out_channel in_channel kh kw"
+            # tf   : conv2d kernel in "kh kw in_channel out_channel"
+            # torch: conv1d.weight in "out_channel in_channel kernel_size"
+            # tf   : conv1d kernel in "kernel_size in_channel out_channel"
+            # torch: linear.weight in "out_channel in_channel"
+            # tf   :  dense kernel in "in_channel out_channel"
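+            # e.g. with the default config, torch "encoder.pre_conv.weight" of shape
+            # (32, 1, 3, 3) is loaded from tf "EAND/speech_encoder/pre_conv/kernel"
+            # of shape (3, 3, 1, 32) via the (3, 2, 0, 1) transpose below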
+            "{}.pre_conv.weight".format(tensor_name_prefix_torch):
+                {"name": "{}/pre_conv/kernel".format(tensor_name_prefix_tf),
+                 "squeeze": None,
+                 "transpose": (3, 2, 0, 1),
+                 },
+            "{}.pre_conv_bn.bias".format(tensor_name_prefix_torch):
+                {"name": "{}/pre_conv_bn/beta".format(tensor_name_prefix_tf),
+                 "squeeze": None,
+                 "transpose": None,
+                 },
+            "{}.pre_conv_bn.weight".format(tensor_name_prefix_torch):
+                {"name": "{}/pre_conv_bn/gamma".format(tensor_name_prefix_tf),
+                 "squeeze": None,
+                 "transpose": None,
+                 },
+            "{}.pre_conv_bn.running_mean".format(tensor_name_prefix_torch):
+                {"name": "{}/pre_conv_bn/moving_mean".format(tensor_name_prefix_tf),
+                 "squeeze": None,
+                 "transpose": None,
+                 },
+            "{}.pre_conv_bn.running_var".format(tensor_name_prefix_torch):
+                {"name": "{}/pre_conv_bn/moving_variance".format(tensor_name_prefix_tf),
+                 "squeeze": None,
+                 "transpose": None,
+                 },
+            "{}.pre_conv_bn.num_batches_tracked".format(tensor_name_prefix_torch): train_steps
+        }
+        for layer_idx in range(3):
+            map_dict_local.update({
+                "{}.resnet{}_dense.weight".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_dense/kernel".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": (2, 1, 0) if layer_idx == 0 else (1, 0),
+                     },
+                "{}.resnet{}_dense.bias".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_dense/bias".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": None,
+                     },
+                "{}.resnet{}_bn.weight".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_bn/gamma".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": None,
+                     },
+                "{}.resnet{}_bn.bias".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_bn/beta".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": None,
+                     },
+                "{}.resnet{}_bn.running_mean".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_bn/moving_mean".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": None,
+                     },
+                "{}.resnet{}_bn.running_var".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_bn/moving_variance".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": None,
+                     },
+                "{}.resnet{}_bn.num_batches_tracked".format(tensor_name_prefix_torch, layer_idx): train_steps
+            })
+
+        for block_idx in range(len(self.layers_in_block)):
+            for layer_idx in range(self.layers_in_block[block_idx]):
+                for i in ["1", "2", "_sc"]:
+                    map_dict_local.update({
+                        "{}.block_{}.layer_{}.conv{}.weight".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+                            {"name": "{}/block_{}/layer_{}/conv{}/kernel".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+                             "squeeze": None,
+                             "transpose": (3, 2, 0, 1),
+                             },
+                        "{}.block_{}.layer_{}.bn{}.weight".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+                            {"name": "{}/block_{}/layer_{}/bn{}/gamma".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+                             "squeeze": None,
+                             "transpose": None,
+                             },
+                        "{}.block_{}.layer_{}.bn{}.bias".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+                            {"name": "{}/block_{}/layer_{}/bn{}/beta".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+                             "squeeze": None,
+                             "transpose": None,
+                             },
+                        "{}.block_{}.layer_{}.bn{}.running_mean".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+                            {"name": "{}/block_{}/layer_{}/bn{}/moving_mean".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+                             "squeeze": None,
+                             "transpose": None,
+                             },
+                        "{}.block_{}.layer_{}.bn{}.running_var".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+                            {"name": "{}/block_{}/layer_{}/bn{}/moving_variance".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+                             "squeeze": None,
+                             "transpose": None,
+                             },
+                        "{}.block_{}.layer_{}.bn{}.num_batches_tracked".format(tensor_name_prefix_torch, block_idx, layer_idx, i): train_steps,
+                    })
+
+        return map_dict_local
+
+    def convert_tf2torch(self,
+                         var_dict_tf,
+                         var_dict_torch,
+                         ):
+
+        map_dict = self.gen_tf2torch_map_dict()
+
+        var_dict_torch_update = dict()
+        for name in sorted(var_dict_torch.keys(), reverse=False):
+            if name.startswith(self.tf2torch_tensor_name_prefix_torch):
+                if name in map_dict:
+                    if "num_batches_tracked" not in name:
+                        name_tf = map_dict[name]["name"]
+                        data_tf = var_dict_tf[name_tf]
+                        if map_dict[name]["squeeze"] is not None:
+                            data_tf = np.squeeze(data_tf, axis=map_dict[name]["squeeze"])
+                        if map_dict[name]["transpose"] is not None:
+                            data_tf = np.transpose(data_tf, map_dict[name]["transpose"])
+                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
+                        assert var_dict_torch[name].size() == data_tf.size(), \
+                            "{}, {}, {} != {}".format(name, name_tf,
+                                                      var_dict_torch[name].size(), data_tf.size())
+                        var_dict_torch_update[name] = data_tf
+                        logging.info("torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
+                            name, data_tf.size(), name_tf, var_dict_tf[name_tf].shape
+                        ))
+                    else:
+                        # build a scalar tensor; torch.Tensor(n) would allocate an
+                        # uninitialized tensor of size n instead of the value n
+                        var_dict_torch_update[name] = torch.tensor(map_dict[name]).type(torch.int64).to("cpu")
+                        logging.info("torch tensor: {}, manually assigning to: {}".format(
+                            name, map_dict[name]
+                        ))
+                else:
+                    logging.warning("{} is missing from the tf checkpoint".format(name))
+
+        return var_dict_torch_update
+
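+# A hypothetical usage sketch for the tf -> torch conversion above (variable and
+# checkpoint names assumed, not part of this patch):
+#
+#     reader = tf.train.load_checkpoint(ckpt_path)
+#     var_dict_tf = {name: reader.get_tensor(name)
+#                    for name, _ in tf.train.list_variables(ckpt_path)}
+#     encoder.load_state_dict(
+#         encoder.convert_tf2torch(var_dict_tf, encoder.state_dict()), strict=False)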
+
+
+class ResNet34Diar(ResNet34):
+    def __init__(
+            self,
+            input_size,
+            embedding_node="resnet1_dense",
+            use_head_conv=True,
+            batchnorm_momentum=0.5,
+            use_head_maxpool=False,
+            num_nodes_pooling_layer=256,
+            layers_in_block=(3, 4, 6, 3),
+            filters_in_block=(32, 64, 128, 256),
+            num_nodes_resnet1=256,
+            num_nodes_last_layer=256,
+            pooling_type="window_shift",
+            pool_size=20,
+            stride=1,
+            tf2torch_tensor_name_prefix_torch="encoder",
+            tf2torch_tensor_name_prefix_tf="seq2seq/speech_encoder"
+    ):
+        super(ResNet34Diar, self).__init__(
+            input_size,
+            use_head_conv=use_head_conv,
+            batchnorm_momentum=batchnorm_momentum,
+            use_head_maxpool=use_head_maxpool,
+            num_nodes_pooling_layer=num_nodes_pooling_layer,
+            layers_in_block=layers_in_block,
+            filters_in_block=filters_in_block,
+        )
+
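+        # embedding_node selects which endpoint of the forward pass (see the
+        # OrderedDict built in forward) is returned as the speaker embedding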
+        self.embedding_node = embedding_node
+        self.num_nodes_resnet1 = num_nodes_resnet1
+        self.num_nodes_last_layer = num_nodes_last_layer
+        self.pooling_type = pooling_type
+        self.pool_size = pool_size
+        self.stride = stride
+        self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch
+        self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf
+
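+        # statistic pooling concatenates mean and std, so the pooled features are
+        # 2 * num_nodes_pooling_layer wide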
+        self.resnet1_dense = torch.nn.Linear(num_nodes_pooling_layer * 2, num_nodes_resnet1)
+        self.resnet1_bn = torch.nn.BatchNorm1d(num_nodes_resnet1, eps=1e-3, momentum=batchnorm_momentum)
+
+        self.resnet2_dense = torch.nn.Linear(num_nodes_resnet1, num_nodes_last_layer)
+        self.resnet2_bn = torch.nn.BatchNorm1d(num_nodes_last_layer, eps=1e-3, momentum=batchnorm_momentum)
+
+    def output_size(self) -> int:
+        if self.embedding_node.startswith("resnet1"):
+            return self.num_nodes_resnet1
+        elif self.embedding_node.startswith("resnet2"):
+            return self.num_nodes_last_layer
+
+        return self.num_nodes_pooling_layer
+
+    def forward(
+            self,
+            xs_pad: torch.Tensor,
+            ilens: torch.Tensor,
+            prev_states: torch.Tensor = None,
+    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
+
+        endpoints = OrderedDict()
+        res_out, ilens = super().forward(xs_pad, ilens)
+        endpoints["resnet0_bn"] = res_out
+        if self.pooling_type == "frame_gsp":
+            features = statistic_pooling(res_out, ilens, (3, ))
+        else:
+            features, ilens = windowed_statistic_pooling(res_out, ilens, (2, 3), self.pool_size, self.stride)
+        features = features.transpose(1, 2)
+        endpoints["pooling"] = features
+
+        features = self.resnet1_dense(features)
+        endpoints["resnet1_dense"] = features
+        features = F.relu(features)
+        endpoints["resnet1_relu"] = features
+        features = self.resnet1_bn(features.transpose(1, 2)).transpose(1, 2)
+        endpoints["resnet1_bn"] = features
+
+        features = self.resnet2_dense(features)
+        endpoints["resnet2_dense"] = features
+        features = F.relu(features)
+        endpoints["resnet2_relu"] = features
+        features = self.resnet2_bn(features.transpose(1, 2)).transpose(1, 2)
+        endpoints["resnet2_bn"] = features
+
+        return endpoints[self.embedding_node], ilens, None
+
+    def gen_tf2torch_map_dict(self):
+        tensor_name_prefix_torch = self.tf2torch_tensor_name_prefix_torch
+        tensor_name_prefix_tf = self.tf2torch_tensor_name_prefix_tf
+        train_steps = 300000
+        map_dict_local = {
+            # torch: conv2d.weight in "out_channel in_channel kh kw"
+            # tf   : conv2d kernel in "kh kw in_channel out_channel"
+            # torch: linear.weight in "out_channel in_channel"
+            # tf   :  dense kernel in "in_channel out_channel"
+            # (resnet0_dense is a 1x1 conv2d in the parent class, hence its 4-d transpose)
+            "{}.pre_conv.weight".format(tensor_name_prefix_torch):
+                {"name": "{}/pre_conv/kernel".format(tensor_name_prefix_tf),
+                 "squeeze": None,
+                 "transpose": (3, 2, 0, 1),
+                 },
+            "{}.pre_conv_bn.bias".format(tensor_name_prefix_torch):
+                {"name": "{}/pre_conv_bn/beta".format(tensor_name_prefix_tf),
+                 "squeeze": None,
+                 "transpose": None,
+                 },
+            "{}.pre_conv_bn.weight".format(tensor_name_prefix_torch):
+                {"name": "{}/pre_conv_bn/gamma".format(tensor_name_prefix_tf),
+                 "squeeze": None,
+                 "transpose": None,
+                 },
+            "{}.pre_conv_bn.running_mean".format(tensor_name_prefix_torch):
+                {"name": "{}/pre_conv_bn/moving_mean".format(tensor_name_prefix_tf),
+                 "squeeze": None,
+                 "transpose": None,
+                 },
+            "{}.pre_conv_bn.running_var".format(tensor_name_prefix_torch):
+                {"name": "{}/pre_conv_bn/moving_variance".format(tensor_name_prefix_tf),
+                 "squeeze": None,
+                 "transpose": None,
+                 },
+            "{}.pre_conv_bn.num_batches_tracked".format(tensor_name_prefix_torch): train_steps
+        }
+        for layer_idx in range(3):
+            map_dict_local.update({
+                "{}.resnet{}_dense.weight".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_dense/kernel".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": (3, 2, 0, 1) if layer_idx == 0 else (1, 0),
+                     },
+                "{}.resnet{}_dense.bias".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_dense/bias".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": None,
+                     },
+                "{}.resnet{}_bn.weight".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_bn/gamma".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": None,
+                     },
+                "{}.resnet{}_bn.bias".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_bn/beta".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": None,
+                     },
+                "{}.resnet{}_bn.running_mean".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_bn/moving_mean".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": None,
+                     },
+                "{}.resnet{}_bn.running_var".format(tensor_name_prefix_torch, layer_idx):
+                    {"name": "{}/resnet{}_bn/moving_variance".format(tensor_name_prefix_tf, layer_idx),
+                     "squeeze": None,
+                     "transpose": None,
+                     },
+                "{}.resnet{}_bn.num_batches_tracked".format(tensor_name_prefix_torch, layer_idx): train_steps
+            })
+
+        for block_idx in range(len(self.layers_in_block)):
+            for layer_idx in range(self.layers_in_block[block_idx]):
+                for i in ["1", "2", "_sc"]:
+                    map_dict_local.update({
+                        "{}.block_{}.layer_{}.conv{}.weight".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+                            {"name": "{}/block_{}/layer_{}/conv{}/kernel".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+                             "squeeze": None,
+                             "transpose": (3, 2, 0, 1),
+                             },
+                        "{}.block_{}.layer_{}.bn{}.weight".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+                            {"name": "{}/block_{}/layer_{}/bn{}/gamma".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+                             "squeeze": None,
+                             "transpose": None,
+                             },
+                        "{}.block_{}.layer_{}.bn{}.bias".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+                            {"name": "{}/block_{}/layer_{}/bn{}/beta".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+                             "squeeze": None,
+                             "transpose": None,
+                             },
+                        "{}.block_{}.layer_{}.bn{}.running_mean".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+                            {"name": "{}/block_{}/layer_{}/bn{}/moving_mean".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+                             "squeeze": None,
+                             "transpose": None,
+                             },
+                        "{}.block_{}.layer_{}.bn{}.running_var".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
+                            {"name": "{}/block_{}/layer_{}/bn{}/moving_variance".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
+                             "squeeze": None,
+                             "transpose": None,
+                             },
+                        "{}.block_{}.layer_{}.bn{}.num_batches_tracked".format(tensor_name_prefix_torch, block_idx, layer_idx, i): train_steps,
+                    })
+
+        return map_dict_local
+
+    def convert_tf2torch(self,
+                         var_dict_tf,
+                         var_dict_torch,
+                         ):
+
+        map_dict = self.gen_tf2torch_map_dict()
+
+        var_dict_torch_update = dict()
+        for name in sorted(var_dict_torch.keys(), reverse=False):
+            if name.startswith(self.tf2torch_tensor_name_prefix_torch):
+                if name in map_dict:
+                    if "num_batches_tracked" not in name:
+                        name_tf = map_dict[name]["name"]
+                        data_tf = var_dict_tf[name_tf]
+                        if map_dict[name]["squeeze"] is not None:
+                            data_tf = np.squeeze(data_tf, axis=map_dict[name]["squeeze"])
+                        if map_dict[name]["transpose"] is not None:
+                            data_tf = np.transpose(data_tf, map_dict[name]["transpose"])
+                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
+                        assert var_dict_torch[name].size() == data_tf.size(), \
+                            "{}, {}, {} != {}".format(name, name_tf,
+                                                      var_dict_torch[name].size(), data_tf.size())
+                        var_dict_torch_update[name] = data_tf
+                        logging.info("torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
+                            name, data_tf.size(), name_tf, var_dict_tf[name_tf].shape
+                        ))
+                    else:
+                        # build a scalar tensor; torch.Tensor(n) would allocate an
+                        # uninitialized tensor of size n instead of the value n
+                        var_dict_torch_update[name] = torch.tensor(map_dict[name]).type(torch.int64).to("cpu")
+                        logging.info("torch tensor: {}, manually assigning to: {}".format(
+                            name, map_dict[name]
+                        ))
+                else:
+                    logging.warning("{} is missing from the tf checkpoint".format(name))
+
+        return var_dict_torch_update

--
Gitblit v1.9.1