haoneng.lhn
2023-09-14 dcb92f13eddbf3032ce363b35f13f80afa8f94d1
add paraformer online opt infer code
3 files changed, 183 lines changed

funasr/bin/asr_inference_launch.py      8
funasr/models/decoder/sanm_decoder.py   146
funasr/modules/attention.py             29
funasr/bin/asr_inference_launch.py
@@ -853,7 +853,7 @@
                    "feats": torch.zeros((batch_size, chunk_size[0] + chunk_size[2], feats_dims)), "tail_chunk": False}
        cache["encoder"] = cache_en
        cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None}
        cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None, "chunk_size": chunk_size}
        cache["decoder"] = cache_de
        return cache
@@ -870,7 +870,7 @@
                        "feats": torch.zeros((batch_size, chunk_size[0] + chunk_size[2], feats_dims)), "tail_chunk": False}
            cache["encoder"] = cache_en
            cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None}
            cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None, "chunk_size": chunk_size}
            cache["decoder"] = cache_de
        return cache
@@ -982,8 +982,8 @@
        asr_result_list.append(item)
        if is_final:
-            cache = _cache_reset(cache, chunk_size=chunk_size, batch_size=1,
-                                 encoder_chunk_look_back=encoder_chunk_look_back, decoder_chunk_look_back=decoder_chunk_look_back)
+            cache = _cache_reset(cache, chunk_size=chunk_size, encoder_chunk_look_back=encoder_chunk_look_back,
+                                 decoder_chunk_look_back=decoder_chunk_look_back, batch_size=1)
        return asr_result_list
    return _forward
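Note on the two _cache_reset hunks above: the decoder cache now also carries chunk_size, which the streaming decoder later reads (together with decoder_chunk_look_back) to size its cross-attention cache. A minimal sketch of the resulting decoder cache layout; the helper name and the default values are illustrative, not part of the repository:

    # A minimal sketch, assuming the cache layout built in _cache_reset above.
    # build_decoder_cache and its defaults are hypothetical, for illustration only.
    def build_decoder_cache(chunk_size=(5, 10, 5), decoder_chunk_look_back=1):
        return {
            "decode_fsmn": None,  # per-layer FSMN caches, filled on the first chunk
            "decoder_chunk_look_back": decoder_chunk_look_back,  # chunks of history for cross-attention
            "opt": None,  # per-layer cross-attention (opt) key/value caches
            "chunk_size": chunk_size,  # newly added so the decoder can size the opt cache
        }

    cache = {"decoder": build_decoder_cache()}
    print(cache["decoder"]["chunk_size"])  # (5, 10, 5)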
funasr/models/decoder/sanm_decoder.py
@@ -105,7 +105,50 @@
        return x, tgt_mask, memory, memory_mask, cache
-    def forward_chunk(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
+    #def forward_chunk(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
+    #    """Compute decoded features.
+    #    Args:
+    #        tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).
+    #        tgt_mask (torch.Tensor): Mask for input tensor (#batch, maxlen_out).
+    #        memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, size).
+    #        memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).
+    #        cache (List[torch.Tensor]): List of cached tensors.
+    #            Each tensor shape should be (#batch, maxlen_out - 1, size).
+    #    Returns:
+    #        torch.Tensor: Output tensor(#batch, maxlen_out, size).
+    #        torch.Tensor: Mask for output tensor (#batch, maxlen_out).
+    #        torch.Tensor: Encoded memory (#batch, maxlen_in, size).
+    #        torch.Tensor: Encoded memory mask (#batch, maxlen_in).
+    #    """
+    #    # tgt = self.dropout(tgt)
+    #    residual = tgt
+    #    if self.normalize_before:
+    #        tgt = self.norm1(tgt)
+    #    tgt = self.feed_forward(tgt)
+    #    x = tgt
+    #    if self.self_attn:
+    #        if self.normalize_before:
+    #            tgt = self.norm2(tgt)
+    #        if self.training:
+    #            cache = None
+    #        x, cache = self.self_attn(tgt, tgt_mask, cache=cache)
+    #        x = residual + self.dropout(x)
+    #    if self.src_attn is not None:
+    #        residual = x
+    #        if self.normalize_before:
+    #            x = self.norm3(x)
+    #        x = residual + self.dropout(self.src_attn(x, memory, memory_mask))
+    #    return x, tgt_mask, memory, memory_mask, cache
+    def forward_chunk(self, tgt, memory, fsmn_cache=None, opt_cache=None, chunk_size=None, look_back=0):
        """Compute decoded features.
        Args:
@@ -123,7 +166,6 @@
            torch.Tensor: Encoded memory mask (#batch, maxlen_in).
        """
        # tgt = self.dropout(tgt)
        residual = tgt
        if self.normalize_before:
            tgt = self.norm1(tgt)
@@ -133,9 +175,7 @@
        if self.self_attn:
            if self.normalize_before:
                tgt = self.norm2(tgt)
-            if self.training:
-                cache = None
-            x, cache = self.self_attn(tgt, tgt_mask, cache=cache)
+            x, fsmn_cache = self.self_attn(tgt, None, fsmn_cache)
            x = residual + self.dropout(x)
        if self.src_attn is not None:
@@ -143,10 +183,11 @@
            if self.normalize_before:
                x = self.norm3(x)
-            x = residual + self.dropout(self.src_attn(x, memory, memory_mask))
+            x, opt_cache = self.src_attn.forward_chunk(x, memory, opt_cache, chunk_size, look_back)
+            x = residual + x
+        return x, memory, fsmn_cache, opt_cache
-        return x, tgt_mask, memory, memory_mask, cache
class FsmnDecoderSCAMAOpt(BaseTransformerDecoder):
    """
@@ -992,6 +1033,65 @@
        )
        return logp.squeeze(0), state
+    #def forward_chunk(
+    #    self,
+    #    memory: torch.Tensor,
+    #    tgt: torch.Tensor,
+    #    cache: dict = None,
+    #) -> Tuple[torch.Tensor, torch.Tensor]:
+    #    """Forward decoder.
+    #    Args:
+    #        hs_pad: encoded memory, float32  (batch, maxlen_in, feat)
+    #        hlens: (batch)
+    #        ys_in_pad:
+    #            input token ids, int64 (batch, maxlen_out)
+    #            if input_layer == "embed"
+    #            input tensor (batch, maxlen_out, #mels) in the other cases
+    #        ys_in_lens: (batch)
+    #    Returns:
+    #        (tuple): tuple containing:
+    #        x: decoded token score before softmax (batch, maxlen_out, token)
+    #            if use_output_layer is True,
+    #        olens: (batch, )
+    #    """
+    #    x = tgt
+    #    if cache["decode_fsmn"] is None:
+    #        cache_layer_num = len(self.decoders)
+    #        if self.decoders2 is not None:
+    #            cache_layer_num += len(self.decoders2)
+    #        new_cache = [None] * cache_layer_num
+    #    else:
+    #        new_cache = cache["decode_fsmn"]
+    #    for i in range(self.att_layer_num):
+    #        decoder = self.decoders[i]
+    #        x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
+    #            x, None, memory, None, cache=new_cache[i]
+    #        )
+    #        new_cache[i] = c_ret
+    #    if self.num_blocks - self.att_layer_num > 1:
+    #        for i in range(self.num_blocks - self.att_layer_num):
+    #            j = i + self.att_layer_num
+    #            decoder = self.decoders2[i]
+    #            x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
+    #                x, None, memory, None, cache=new_cache[j]
+    #            )
+    #            new_cache[j] = c_ret
+    #    for decoder in self.decoders3:
+    #        x, tgt_mask, memory, memory_mask, _ = decoder.forward_chunk(
+    #            x, None, memory, None, cache=None
+    #        )
+    #    if self.normalize_before:
+    #        x = self.after_norm(x)
+    #    if self.output_layer is not None:
+    #        x = self.output_layer(x)
+    #    cache["decode_fsmn"] = new_cache
+    #    return x
    def forward_chunk(
        self,
        memory: torch.Tensor,
@@ -1020,35 +1120,43 @@
            cache_layer_num = len(self.decoders)
            if self.decoders2 is not None:
                cache_layer_num += len(self.decoders2)
-            new_cache = [None] * cache_layer_num
+            fsmn_cache = [None] * cache_layer_num
        else:
            new_cache = cache["decode_fsmn"]
            fsmn_cache = cache["decode_fsmn"]
        if cache["opt"] is None:
            cache_layer_num = len(self.decoders)
            opt_cache = [None] * cache_layer_num
        else:
            opt_cache = cache["opt"]
        for i in range(self.att_layer_num):
            decoder = self.decoders[i]
-            x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
-                x, None, memory, None, cache=new_cache[i]
+            x, memory, fsmn_cache[i], opt_cache[i] = decoder.forward_chunk(
+                x, memory, fsmn_cache=fsmn_cache[i], opt_cache=opt_cache[i],
+                chunk_size=cache["chunk_size"], look_back=cache["decoder_chunk_look_back"]
            )
-            new_cache[i] = c_ret
        if self.num_blocks - self.att_layer_num > 1:
            for i in range(self.num_blocks - self.att_layer_num):
                j = i + self.att_layer_num
                decoder = self.decoders2[i]
-                x, tgt_mask, memory, memory_mask, c_ret = decoder.forward_chunk(
-                    x, None, memory, None, cache=new_cache[j]
+                x, memory, fsmn_cache[j], _  = decoder.forward_chunk(
+                    x, memory, fsmn_cache=fsmn_cache[j]
                )
-                new_cache[j] = c_ret
        for decoder in self.decoders3:
-            x, tgt_mask, memory, memory_mask, _ = decoder.forward_chunk(
-                x, None, memory, None, cache=None
+            x, memory, _, _ = decoder.forward_chunk(
+                x, memory
            )
        if self.normalize_before:
            x = self.after_norm(x)
        if self.output_layer is not None:
            x = self.output_layer(x)
        cache["decode_fsmn"] = new_cache
        cache["decode_fsmn"] = fsmn_cache
        if cache["decoder_chunk_look_back"] > 0 or cache["decoder_chunk_look_back"] == -1:
            cache["opt"] = opt_cache
        return x
    def forward_one_step(
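For orientation, here is a stripped-down sketch of the per-layer flow that the rewritten forward_chunk drives (it mirrors DecoderLayerSANM.forward_chunk from the hunks above, with the norm/feed-forward/attention modules passed in as plain callables and dropout omitted; it is an illustration, not the repository implementation):

    def layer_forward_chunk(tgt, memory, feed_forward, self_attn, src_attn,
                            norm1, norm2, norm3, fsmn_cache=None, opt_cache=None,
                            chunk_size=None, look_back=0):
        # FSMN self-attention keeps its own memory; cross-attention ("opt") keeps a
        # key/value cache whose size is derived from chunk_size and look_back.
        residual = tgt
        tgt = norm1(tgt)
        tgt = feed_forward(tgt)
        x = tgt
        if self_attn is not None:
            tgt = norm2(tgt)
            x, fsmn_cache = self_attn(tgt, None, fsmn_cache)
            x = residual + x
        if src_attn is not None:
            residual = x
            x = norm3(x)
            x, opt_cache = src_attn.forward_chunk(x, memory, opt_cache, chunk_size, look_back)
            x = residual + x
        return x, memory, fsmn_cache, opt_cache

The decoder-level loop then writes the returned caches straight back into its per-layer lists via tuple unpacking (x, memory, fsmn_cache[i], opt_cache[i] = decoder.forward_chunk(...)), which is how cache["decode_fsmn"] and cache["opt"] get refreshed on every chunk.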
funasr/modules/attention.py
@@ -705,6 +705,35 @@
        scores = torch.matmul(q_h, k_h.transpose(-2, -1))
        return self.forward_attention(v_h, scores, memory_mask)
+    def forward_chunk(self, x, memory, cache=None, chunk_size=None, look_back=0):
+        """Compute scaled dot product attention.
+        Args:
+            query (torch.Tensor): Query tensor (#batch, time1, size).
+            key (torch.Tensor): Key tensor (#batch, time2, size).
+            value (torch.Tensor): Value tensor (#batch, time2, size).
+            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
+                (#batch, time1, time2).
+        Returns:
+            torch.Tensor: Output tensor (#batch, time1, d_model).
+        """
+        q_h, k_h, v_h = self.forward_qkv(x, memory)
+        if chunk_size is not None and look_back > 0:
+            if cache is not None:
+                k_h = torch.cat((cache["k"], k_h), dim=2)
+                v_h = torch.cat((cache["v"], v_h), dim=2)
+                cache["k"] = k_h[:, :, -(look_back * chunk_size[1]):, :]
+                cache["v"] = v_h[:, :, -(look_back * chunk_size[1]):, :]
+            else:
+                cache_tmp = {"k": k_h[:, :, -(look_back * chunk_size[1]):, :],
+                             "v": v_h[:, :, -(look_back * chunk_size[1]):, :]}
+                cache = cache_tmp
+        q_h = q_h * self.d_k ** (-0.5)
+        scores = torch.matmul(q_h, k_h.transpose(-2, -1))
+        return self.forward_attention(v_h, scores, None), cache
class MultiHeadSelfAttention(nn.Module):
    """Multi-Head Attention layer.