From 95bed2337e8065d3331109d6c2d00349ad82fd77 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 08 May 2024 19:14:21 +0800
Subject: [PATCH] Merge branch 'dev_gzf_exp' of github.com:alibaba-damo-academy/FunASR into dev_gzf_exp merge
---
 README.md                           |  4 +-
 README_zh.md                        |  4 +-
 funasr/models/sense_voice/model.py  | 28 ++++++++++++++
 funasr/models/sense_voice/search.py | 59 +++++++++++++++++++++++++++++
4 files changed, 91 insertions(+), 4 deletions(-)
diff --git a/README.md b/README.md
index faa758c..8b093bc 100644
--- a/README.md
+++ b/README.md
@@ -83,8 +83,8 @@
 | fsmn-vad <br> ( [⭐](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary) [🤗](https://huggingface.co/funasr/fsmn-vad) ) | voice activity detection | 5000 hours, Mandarin and English | 0.4M |
 | fa-zh <br> ( [⭐](https://modelscope.cn/models/damo/speech_timestamp_prediction-v1-16k-offline/summary) [🤗](https://huggingface.co/funasr/fa-zh) ) | timestamp prediction | 5000 hours, Mandarin | 38M |
 | cam++ <br> ( [⭐](https://modelscope.cn/models/iic/speech_campplus_sv_zh-cn_16k-common/summary) [🤗](https://huggingface.co/funasr/campplus) ) | speaker verification/diarization | 5000 hours | 7.2M |
-| Whisper-large-v2 <br> ([⭐](https://www.modelscope.cn/models/iic/speech_whisper-large_asr_multilingual/summary) [🍀](https://github.com/openai/whisper) ) | speech recognition, with timestamps, non-streaming | multilingual | 1.5G |
-| Whisper-large-v3 <br> ([⭐](https://www.modelscope.cn/models/iic/Whisper-large-v3/summary) [🍀](https://github.com/openai/whisper) ) | speech recognition, with timestamps, non-streaming | multilingual | 1.5G |
+| Whisper-large-v2 <br> ([⭐](https://www.modelscope.cn/models/iic/speech_whisper-large_asr_multilingual/summary) [🍀](https://github.com/openai/whisper) ) | speech recognition, with timestamps, non-streaming | multilingual | 1550 M |
+| Whisper-large-v3 <br> ([⭐](https://www.modelscope.cn/models/iic/Whisper-large-v3/summary) [🍀](https://github.com/openai/whisper) ) | speech recognition, with timestamps, non-streaming | multilingual | 1550 M |
 | Qwen-Audio <br> ([⭐](examples/industrial_data_pretraining/qwen_audio/demo.py) [🤗](https://huggingface.co/Qwen/Qwen-Audio) ) | audio-text multimodal models (pretraining) | multilingual | 8B |
 | Qwen-Audio-Chat <br> ([⭐](examples/industrial_data_pretraining/qwen_audio/demo_chat.py) [🤗](https://huggingface.co/Qwen/Qwen-Audio-Chat) ) | audio-text multimodal models (chat) | multilingual | 8B |
diff --git a/README_zh.md b/README_zh.md
index 80c2e7e..963469a 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -80,11 +80,11 @@
 | paraformer-zh-streaming <br> ( [⭐](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online/summary) [🤗](https://huggingface.co/funasr/paraformer-zh-streaming) ) | speech recognition, streaming | 60000 hours, Mandarin | 220M |
 | paraformer-en <br> ( [⭐](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-en-16k-common-vocab10020/summary) [🤗](https://huggingface.co/funasr/paraformer-en) ) | speech recognition, non-streaming | 50000 hours, English | 220M |
 | conformer-en <br> ( [⭐](https://modelscope.cn/models/damo/speech_conformer_asr-en-16k-vocab4199-pytorch/summary) [🤗](https://huggingface.co/funasr/conformer-en) ) | speech recognition, non-streaming | 50000 hours, English | 220M |
-| ct-punc <br> ( [⭐](https://modelscope.cn/models/damo/punc_ct-transformer_cn-en-common-vocab471067-large/summary) [🤗](https://huggingface.co/funasr/ct-punc) ) | punctuation restoration | 100M, Mandarin and English | 1.1G |
+| ct-punc <br> ( [⭐](https://modelscope.cn/models/damo/punc_ct-transformer_cn-en-common-vocab471067-large/summary) [🤗](https://huggingface.co/funasr/ct-punc) ) | punctuation restoration | 100M, Mandarin and English | 1.1B |
 | fsmn-vad <br> ( [⭐](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary) [🤗](https://huggingface.co/funasr/fsmn-vad) ) | voice activity detection, streaming | 5000 hours, Mandarin and English | 0.4M |
 | fa-zh <br> ( [⭐](https://modelscope.cn/models/damo/speech_timestamp_prediction-v1-16k-offline/summary) [🤗](https://huggingface.co/funasr/fa-zh) ) | character-level timestamp prediction | 50000 hours, Mandarin | 38M |
 | cam++ <br> ( [⭐](https://modelscope.cn/models/iic/speech_campplus_sv_zh-cn_16k-common/summary) [🤗](https://huggingface.co/funasr/campplus) ) | speaker verification/diarization | 5000 hours | 7.2M |
-| Whisper-large-v3 <br> ([⭐](https://www.modelscope.cn/models/iic/Whisper-large-v3/summary) [🍀](https://github.com/openai/whisper) ) | speech recognition, with timestamp output, non-streaming | multilingual | 1G |
+| Whisper-large-v3 <br> ([⭐](https://www.modelscope.cn/models/iic/Whisper-large-v3/summary) [🍀](https://github.com/openai/whisper) ) | speech recognition, with timestamp output, non-streaming | multilingual | 1550 M |
 | Qwen-Audio <br> ([⭐](examples/industrial_data_pretraining/qwen_audio/demo.py) [🤗](https://huggingface.co/Qwen/Qwen-Audio) ) | audio-text multimodal model (pretraining) | multilingual | 8B |
 | Qwen-Audio-Chat <br> ([⭐](examples/industrial_data_pretraining/qwen_audio/demo_chat.py) [🤗](https://huggingface.co/Qwen/Qwen-Audio-Chat) ) | audio-text multimodal model (chat) | multilingual | 8B |
diff --git a/funasr/models/sense_voice/model.py b/funasr/models/sense_voice/model.py
index 0230638..00bc85b 100644
--- a/funasr/models/sense_voice/model.py
+++ b/funasr/models/sense_voice/model.py
@@ -514,6 +514,20 @@
self.beam_search.sos = sos_int
self.beam_search.eos = eos_int[0]
+        # Parameters for rich decoding (emotion and audio-event tokens).
+        self.beam_search.emo_unk = tokenizer.encode(
+            DecodingOptions.get("emo_unk_token", "<|SPECIAL_TOKEN_1|>"), allowed_special="all"
+        )[0]
+        self.beam_search.emo_unk_score = 1
+        self.beam_search.emo_tokens = tokenizer.encode(
+            DecodingOptions.get("emo_target_tokens", "<|HAPPY|><|SAD|><|ANGRY|>"), allowed_special="all"
+        )
+        # Per-emotion thresholds; a winning emotion below its threshold falls back to emo_unk.
+        self.beam_search.emo_scores = DecodingOptions.get("emo_target_threshold", [0.1, 0.1, 0.1])
+
+        self.beam_search.event_bg_token = tokenizer.encode(
+            DecodingOptions.get("gain_tokens_bg", "<|Speech|><|BGM|><|Applause|><|Laughter|>"), allowed_special="all"
+        )
+        self.beam_search.event_ed_token = tokenizer.encode(
+            DecodingOptions.get("gain_tokens_ed", "<|/Speech|><|/BGM|><|/Applause|><|/Laughter|>"), allowed_special="all"
+        )
+        # Multiplicative score gain applied to each event-begin token.
+        self.beam_search.event_score_ga = DecodingOptions.get("gain_tokens_score", [1, 1, 1, 1])
+
encoder_out, encoder_out_lens = self.encode(
speech[None, :, :].permute(0, 2, 1), speech_lengths
)
@@ -843,6 +857,20 @@
self.beam_search.sos = sos_int
self.beam_search.eos = eos_int[0]
+        # Parameters for rich decoding (emotion and audio-event tokens).
+        self.beam_search.emo_unk = tokenizer.encode(
+            DecodingOptions.get("emo_unk_token", "<|SPECIAL_TOKEN_1|>"), allowed_special="all"
+        )[0]
+        self.beam_search.emo_unk_score = 1
+        self.beam_search.emo_tokens = tokenizer.encode(
+            DecodingOptions.get("emo_target_tokens", "<|HAPPY|><|SAD|><|ANGRY|>"), allowed_special="all"
+        )
+        # Per-emotion thresholds; a winning emotion below its threshold falls back to emo_unk.
+        self.beam_search.emo_scores = DecodingOptions.get("emo_target_threshold", [0.1, 0.1, 0.1])
+
+        self.beam_search.event_bg_token = tokenizer.encode(
+            DecodingOptions.get("gain_tokens_bg", "<|Speech|><|BGM|><|Applause|><|Laughter|>"), allowed_special="all"
+        )
+        self.beam_search.event_ed_token = tokenizer.encode(
+            DecodingOptions.get("gain_tokens_ed", "<|/Speech|><|/BGM|><|/Applause|><|/Laughter|>"), allowed_special="all"
+        )
+        # Multiplicative score gain applied to each event-begin token.
+        self.beam_search.event_score_ga = DecodingOptions.get("gain_tokens_score", [1, 1, 1, 1])
+
encoder_out, encoder_out_lens = self.encode(
speech[None, :, :].permute(0, 2, 1), speech_lengths
)
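
Both hunks above are identical: each inference path wires the same rich-decoding
configuration into the beam search, reading it from the DecodingOptions dict. For
reference, a minimal sketch of the option keys this block consumes (key names are
taken from the diff above; the literal dict below is illustrative only and shows
the defaults the code falls back to):

    # Rich-decoding options read via DecodingOptions.get(...) in the hunks above.
    decoding_options = {
        "emo_unk_token": "<|SPECIAL_TOKEN_1|>",        # fallback emotion token
        "emo_target_tokens": "<|HAPPY|><|SAD|><|ANGRY|>",
        "emo_target_threshold": [0.1, 0.1, 0.1],       # per-emotion probability gates
        "gain_tokens_bg": "<|Speech|><|BGM|><|Applause|><|Laughter|>",      # event begin
        "gain_tokens_ed": "<|/Speech|><|/BGM|><|/Applause|><|/Laughter|>",  # event end
        "gain_tokens_score": [1, 1, 1, 1],             # multiplicative gain per event
    }
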
diff --git a/funasr/models/sense_voice/search.py b/funasr/models/sense_voice/search.py
index 694e569..4400ce7 100644
--- a/funasr/models/sense_voice/search.py
+++ b/funasr/models/sense_voice/search.py
@@ -1,4 +1,5 @@
from itertools import chain
+import math
import logging
from typing import Any
from typing import Dict
@@ -8,6 +9,7 @@
from typing import Union
import torch
+import numpy as np
from funasr.metrics.common import end_detect
from funasr.models.transformer.scorers.scorer_interface import PartialScorerInterface
@@ -42,6 +44,17 @@
vocab_size: int,
sos=None,
eos=None,
+        # NOTE rich decoding parameters; None defaults are resolved below to the
+        # SenseVoice ids for <|SPECIAL_TOKEN_1|> and [HAPPY, SAD, ANGRY, NEUTRAL].
+        emo_unk: int = 58964,
+        emo_unk_score: float = 1.0,
+        emo_tokens: List[int] = None,
+        emo_scores: List[float] = None,
+        # [Speech, BGM, Laughter, Applause]
+        event_bg_token: List[int] = None,
+        event_ed_token: List[int] = None,
+        event_score_ga: List[float] = None,
+
token_list: List[str] = None,
pre_beam_ratio: float = 1.5,
pre_beam_score_key: str = None,
@@ -110,6 +123,14 @@
and len(self.part_scorers) > 0
)
+        # Rich-decoding configuration; fall back to the SenseVoice defaults when unset.
+        self.emo_unk = emo_unk
+        self.emo_unk_score = emo_unk_score
+        self.emo_tokens = emo_tokens if emo_tokens is not None else [58954, 58955, 58956, 58957]
+        self.emo_scores = emo_scores if emo_scores is not None else [0.1, 0.1, 0.1, 0.1]
+        self.event_bg_token = event_bg_token if event_bg_token is not None else [58946, 58948, 58950, 58952]
+        self.event_ed_token = event_ed_token if event_ed_token is not None else [58947, 58949, 58951, 58953]
+        self.event_score_ga = event_score_ga if event_score_ga is not None else [1, 1, 5, 25]
+
def init_hyp(self, x: torch.Tensor) -> List[Hypothesis]:
"""Get an initial hypothesis data.
@@ -170,10 +191,48 @@
"""
scores = dict()
states = dict()
+
+        def get_score(yseq, sp1, sp2):
+            # Additive offsets [begin, end] that keep the event tokens paired:
+            # block a new begin while an event is open (or right after either
+            # marker), and block an end while no event is open.
+            score = [0, 0]
+            last_token = yseq[-1]
+            sum_sp1 = sum(1 for x in yseq if x == sp1)
+            sum_sp2 = sum(1 for x in yseq if x == sp2)
+            if sum_sp1 > sum_sp2 or last_token in [sp1, sp2]:
+                score[0] = -np.inf
+            if sum_sp2 >= sum_sp1:
+                score[1] = -np.inf
+            return score
+
+        def struct_score(yseq, score):
+            last_token = yseq[-1]
+            if last_token in self.emo_tokens + [self.emo_unk]:
+                # Prevent emitting an event token right after an emotion token.
+                score[self.event_bg_token] = -np.inf
+
+            # Keep begin/end event tokens paired and apply the per-event gain.
+            for eve_bg, eve_ed, eve_ga in zip(
+                self.event_bg_token, self.event_ed_token, self.event_score_ga
+            ):
+                score_offset = get_score(yseq, eve_bg, eve_ed)
+                score[eve_bg] += score_offset[0]
+                score[eve_ed] += score_offset[1]
+                score[eve_bg] += math.log(eve_ga)
+
+            # Route a low-confidence winning emotion token to the unk emotion token.
+            score[self.emo_unk] += math.log(self.emo_unk_score)
+            for emo, emo_th in zip(self.emo_tokens, self.emo_scores):
+                if score.argmax() == emo and score[emo] < math.log(emo_th):
+                    score[self.emo_unk] = max(score[emo], score[self.emo_unk])
+                    score[emo] = -np.inf
+            return score
+
for k, d in self.full_scorers.items():
scores[k], states[k] = d.score(hyp.yseq, hyp.states[k], x)
+ scores[k] = struct_score(hyp.yseq, scores[k])
+
return scores, states
+
def score_partial(
self, hyp: Hypothesis, ids: torch.Tensor, x: torch.Tensor
) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]:
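
The pairing rule enforced by get_score can be exercised on its own. A
self-contained sketch, using arbitrary placeholder ids rather than the real
SenseVoice vocabulary:

    import numpy as np

    def get_score(yseq, sp1, sp2):
        # sp1/sp2 are the begin/end ids of one event type. The returned
        # offsets block a begin while an event is open (or directly after
        # either marker) and block an end while no event is open.
        score = [0, 0]
        opened = sum(1 for x in yseq if x == sp1)
        closed = sum(1 for x in yseq if x == sp2)
        if opened > closed or yseq[-1] in (sp1, sp2):
            score[0] = -np.inf
        if closed >= opened:
            score[1] = -np.inf
        return score

    BG, ED = 1, 2  # placeholder ids
    print(get_score([0, 0], BG, ED))      # [0, -inf]: end blocked, begin allowed
    print(get_score([0, BG, 0], BG, ED))  # [-inf, 0]: begin blocked, end allowed
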
--
Gitblit v1.9.1
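
The emotion-threshold fallback in struct_score can likewise be illustrated in
isolation: when the top-scoring token is an emotion token whose log-probability
falls below its threshold, its mass is routed to the unknown-emotion token
instead. A standalone sketch with placeholder ids (not the real vocabulary):

    import math
    import torch

    def apply_emo_threshold(score, emo_tokens, emo_scores, emo_unk):
        # score: 1-D tensor of log-probabilities over the vocabulary.
        for emo, emo_th in zip(emo_tokens, emo_scores):
            if score.argmax() == emo and score[emo] < math.log(emo_th):
                # The winning emotion is too uncertain: prefer the unk emotion token.
                score[emo_unk] = max(score[emo], score[emo_unk])
                score[emo] = -float("inf")
        return score

    # Placeholder ids: 0 = <|HAPPY|>, 1 = <|SPECIAL_TOKEN_1|> (unk emotion).
    logp = torch.tensor([0.09, 0.05, 0.04]).log()
    print(apply_emo_threshold(logp, emo_tokens=[0], emo_scores=[0.1], emo_unk=1))
    # HAPPY wins but p = 0.09 < 0.1, so its score moves to the unk token.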