From e8590bb1e9a26ce9bf8f0991d53b5ea4f0d77627 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 08 Jan 2024 16:40:43 +0800
Subject: [PATCH] funasr1.0 emotion2vec
---
funasr/models/emotion2vec/model.py | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/funasr/models/emotion2vec/model.py b/funasr/models/emotion2vec/model.py
index e882b6e..315c1cc 100644
--- a/funasr/models/emotion2vec/model.py
+++ b/funasr/models/emotion2vec/model.py
@@ -1,5 +1,11 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
+# Modified from https://github.com/ddlBoJack/emotion2vec/tree/main
import logging
+import os
from functools import partial
import numpy as np
@@ -21,7 +27,11 @@
@tables.register("model_classes", "Emotion2vec")
class Emotion2vec(nn.Module):
-
+ """
+ Author: Ziyang Ma, Zhisheng Zheng, Jiaxin Ye, Jinchao Li, Zhifu Gao, Shiliang Zhang, Xie Chen
+ emotion2vec: Self-Supervised Pre-Training for Speech Emotion Representation
+ https://arxiv.org/abs/2312.15185
+ """
def __init__(self, **kwargs):
super().__init__()
# import pdb; pdb.set_trace()
@@ -196,6 +206,9 @@
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
results = []
+ output_dir = kwargs.get("output_dir")
+ if output_dir:
+ os.makedirs(output_dir, exist_ok=True)
for i, wav in enumerate(audio_sample_list):
source = wav.to(device=kwargs["device"])
if self.cfg.normalize:
@@ -211,5 +224,7 @@
result_i = {"key": key[i], "feats": feats}
results.append(result_i)
+ if output_dir:
+ np.save(os.path.join(output_dir, "{}.npy".format(key[i])), feats)
return results, meta_data
\ No newline at end of file
--
Gitblit v1.9.1
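
A minimal usage sketch of the behavior this patch adds, assuming FunASR 1.0's
AutoModel entry point and a hypothetical emotion2vec model id: with the patch
applied, passing output_dir through inference creates the directory and saves
each utterance's extracted features as <key>.npy via np.save.

    import numpy as np
    from funasr import AutoModel

    # Hypothetical model id; substitute the emotion2vec checkpoint you use.
    model = AutoModel(model="iic/emotion2vec_base")

    # With the patch, output_dir is created with os.makedirs(..., exist_ok=True)
    # and each result's feats array is written to <output_dir>/<key>.npy.
    res = model.generate("test.wav", output_dir="./emotion2vec_feats")

    # The key is assumed here to be derived from the input file name.
    feats = np.load("./emotion2vec_feats/test.npy")
    print(feats.shape)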