From d80ac2fd2df4e7fb8a28acfa512bb11472b5cc99 Mon Sep 17 00:00:00 2001
From: liugz18 <57401541+liugz18@users.noreply.github.com>
Date: Thu, 18 Jul 2024 21:34:55 +0800
Subject: [PATCH] Rename 'res' in line 514 to avoid naming conflict with line 365
---
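For context, drop_path (reformatted below) implements per-sample stochastic
depth: each sample in the batch is zeroed with probability drop_prob, and the
survivors are rescaled by 1/keep_prob so the expected value is unchanged. A
minimal sketch of that behavior (illustrative only, not part of this patch):

    import torch

    x = torch.ones(4, 3)                         # batch of 4 samples
    keep_prob = 1 - 0.25                         # i.e. drop_prob = 0.25
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # one draw per sample
    mask = x.new_empty(shape).bernoulli_(keep_prob)
    mask.div_(keep_prob)                         # the scale_by_keep=True rescaling
    out = x * mask                               # dropped samples become all-zero rows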
funasr/models/emotion2vec/timm_modules.py | 54 ++++++++++++++++++++++++------------------------------
 1 file changed, 24 insertions(+), 30 deletions(-)
diff --git a/funasr/models/emotion2vec/timm_modules.py b/funasr/models/emotion2vec/timm_modules.py
index 1f6285a..416d2cb 100644
--- a/funasr/models/emotion2vec/timm_modules.py
+++ b/funasr/models/emotion2vec/timm_modules.py
@@ -1,14 +1,10 @@
-from itertools import repeat
-import collections.abc
-from functools import partial
-from typing import Optional, Tuple
-import numpy as np
-
-import torch
import torch.nn as nn
-import torch.nn.functional as F
+import collections.abc
+from itertools import repeat
+from functools import partial
-def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
+
+def drop_path(x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
@@ -18,7 +14,7 @@
'survival rate' as the argument.
"""
- if drop_prob == 0. or not training:
+ if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
@@ -27,10 +23,11 @@
random_tensor.div_(keep_prob)
return x * random_tensor
+
class DropPath(nn.Module):
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
- """
- def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
@@ -39,10 +36,7 @@
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
def extra_repr(self):
- return f'drop_prob={round(self.drop_prob,3):0.3f}'
-
-
-
+ return f"drop_prob={round(self.drop_prob,3):0.3f}"
# From PyTorch internals
@@ -51,6 +45,7 @@
if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
return tuple(x)
return tuple(repeat(x, n))
+
return parse
@@ -60,19 +55,20 @@
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
+
class Mlp(nn.Module):
- """ MLP as used in Vision Transformer, MLP-Mixer and related networks
- """
+ """MLP as used in Vision Transformer, MLP-Mixer and related networks"""
+
def __init__(
- self,
- in_features,
- hidden_features=None,
- out_features=None,
- act_layer=nn.GELU,
- norm_layer=None,
- bias=True,
- drop=0.,
- use_conv=False,
+ self,
+ in_features,
+ hidden_features=None,
+ out_features=None,
+ act_layer=nn.GELU,
+ norm_layer=None,
+ bias=True,
+ drop=0.0,
+ use_conv=False,
):
super().__init__()
out_features = out_features or in_features
@@ -96,5 +92,3 @@
x = self.fc2(x)
x = self.drop2(x)
return x
-
-
--
Gitblit v1.9.1
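
P.S. The _ntuple helper reformatted above follows the PyTorch-internals
idiom: a non-string iterable is passed through as a tuple, anything else is
repeated into an n-tuple. Illustrative usage (not part of this patch):

    to_2tuple = _ntuple(2)
    to_2tuple(3)        # -> (3, 3)
    to_2tuple((3, 5))   # -> (3, 5)
    to_2tuple("ab")     # -> ('ab', 'ab'); strings count as scalars here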