Upload 4 files
00001.wav
ADDED
Binary file (268 kB)

00002.wav
ADDED
Binary file (238 kB)
app.py
ADDED
@@ -0,0 +1,20 @@
import gradio as gr
import torch

from model import ECAPA_gender

# Load the pretrained gender-classification model from the Hugging Face Hub.
model = ECAPA_gender.from_pretrained('JaesungHuh/ecapa-gender')
model.eval()

def predict_gender(filepath):
    # Read the audio file; load_audio resamples to 16 kHz if needed.
    audio = model.load_audio(filepath)
    with torch.no_grad():
        output = model(audio)
        probs = torch.softmax(output, dim=1)
    prob_dict = {'Human ' + model.pred2gender[i]: float(prob) for i, prob in enumerate(probs[0])}
    return prob_dict

audio_component = gr.Audio(type='filepath', label='Upload your audio file here')
label_component = gr.Label(label='Gender classification result')
demo = gr.Interface(fn=predict_gender, inputs=audio_component, outputs=label_component, examples=['00001.wav', '00002.wav'])
demo.launch()
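The handler can also be sanity-checked without launching the UI by calling it directly. A minimal sketch, assuming the bundled example files are present; the printed probabilities are made up for illustration, not real outputs:

# Hypothetical quick check; the values shown are illustrative only.
print(predict_gender('00001.wav'))
# {'Human male': 0.97, 'Human female': 0.03}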
model.py
ADDED
@@ -0,0 +1,168 @@
import math
from typing import Optional

import torch
import torch.nn as nn
import torch.nn.functional as F

import torchaudio
from torchaudio.functional import resample

from huggingface_hub import PyTorchModelHubMixin


class SEModule(nn.Module):
    # Squeeze-and-excitation block for 1-D feature maps.
    def __init__(self, channels: int, bottleneck: int = 128) -> None:
        super(SEModule, self).__init__()
        self.se = nn.Sequential(
            nn.AdaptiveAvgPool1d(1),
            nn.Conv1d(channels, bottleneck, kernel_size=1, padding=0),
            nn.ReLU(),
            # nn.BatchNorm1d(bottleneck),  # I removed this layer
            nn.Conv1d(bottleneck, channels, kernel_size=1, padding=0),
            nn.Sigmoid(),
        )

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        x = self.se(input)
        return input * x


class Bottle2neck(nn.Module):
    # Res2Net bottleneck block with a squeeze-and-excitation module.
    def __init__(self, inplanes: int, planes: int, kernel_size: Optional[int] = None, dilation: Optional[int] = None, scale: int = 8) -> None:
        super(Bottle2neck, self).__init__()
        width = int(math.floor(planes / scale))
        self.conv1 = nn.Conv1d(inplanes, width * scale, kernel_size=1)
        self.bn1 = nn.BatchNorm1d(width * scale)
        self.nums = scale - 1
        convs = []
        bns = []
        num_pad = math.floor(kernel_size / 2) * dilation
        for i in range(self.nums):
            convs.append(nn.Conv1d(width, width, kernel_size=kernel_size, dilation=dilation, padding=num_pad))
            bns.append(nn.BatchNorm1d(width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.conv3 = nn.Conv1d(width * scale, planes, kernel_size=1)
        self.bn3 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU()
        self.width = width
        self.se = SEModule(planes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = x
        out = self.conv1(x)
        out = self.relu(out)
        out = self.bn1(out)

        # Res2Net: split channels into `scale` groups and process them hierarchically.
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if i == 0:
                sp = spx[i]
            else:
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = self.relu(sp)
            sp = self.bns[i](sp)
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        out = torch.cat((out, spx[self.nums]), 1)

        out = self.conv3(out)
        out = self.relu(out)
        out = self.bn3(out)

        out = self.se(out)
        out += residual
        return out


class ECAPA_gender(nn.Module, PyTorchModelHubMixin):
    def __init__(self, C: int = 1024):
        super(ECAPA_gender, self).__init__()
        self.C = C
        self.conv1 = nn.Conv1d(80, C, kernel_size=5, stride=1, padding=2)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(C)
        self.layer1 = Bottle2neck(C, C, kernel_size=3, dilation=2, scale=8)
        self.layer2 = Bottle2neck(C, C, kernel_size=3, dilation=3, scale=8)
        self.layer3 = Bottle2neck(C, C, kernel_size=3, dilation=4, scale=8)
        # I fixed the shape of the MFA layer output so that it is close to the setting in the ECAPA paper.
        self.layer4 = nn.Conv1d(3 * C, 1536, kernel_size=1)
        self.attention = nn.Sequential(
            nn.Conv1d(4608, 256, kernel_size=1),
            nn.ReLU(),
            nn.BatchNorm1d(256),
            nn.Tanh(),  # I added this layer
            nn.Conv1d(256, 1536, kernel_size=1),
            nn.Softmax(dim=2),
        )
        self.bn5 = nn.BatchNorm1d(3072)
        self.fc6 = nn.Linear(3072, 192)
        self.bn6 = nn.BatchNorm1d(192)
        self.fc7 = nn.Linear(192, 2)
        self.pred2gender = {0: 'male', 1: 'female'}

    def logtorchfbank(self, x: torch.Tensor) -> torch.Tensor:
        # Pre-emphasis
        flipped_filter = torch.FloatTensor([-0.97, 1.]).unsqueeze(0).unsqueeze(0)
        x = x.unsqueeze(1)
        x = F.pad(x, (1, 0), 'reflect')
        x = F.conv1d(x, flipped_filter).squeeze(1)

        # Mel spectrogram
        x = torchaudio.transforms.MelSpectrogram(sample_rate=16000, n_fft=512, win_length=400, hop_length=160,
                                                 f_min=20, f_max=7600, window_fn=torch.hamming_window, n_mels=80)(x) + 1e-6

        # Log and mean-normalize over time
        x = x.log()
        x = x - torch.mean(x, dim=-1, keepdim=True)
        return x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.logtorchfbank(x)

        x = self.conv1(x)
        x = self.relu(x)
        x = self.bn1(x)

        x1 = self.layer1(x)
        x2 = self.layer2(x + x1)
        x3 = self.layer3(x + x1 + x2)

        x = self.layer4(torch.cat((x1, x2, x3), dim=1))
        x = self.relu(x)

        t = x.size()[-1]

        # Append channel-wise mean and std, repeated over time, as context for the attention.
        global_x = torch.cat((x, torch.mean(x, dim=2, keepdim=True).repeat(1, 1, t), torch.sqrt(torch.var(x, dim=2, keepdim=True).clamp(min=1e-4)).repeat(1, 1, t)), dim=1)

        w = self.attention(global_x)

        # Attentive statistics pooling: attention-weighted mean and standard deviation.
        mu = torch.sum(x * w, dim=2)
        sg = torch.sqrt((torch.sum((x ** 2) * w, dim=2) - mu ** 2).clamp(min=1e-4))

        x = torch.cat((mu, sg), 1)
        x = self.bn5(x)
        x = self.fc6(x)
        x = self.bn6(x)
        x = self.relu(x)
        x = self.fc7(x)

        return x

    def load_audio(self, path: str) -> torch.Tensor:
        audio, sr = torchaudio.load(path)
        if sr != 16000:
            audio = resample(audio, sr, 16000)
        return audio

    def predict(self, path: str) -> str:
        audio = self.load_audio(path)
        self.eval()
        with torch.no_grad():
            output = self.forward(audio)
        _, pred = output.max(1)
        return self.pred2gender[pred.item()]
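The predict helper wraps the full pipeline (load and resample, pre-emphasis and log-mel features, forward pass, argmax over the two classes). A minimal usage sketch, assuming the pretrained weights resolve from the JaesungHuh/ecapa-gender repo on the Hub and a local wav file such as the bundled 00002.wav:

from model import ECAPA_gender

# Downloads the weights on first use, then runs inference end to end.
model = ECAPA_gender.from_pretrained('JaesungHuh/ecapa-gender')
print(model.predict('00002.wav'))  # prints 'male' or 'female'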