Add gradio app
- gradio_app.py +30 -0
- models/void_20230522_223553.pth +0 -0
- pipelines/train.py +16 -11
gradio_app.py
ADDED
@@ -0,0 +1,30 @@
+import torch
+import gradio as gr
+
+from cnn import CNNetwork
+from server.preprocess import process_raw_wav, _wav_to_spec
+
+model = CNNetwork()
+state_dict = torch.load('models/void_20230522_223553.pth')
+model.load_state_dict(state_dict)
+
+LABELS = ["shafqat", "aman", "jake"]
+
+
+def greet(input):
+    sr, wav = input
+
+    wav = torch.tensor([wav]).float()
+    wav = process_raw_wav(wav, sr, 48000, 3)
+    wav = _wav_to_spec(wav, 48000)
+
+    model_input = wav.unsqueeze(0)
+    output = model(model_input)
+    print(output)
+
+    prediction_index = torch.argmax(output, 1).item()
+    return LABELS[prediction_index]
+
+demo = gr.Interface(fn=greet, inputs="mic", outputs="text")
+
+demo.launch()
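For reference, server/preprocess.py is not part of this commit. Gradio's microphone input hands greet a (sample_rate, numpy_array) tuple, which the app turns into a float tensor and passes through the two imported helpers. Below is a minimal sketch of what process_raw_wav and _wav_to_spec are assumed to do (resample to the target rate, fix the clip length, produce a mel spectrogram with torchaudio); the transform parameters are placeholders, not the repository's actual values.

import torch
import torchaudio

def process_raw_wav(wav, sr, target_sr, seconds):
    # Assumed behaviour: resample to target_sr, then pad or trim to a fixed duration.
    if sr != target_sr:
        wav = torchaudio.transforms.Resample(orig_freq=sr, new_freq=target_sr)(wav)
    num_samples = target_sr * seconds
    if wav.shape[1] > num_samples:
        wav = wav[:, :num_samples]
    elif wav.shape[1] < num_samples:
        wav = torch.nn.functional.pad(wav, (0, num_samples - wav.shape[1]))
    return wav

def _wav_to_spec(wav, sr):
    # Assumed behaviour: mel spectrogram; n_fft, hop_length and n_mels are illustrative.
    mel = torchaudio.transforms.MelSpectrogram(sample_rate=sr, n_fft=1024, hop_length=512, n_mels=64)
    return mel(wav)

Run locally with python gradio_app.py; on Spaces the same demo.launch() call serves the app.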
models/void_20230522_223553.pth
ADDED
Binary file (655 kB)
pipelines/train.py
CHANGED
@@ -20,11 +20,11 @@ from cnn import CNNetwork
 
 # script defaults
 BATCH_SIZE = 128
-EPOCHS =
+EPOCHS = 100
 LEARNING_RATE = 0.001
 
-TRAIN_FILE="data/train"
-TEST_FILE="data/test"
+TRAIN_FILE="data/aisf/augmented/train"
+TEST_FILE="data/aisf/augmented/test"
 SAMPLE_RATE=48000
 
 stub = Stub(
@@ -33,7 +33,7 @@ stub = Stub(
 )
 
 @stub.function(
-    gpu=
+    gpu=gpu.A100(memory=20),
     mounts=[
         Mount.from_local_file(local_path='dataset.py'),
         Mount.from_local_file(local_path='cnn.py'),
@@ -49,6 +49,7 @@ def train(
     origin_device="cuda",
     epochs=10,
     test_dataloader=None,
+    wandb_enabled=False,
 ):
     import os
 
@@ -72,7 +73,8 @@ def train(
     testing_acc = []
     testing_loss = []
 
-
+    if wandb_enabled:
+        wandb.init(project="void-training")
 
     for i in range(epochs):
         print(f"Epoch {i + 1}/{epochs}")
@@ -84,7 +86,8 @@ def train(
         # training metrics
         training_loss.append(train_epoch_loss/len(train_dataloader))
         training_acc.append(train_epoch_acc/len(train_dataloader))
-
+        if wandb_enabled:
+            wandb.log({'training_loss': training_loss[i], 'training_acc': training_acc[i]})
 
         now = time.time()
         print("Training Loss: {:.2f}, Training Accuracy: {:.4f}, Time: {:.2f}s".format(training_loss[i], training_acc[i], now - then))
@@ -97,9 +100,10 @@ def train(
             testing_loss.append(test_epoch_loss/len(test_dataloader))
             testing_acc.append(test_epoch_acc/len(test_dataloader))
 
-            print("Testing Loss: {:.2f}, Testing Accuracy {:.
+            print("Testing Loss: {:.2f}, Testing Accuracy {:.4f}".format(testing_loss[i], testing_acc[i]))
 
-
+            if wandb_enabled:
+                wandb.log({'testing_loss': testing_loss[i], 'testing_acc': testing_acc[i]})
 
         print ("-------------------------------------------------------- \n")
 
@@ -115,7 +119,8 @@ def train(
     mounts=[
         Mount.from_local_file(local_path='dataset.py'),
        Mount.from_local_file(local_path='cnn.py'),
-    ]
+    ],
+    timeout=600,
 )
 def train_epoch(model, train_dataloader, loss_fn, optimizer, device):
     import torch
@@ -148,7 +153,7 @@ def train_epoch(model, train_dataloader, loss_fn, optimizer, device):
     return model, train_loss, train_acc
 
 @stub.function(
-    gpu=
+    gpu=gpu.A100(memory=20),
     mounts=[
         Mount.from_local_file(local_path='dataset.py'),
         Mount.from_local_file(local_path='cnn.py'),
@@ -219,7 +224,7 @@ def main():
     optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
 
     # train model
-    model = train.call(model, train_dataloader, loss_fn, optimizer, device, EPOCHS, test_dataloader)
+    model = train.call(model, train_dataloader, loss_fn, optimizer, device, EPOCHS, test_dataloader, True)
 
     # save model
     save_model(model)
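The checkpoint committed above follows a void_<YYYYMMDD_HHMMSS>.pth naming pattern. save_model itself lies outside the hunks shown in this diff; a plausible sketch of a helper that would produce such a filename, offered as an assumption rather than the pipeline's actual code:

import time
import torch

def save_model(model, out_dir="models"):
    # Illustrative only: timestamped filename in the style of
    # models/void_20230522_223553.pth; the real helper is not shown in this diff.
    filename = f"{out_dir}/void_{time.strftime('%Y%m%d_%H%M%S')}.pth"
    torch.save(model.state_dict(), filename)
    return filename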