danielcd99 committed on
Commit b025479 · 1 Parent(s): b4b5fb6

Trying to fix app.py

Files changed (2):
  1. app.py +35 -0
  2. train.py +0 -90
app.py CHANGED
@@ -1,9 +1,44 @@
  import gradio as gr
  import torch
+ from torch import nn
 
  labels = ['Zero','Um','Dois','Três','Quatro','Cinco','Seis','Sete','Oito', 'Nove']
 
+ if torch.cuda.is_available():
+     device = torch.device("cuda:0")
+     print("GPU")
+ else:
+     device = torch.device("cpu")
+     print("CPU")
+
+
+ # NEURAL NETWORK
+ class LeNet(nn.Module):
+     def __init__(self):
+         super(LeNet, self).__init__()
+
+         self.convs = nn.Sequential(
+             nn.Conv2d(in_channels=1, out_channels=4, kernel_size=(5, 5)),
+             nn.Tanh(),
+             nn.AvgPool2d(2, 2),
+
+             nn.Conv2d(in_channels=4, out_channels=12, kernel_size=(5, 5)),
+             nn.Tanh(),
+             nn.AvgPool2d(2, 2)
+         )
+
+         self.linear = nn.Sequential(
+             nn.Linear(4*4*12,10)
+         )
+
+     def forward(self, x):
+         x = self.convs(x)
+         x = torch.flatten(x, 1)
+
+         return self.linear(x)
+
  # LOADING MODEL
+ model = LeNet().to(device)
  model.load_state_dict(torch.load("model_mnist.pth", map_location=torch.device('cuda')))
 
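For reference, a minimal, untested sketch (not part of this commit) of how the load step could be made device-agnostic and exposed through Gradio. The predict preprocessing and the "sketchpad"/Label components are assumptions that depend on the installed Gradio version; the flatten size in LeNet works out as 28 -> 24 -> 12 -> 8 -> 4 per side, i.e. 4*4*12 = 192 features into the final nn.Linear.

# Sketch only: assumes the LeNet class, labels list and device from app.py above,
# and the model_mnist.pth checkpoint produced by train.py.
import torch
import gradio as gr

model = LeNet().to(device)
# map_location=device loads the checkpoint on whatever hardware is available,
# whereas hard-coding torch.device('cuda') fails on CPU-only machines.
model.load_state_dict(torch.load("model_mnist.pth", map_location=device))
model.eval()

def predict(image):
    # Assumed preprocessing: a 28x28 grayscale array scaled to [0, 1];
    # the exact Sketchpad output format varies across Gradio versions.
    x = torch.as_tensor(image, dtype=torch.float32).reshape(1, 1, 28, 28) / 255.0
    with torch.no_grad():
        probs = torch.softmax(model(x.to(device)), dim=1)[0]
    return {labels[i]: float(probs[i]) for i in range(len(labels))}

demo = gr.Interface(fn=predict, inputs="sketchpad", outputs=gr.Label(num_top_classes=3))
demo.launch()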
train.py DELETED
@@ -1,90 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- import torchvision
- import torchvision.transforms as transforms
- import matplotlib.pyplot as plt
-
-
- if torch.cuda.is_available():
-     device = torch.device("cuda:0")
-     print("GPU")
- else:
-     device = torch.device("cpu")
-     print("CPU")
-
- # MNIST dataset
- batch_size=64
-
- train_dataset = torchvision.datasets.MNIST(root='./data',
-                                            train=True,
-                                            transform=transforms.ToTensor(),
-                                            download=True)
-
- test_dataset = torchvision.datasets.MNIST(root='./data',
-                                           train=False,
-                                           transform=transforms.ToTensor())
-
- # Data loader
- train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
-                                            batch_size=batch_size,
-                                            shuffle=True)
-
- test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
-                                           batch_size=batch_size,
-                                           shuffle=False)
-
-
- # NEURAL NETWORK
- class LeNet(nn.Module):
-     def __init__(self):
-         super(LeNet, self).__init__()
-
-         self.convs = nn.Sequential(
-             nn.Conv2d(in_channels=1, out_channels=4, kernel_size=(5, 5)),
-             nn.Tanh(),
-             nn.AvgPool2d(2, 2),
-
-             nn.Conv2d(in_channels=4, out_channels=12, kernel_size=(5, 5)),
-             nn.Tanh(),
-             nn.AvgPool2d(2, 2)
-         )
-
-         self.linear = nn.Sequential(
-             nn.Linear(4*4*12,10)
-         )
-
-     def forward(self, x):
-         x = self.convs(x)
-         x = torch.flatten(x, 1)
-
-         return self.linear(x)
-
-
-
- # TRAIN PARAMETERS
- criterion = nn.CrossEntropyLoss()
- model_adam = LeNet().to(device)
- optimizer = torch.optim.Adam(model_adam.parameters(), lr=0.05)
- n_steps = len(train_loader)
- num_epochs = 10
-
- # TRAIN
- def train(model):
-     for epoch in range(num_epochs):
-         for i, (images, labels) in enumerate(train_loader):
-
-             images = images.to(device)
-             labels = labels.to(device)
-
-             # Forward pass
-             outputs = model(images)
-             loss = criterion(outputs, labels)
-
-             # Backward and optimize
-             optimizer.zero_grad()
-             loss.backward()
-             optimizer.step()
-
- # SAVING MODEL
- torch.save(model_adam.state_dict(), "model_mnist.pth")
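
With train.py removed, a quick way to sanity-check the saved checkpoint later is a short evaluation loop. The sketch below is an assumption, reusing the LeNet class, device selection and MNIST test_loader exactly as they were defined in the deleted file.

# Sketch only: assumes LeNet, device and a test_loader built as in the
# deleted train.py (torchvision MNIST test split, ToTensor, batch_size=64).
import torch

model = LeNet().to(device)
model.load_state_dict(torch.load("model_mnist.pth", map_location=device))
model.eval()

correct, total = 0, 0
with torch.no_grad():
    for images, targets in test_loader:
        images, targets = images.to(device), targets.to(device)
        preds = model(images).argmax(dim=1)  # class with the highest logit
        correct += (preds == targets).sum().item()
        total += targets.size(0)

print(f"Test accuracy: {correct / total:.4f}")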