eaglelandsonce committed on
Commit 7f75a02 · verified · 1 Parent(s): a8d680c

Update app.py

Files changed (1)
  1. app.py +54 -89
app.py CHANGED
@@ -1,90 +1,55 @@
 
 
+
+
 import streamlit as st
-import torch
-import torch.nn as nn
-import torch.optim as optim
-import torchvision
-import torchvision.transforms as transforms
-import matplotlib.pyplot as plt
-
-# Define the neural network
-class Net(nn.Module):
-    def __init__(self):
-        super(Net, self).__init__()
-        self.fc1 = nn.Linear(28 * 28, 128)
-        self.fc2 = nn.Linear(128, 64)
-        self.fc3 = nn.Linear(64, 10)
-
-    def forward(self, x):
-        x = x.view(-1, 28 * 28)
-        x = torch.relu(self.fc1(x))
-        x = torch.relu(self.fc2(x))
-        x = self.fc3(x)
-        return x
-
-# Function to train the model
-def train_model(num_epochs):
-    # Define transformations
-    transform = transforms.Compose([
-        transforms.ToTensor(),
-        transforms.Normalize((0.5,), (0.5,))
-    ])
-
-    # Load datasets
-    trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
-    trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
-
-    testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
-    testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)
-
-    # Initialize the network, loss function, and optimizer
-    net = Net()
-    criterion = nn.CrossEntropyLoss()
-    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
-
-    # Track loss over epochs
-    loss_values = []
-
-    # Training loop
-    for epoch in range(num_epochs):
-        running_loss = 0.0
-        for i, data in enumerate(trainloader, 0):
-            inputs, labels = data
-            optimizer.zero_grad()
-            outputs = net(inputs)
-            loss = criterion(outputs, labels)
-            loss.backward()
-            optimizer.step()
-            running_loss += loss.item()
-
-        # Append average loss for this epoch
-        loss_values.append(running_loss / len(trainloader))
-        st.write(f'Epoch {epoch + 1}, Loss: {running_loss / len(trainloader):.3f}')
-
-    st.write('Finished Training')
-
-    # Plot the loss values
-    plt.figure(figsize=(10, 5))
-    plt.plot(range(1, num_epochs + 1), loss_values, marker='o')
-    plt.title('Training Loss over Epochs')
-    plt.xlabel('Epoch')
-    plt.ylabel('Loss')
-    st.pyplot(plt)
-
-    # Evaluate the network on the test data
-    correct = 0
-    total = 0
-    with torch.no_grad():
-        for data in testloader:
-            images, labels = data
-            outputs = net(images)
-            _, predicted = torch.max(outputs.data, 1)
-            total += labels.size(0)
-            correct += (predicted == labels).sum().item()
-
-    st.write(f'Accuracy of the network on the 10000 test images: {100 * correct / total}%')
-
-# Streamlit interface
-st.title('MNIST Digit Classification with PyTorch')
-num_epochs = st.number_input('Enter number of epochs:', min_value=1, max_value=100, value=10)
-if st.button('Run'):
-    train_model(num_epochs)
+
+# Introduction
+st.title("PyTorch Hands-On Course")
+st.write("""
+### Introduction
+
+Welcome to the PyTorch Hands-On Course! This course is designed to provide you with a practical understanding of PyTorch, a powerful and flexible deep learning framework. Throughout this course, you'll engage with ten carefully crafted exercises that will guide you from the basics of tensor operations to advanced topics such as convolutional neural networks, recurrent neural networks, and generative adversarial networks. By the end of this course, you'll have the skills and confidence to build, train, and deploy your own deep learning models using PyTorch.
+""")
+
+# Course Content
+st.write("""
+### Course Exercises
+
+1. **Introduction to PyTorch Tensors**
+   - **Exercise**: Create and manipulate tensors. Perform basic tensor operations such as addition, subtraction, and element-wise multiplication.
+
+2. **Linear Regression with PyTorch**
+   - **Exercise**: Implement a simple linear regression model from scratch using PyTorch. Train the model on a small dataset and visualize the results.
+
+3. **Logistic Regression for Binary Classification**
+   - **Exercise**: Build and train a logistic regression model for a binary classification task (e.g., predicting whether a student passes or fails based on study hours).
+
+4. **Feedforward Neural Network (FFNN)**
+   - **Exercise**: Implement a multi-layer feedforward neural network for a classification task (e.g., MNIST digit classification). Use PyTorch’s built-in modules and functions.
+
+5. **Convolutional Neural Networks (CNNs)**
+   - **Exercise**: Develop a CNN for image classification using a dataset like CIFAR-10. Include layers such as convolutional, pooling, and fully connected layers.
+
+6. **Recurrent Neural Networks (RNNs) and LSTMs**
+   - **Exercise**: Create an RNN or LSTM network to perform sequence prediction (e.g., text generation or time series forecasting).
+
+7. **Transfer Learning with Pre-trained Models**
+   - **Exercise**: Use a pre-trained model (e.g., ResNet, VGG) from the PyTorch library for a custom image classification task. Fine-tune the model on a new dataset.
+
+8. **Natural Language Processing (NLP) with PyTorch**
+   - **Exercise**: Implement a text classification model using an RNN or a Transformer-based model. Train the model on a dataset like IMDB movie reviews for sentiment analysis.
+
+9. **Generative Adversarial Networks (GANs)**
+   - **Exercise**: Build and train a simple GAN to generate synthetic images. Use a dataset like MNIST to generate handwritten digits.
+
+10. **Deploying PyTorch Models**
+    - **Exercise**: Learn how to save, load, and deploy PyTorch models. Create a simple web application using Flask to serve a trained model for inference.
+""")
+
+# Conclusion
+st.write("""
+### Conclusion
+
+Congratulations on completing the PyTorch Hands-On Course! You've taken a significant step in your journey as a deep learning practitioner. Through these exercises, you've learned how to create and manipulate tensors, build and train various types of neural networks, and even deploy your models. PyTorch's versatility and ease of use make it an excellent choice for both research and production applications. Keep experimenting, stay curious, and continue to build upon the foundation you've established here. Happy coding!
+""")
+
+
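
Exercise 1 above asks for tensor creation and element-wise arithmetic. A minimal sketch of that kind of exercise, with values and variable names that are illustrative rather than part of the committed file:

import torch

# Create small tensors in a couple of common ways (example values).
a = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
b = torch.ones(2, 2)

# Basic element-wise operations.
print(a + b)          # addition
print(a - b)          # subtraction
print(a * a)          # element-wise multiplication
print(a @ b)          # matrix multiplication, shown for contrast
print(a.view(4))      # reshape the 2x2 tensor into a flat vector of length 4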
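
For Exercise 2 (linear regression from scratch), one possible sketch fits a line to synthetic data drawn from y = 2x + 1 plus noise; the dataset and hyperparameters are assumptions for illustration. The loss curve could be plotted with matplotlib much as in the removed version of app.py above.

import torch

# Synthetic dataset: y = 2x + 1 with a little Gaussian noise.
torch.manual_seed(0)
x = torch.linspace(0, 1, 100).unsqueeze(1)
y = 2 * x + 1 + 0.1 * torch.randn(100, 1)

model = torch.nn.Linear(1, 1)                     # one weight and one bias
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = torch.nn.MSELoss()

for step in range(1000):
    optimizer.zero_grad()
    loss = loss_fn(model(x), y)
    loss.backward()
    optimizer.step()

print(model.weight.item(), model.bias.item())     # should end up near 2 and 1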
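
Exercise 3's pass/fail task can be sketched with made-up study-hours data and BCEWithLogitsLoss, which combines a sigmoid with binary cross-entropy; the numbers below are invented for illustration.

import torch

# Toy data: hours studied vs. pass (1) / fail (0).
hours = torch.tensor([[0.5], [1.0], [1.5], [2.0], [2.5], [3.0], [3.5], [4.0]])
passed = torch.tensor([[0.], [0.], [0.], [0.], [1.], [1.], [1.], [1.]])

model = torch.nn.Linear(1, 1)
loss_fn = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for step in range(2000):
    optimizer.zero_grad()
    loss = loss_fn(model(hours), passed)
    loss.backward()
    optimizer.step()

probs = torch.sigmoid(model(hours))
print((probs > 0.5).float().eq(passed).float().mean())   # training accuracy on the toy set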
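
Exercise 5 asks for a CIFAR-10 classifier with convolutional, pooling, and fully connected layers. One way such a network could be laid out (a brief sketch run for a few batches, not a tuned model):

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

class SmallCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),   # 32x32 -> 16x16
            nn.Conv2d(16, 32, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),  # 16x16 -> 8x8
        )
        self.classifier = nn.Linear(32 * 8 * 8, 10)

    def forward(self, x):
        return self.classifier(self.features(x).flatten(1))

transform = transforms.ToTensor()
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

net = SmallCNN()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)

for i, (images, labels) in enumerate(trainloader):
    optimizer.zero_grad()
    loss = criterion(net(images), labels)
    loss.backward()
    optimizer.step()
    if i == 20:            # a short smoke test rather than a full epoch
        break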
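
Exercise 7's transfer-learning step usually comes down to loading pretrained weights, freezing the backbone, and swapping the classification head. A sketch assuming torchvision 0.13+ (for the weights= argument) and a hypothetical 5-class dataset, represented here by a dummy batch:

import torch
import torch.nn as nn
from torchvision import models

# Pretrained ResNet-18 with the backbone frozen.
model = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
for param in model.parameters():
    param.requires_grad = False

# New classification head for the (assumed) 5-class task; its weights stay trainable.
model.fc = nn.Linear(model.fc.in_features, 5)

optimizer = torch.optim.Adam(model.fc.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

# Dummy batch standing in for a DataLoader over the new dataset.
images = torch.randn(4, 3, 224, 224)
labels = torch.tensor([0, 1, 2, 3])

optimizer.zero_grad()
loss = criterion(model(images), labels)
loss.backward()
optimizer.step()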
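
Exercise 10 combines PyTorch serialization with a small Flask service. The sketch below saves and reloads a state_dict and serves predictions for an assumed JSON payload of 784 pixel values; the route name, payload format, and untrained stand-in model are all illustrative.

import torch
import torch.nn as nn
from flask import Flask, request, jsonify

# Stand-in network; in the exercise this would be a trained model.
model = nn.Sequential(nn.Linear(28 * 28, 128), nn.ReLU(), nn.Linear(128, 10))

# Save and restore the recommended way: the state_dict, not the whole module object.
torch.save(model.state_dict(), "model.pt")
model.load_state_dict(torch.load("model.pt"))
model.eval()

app = Flask(__name__)

@app.route("/predict", methods=["POST"])
def predict():
    # Expects a body like {"pixels": [...784 floats...]}.
    pixels = torch.tensor(request.get_json()["pixels"], dtype=torch.float32).view(1, -1)
    with torch.no_grad():
        scores = model(pixels)
    return jsonify({"digit": int(scores.argmax(dim=1))})

if __name__ == "__main__":
    app.run(port=5000)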