eaglelandsonce committed
Commit 2a96c4e · verified · 1 Parent(s): 0d51b23

Create 1_TensorIntro.py

Files changed (1)
  1. pages/1_TensorIntro.py +284 -0

pages/1_TensorIntro.py ADDED
@@ -0,0 +1,284 @@
+ import streamlit as st
+ import torch
+ import io
+ import sys
+
+ # Function to execute the input code and capture print statements
+ def execute_code(code):
+     # Redirect stdout to capture print statements
+     old_stdout = sys.stdout
+     sys.stdout = mystdout = io.StringIO()
+
+     # Run the code in a single namespace so that functions and classes
+     # defined by the snippet can see module-level names. With separate
+     # globals/locals dicts, a reference like super(SimpleNN, self) inside
+     # a method would raise NameError, because method bodies only look up
+     # names in the globals dict.
+     global_vars = {"torch": torch}
+     try:
+         exec(code, global_vars)
+         output = mystdout.getvalue()
+     except Exception as e:
+         # Keep anything printed before the failure, then append the error
+         output = mystdout.getvalue() + str(e)
+     finally:
+         # Reset redirect.
+         sys.stdout = old_stdout
+
+     # Everything the snippet defined, minus the injected names
+     local_vars = {k: v for k, v in global_vars.items()
+                   if k not in ("torch", "__builtins__")}
+     return output, local_vars
+
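+ # For example (illustrative only, not executed by the app):
+ #   out, vars_ = execute_code("x = torch.ones(2)\\nprint(x)")
+ #   out   -> "tensor([1., 1.])\\n"
+ #   vars_ -> {"x": tensor([1., 1.])}
+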
+ # Dictionary with exercise details
+
+ exercises = {
+     "Exercise 1: Create and Manipulate Tensors": {
+         "description": "Tensors are the core data structure in PyTorch, similar to arrays in NumPy but with additional capabilities for GPU acceleration. This exercise introduces how to create tensors from various data sources such as lists and NumPy arrays. It also covers basic tensor operations like addition, subtraction, and element-wise multiplication, which are fundamental for manipulating data in PyTorch.",
+         "code": '''import torch
+ import numpy as np
+
+ # Creating tensors from Python lists
+ # This creates a 1D tensor from the list [1, 2, 3]
+ tensor_from_list = torch.tensor([1, 2, 3])
+ print("Tensor from list:", tensor_from_list)
+
+ # Creating tensors from NumPy arrays
+ # This converts a NumPy array to a tensor
+ numpy_array = np.array([4, 5, 6])
+ tensor_from_numpy = torch.tensor(numpy_array)
+ print("Tensor from NumPy array:", tensor_from_numpy)
+
+ # Performing basic tensor operations
+ tensor1 = torch.tensor([1, 2, 3])
+ tensor2 = torch.tensor([4, 5, 6])
+
+ # Addition
+ addition = tensor1 + tensor2
+ print("Addition:", addition)
+
+ # Subtraction
+ subtraction = tensor1 - tensor2
+ print("Subtraction:", subtraction)
+
+ # Element-wise multiplication
+ elementwise_multiplication = tensor1 * tensor2
+ print("Element-wise Multiplication:", elementwise_multiplication)
+ '''
+     },
+     "Exercise 2: Tensor Indexing and Slicing": {
+         "description": "Indexing and slicing allow you to access and manipulate specific elements and sub-tensors. This is crucial for tasks such as data preprocessing and manipulation in machine learning workflows. This exercise demonstrates how to index and slice tensors to extract and modify elements efficiently.",
+         "code": '''import torch
+
+ # Creating a 2D tensor (matrix)
+ tensor = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+
+ # Indexing elements
+ # Accessing the element at the 2nd row and 3rd column (indexing starts at 0)
+ element = tensor[1, 2]
+ print("Element at index [1, 2]:", element)
+
+ # Slicing sub-tensors
+ # Extracting the entire second row
+ row = tensor[1, :]
+ print("Second row:", row)
+
+ # Extracting the entire third column
+ column = tensor[:, 2]
+ print("Third column:", column)
+
+ # Modifying elements
+ # Changing the first element of the tensor to 10
+ tensor[0, 0] = 10
+ print("Modified tensor:", tensor)
+ '''
+     },
+     "Exercise 3: Reshaping and Transposing Tensors": {
+         "description": "Reshaping and transposing tensors are common operations in machine learning workflows, especially when preparing data for model training. This exercise covers how to reshape tensors using view, squeeze, and unsqueeze, as well as how to transpose tensors.",
+         "code": '''import torch
+
+ # Creating a 2D tensor
+ tensor = torch.tensor([[1, 2, 3], [4, 5, 6]])
+
+ # Reshaping a tensor
+ reshaped_tensor = tensor.view(3, 2)
+ print("Reshaped tensor:", reshaped_tensor)
+
+ # Squeezing a tensor (removing dimensions of size 1)
+ squeezed_tensor = torch.tensor([[1], [2], [3]]).squeeze()
+ print("Squeezed tensor:", squeezed_tensor)
+
+ # Unsqueezing a tensor (adding dimensions of size 1)
+ unsqueezed_tensor = squeezed_tensor.unsqueeze(1)
+ print("Unsqueezed tensor:", unsqueezed_tensor)
+
+ # Transposing a tensor
+ transposed_tensor = tensor.t()
+ print("Transposed tensor:", transposed_tensor)
+ '''
+     },
+     "Exercise 4: Tensor Operations for Deep Learning": {
+         "description": "Deep learning requires various tensor operations such as matrix multiplication and element-wise operations. This exercise demonstrates how to perform matrix multiplication, calculate the dot product, and transpose tensors, which are essential for building neural networks.",
+         "code": '''import torch
+
+ # Creating tensors for matrix multiplication
+ a = torch.tensor([[1, 2], [3, 4]])
+ b = torch.tensor([[5, 6], [7, 8]])
+
+ # Matrix multiplication
+ matrix_multiplication = torch.matmul(a, b)
+ print("Matrix multiplication result:", matrix_multiplication)
+
+ # Transposing a tensor
+ transposed_a = a.t()
+ print("Transposed tensor:", transposed_a)
+
+ # Calculating the dot product
+ dot_product = torch.dot(torch.tensor([1, 2]), torch.tensor([3, 4]))
+ print("Dot product result:", dot_product)
+ '''
+     },
+     "Exercise 5: Tensors and Gradients": {
+         "description": "Gradients are essential for optimizing neural networks during training. This exercise introduces the autograd feature in PyTorch, showing how to compute gradients using backpropagation.",
+         "code": '''import torch
+
+ # Creating a tensor with gradient tracking enabled
+ x = torch.tensor([2.0, 3.0], requires_grad=True)
+
+ # Performing operations on the tensor
+ y = x * 2
+ z = y.mean()
+
+ # Backpropagation to compute gradients
+ z.backward()
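+ # Since z = (2*x[0] + 2*x[1]) / 2, dz/dx_i = 1.0 for each element,
+ # so x.grad should be tensor([1., 1.]).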
+
+ # Printing the gradients
+ print("Gradients of x:", x.grad)
+
+ # Disabling gradient tracking
+ with torch.no_grad():
+     y = x * 2
+     print("Result with no gradient tracking:", y)
+ '''
+     },
+     "Exercise 6: Practical Tensor Exercises - Custom Layers": {
+         "description": "Implementing custom layers and activation functions is crucial for creating neural networks tailored to specific tasks. This exercise guides you through creating a simple linear layer and a ReLU activation function.",
+         "code": '''import torch
+
+ # Implementing a custom linear layer
+ class LinearLayer:
+     def __init__(self, input_dim, output_dim):
+         self.weights = torch.randn(input_dim, output_dim, requires_grad=True)
+         self.bias = torch.randn(output_dim, requires_grad=True)
+
+     def forward(self, x):
+         return torch.matmul(x, self.weights) + self.bias
+
+ # Creating an instance of the custom linear layer
+ layer = LinearLayer(2, 1)
+
+ # Passing a tensor through the layer
+ input_tensor = torch.tensor([[1.0, 2.0]])
+ output_tensor = layer.forward(input_tensor)
+ print("Output of the custom linear layer:", output_tensor)
+
+ # Implementing a custom ReLU activation function
+ def relu(x):
+     return torch.max(torch.tensor(0.0), x)
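+ # (Built-in equivalents: torch.relu(x) or torch.clamp(x, min=0.0).)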
+
+ # Applying the ReLU activation function
+ relu_output = relu(torch.tensor([-1.0, 2.0, -0.5, 3.0]))
+ print("Output of the custom ReLU function:", relu_output)
+ '''
+     },
+     "Exercise 7: Data Normalization with Tensors": {
+         "description": "Data normalization is a key preprocessing step in machine learning. This exercise demonstrates how to normalize data using Min-Max normalization, which rescales the data to the [0, 1] range.",
+         "code": '''import torch
+
+ # Function for Min-Max normalization
+ def min_max_normalize(tensor):
+     min_val = tensor.min()
+     max_val = tensor.max()
+     return (tensor - min_val) / (max_val - min_val)
+
+ # Creating a tensor with sample data
+ data = torch.tensor([10, 20, 30, 40, 50])
+
+ # Applying Min-Max normalization
+ normalized_data = min_max_normalize(data)
+ print("Normalized data:", normalized_data)
+ '''
+     },
+     "Final Project: Training a Simple Neural Network on MNIST": {
+         "description": "This project involves building and training a simple neural network on the MNIST dataset. It encompasses loading the dataset, defining the network architecture, and implementing the training loop with loss computation and backpropagation.",
+         "code": '''import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ from torch.utils.data import DataLoader
+ from torchvision import datasets, transforms
+
+ # Define a simple neural network
+ class SimpleNN(nn.Module):
+     def __init__(self):
+         super(SimpleNN, self).__init__()
+         self.fc1 = nn.Linear(28*28, 128)
+         self.fc2 = nn.Linear(128, 10)
+
+     def forward(self, x):
+         x = x.view(-1, 28*28)
+         x = torch.relu(self.fc1(x))
+         x = self.fc2(x)
+         return x
+
+ # Load dataset
+ transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
+ trainset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
+ trainloader = DataLoader(trainset, batch_size=64, shuffle=True)
+
+ # Initialize network, loss function, and optimizer
+ model = SimpleNN()
+ criterion = nn.CrossEntropyLoss()
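+ # CrossEntropyLoss applies log-softmax internally, which is why forward()
+ # returns raw logits rather than probabilities.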
+ optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
+
+ # Training loop
+ for epoch in range(2):  # loop over the dataset multiple times
+     for inputs, labels in trainloader:
+         # Zero the parameter gradients
+         optimizer.zero_grad()
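+         # (PyTorch accumulates gradients across backward() calls unless cleared.)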
+         # Forward pass
+         outputs = model(inputs)
+         loss = criterion(outputs, labels)
+         # Backward pass and optimize
+         loss.backward()
+         optimizer.step()
+
+ print('Finished Training')
+ '''
+     },
+ }
+
+
+ st.title('PyTorch Code Runner')
+
+ # Side menu for exercises
+ exercise_choice = st.sidebar.radio("Choose an exercise", list(exercises.keys()))
+
+ # Display the chosen exercise description
+ st.subheader(exercise_choice)
+ st.write(exercises[exercise_choice]["description"])
+
+ # Text area for inputting the PyTorch code
+ code_input = st.text_area("Enter your PyTorch code here", height=300, value=exercises[exercise_choice]["code"])
+
+ # Button to execute the code
+ if st.button("Run Code"):
+     # Prepend a torch import so that bare snippets still run
+     code_to_run = "import torch\n" + code_input
+
+     # Execute the code and capture the output
+     output, variables = execute_code(code_to_run)
+
+     # Display the output
+     st.subheader('Output')
+     st.text(output)
+
+     # Display returned variables
+     if variables:
+         st.subheader('Variables')
+         for key, value in variables.items():
+             st.text(f"{key}: {value}")