MartialTerran committed
Commit eda3ba1
1 Parent(s): 42a5011

Create Pytorch_Sine-x_Function_NN-model.py


Based on a PyTorch translation of https://github.com/Azacus1/Modelling-for-sin-wave-function/blob/main/Model.py (repo: https://github.com/Azacus1/Modelling-for-sin-wave-function/tree/main).

Files changed (1)
  1. Pytorch_Sine-x_Function_NN-model.py +191 -0
Pytorch_Sine-x_Function_NN-model.py ADDED
# Model to generate Sine(x) values.
# To do: add self.dropout = nn.Dropout(0.1)  # Dropout layer (see the sketch after SineModel below)
# Created on Thu Oct 28 12:18:37 2021
# @author: aman (translated to PyTorch by Gemini)


import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import math

# Set seed for experiment reproducibility
seed = 1
np.random.seed(seed)
torch.manual_seed(seed)

# Number of sample datapoints
SAMPLES = 400
# Generate a uniformly distributed set of random numbers in the range from
# 0 to 2π, which covers a complete sine wave oscillation
x_values = np.random.uniform(
    low=0, high=2 * math.pi, size=SAMPLES).astype(np.float32)

print(x_values)
# Shuffle the values to guarantee they're not in order
np.random.shuffle(x_values)

# Calculate the corresponding sine values
y_values = np.sin(x_values).astype(np.float32)

# Plot our data. The 'b.' argument tells the library to print blue dots.
plt.plot(x_values, y_values, 'b.')
plt.show()

# Add a small random number to each y value
y_values += 0.01 * np.random.randn(*y_values.shape)
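# (np.random.randn draws standard-normal samples, so this adds Gaussian noise
# with standard deviation 0.01, in the same units as y.)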

# Plot our data again, now with noise
plt.plot(x_values, y_values, 'b.')
plt.show()

# We'll use 60% of our data for training and 20% for testing. The remaining 20%
# will be used for validation. Calculate the indices of each section.
TRAIN_SPLIT = int(0.6 * SAMPLES)
TEST_SPLIT = int(0.2 * SAMPLES + TRAIN_SPLIT)
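# Worked example with SAMPLES = 400:
#   TRAIN_SPLIT = int(0.6 * 400)       = 240
#   TEST_SPLIT  = int(0.2 * 400 + 240) = 320
# so np.split below yields chunks of 240 (train), 80 (test), and 80 (validate).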

# Use np.split to chop our data into three parts.
# The second argument to np.split is an array of indices where the data will be
# split. We provide two indices, so the data will be divided into three chunks.
x_train, x_test, x_validate = np.split(x_values, [TRAIN_SPLIT, TEST_SPLIT])
y_train, y_test, y_validate = np.split(y_values, [TRAIN_SPLIT, TEST_SPLIT])

# Double check that our splits add up correctly
assert (x_train.size + x_validate.size + x_test.size) == SAMPLES

# Plot the data in each partition in different colors:
plt.plot(x_train, y_train, 'b.', label="Train")
plt.plot(x_test, y_test, 'r.', label="Test")
plt.plot(x_validate, y_validate, 'y.', label="Validate")
plt.legend()
plt.show()

# Convert data to PyTorch tensors
x_train_tensor = torch.from_numpy(x_train).unsqueeze(1)
y_train_tensor = torch.from_numpy(y_train).unsqueeze(1)
x_test_tensor = torch.from_numpy(x_test).unsqueeze(1)
y_test_tensor = torch.from_numpy(y_test).unsqueeze(1)
x_validate_tensor = torch.from_numpy(x_validate).unsqueeze(1)
y_validate_tensor = torch.from_numpy(y_validate).unsqueeze(1)
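# unsqueeze(1) turns each 1-D array of shape (N,) into a column tensor of
# shape (N, 1), which is the layout nn.Linear(1, ...) expects, e.g.:
#   torch.from_numpy(x_train).shape              -> torch.Size([240])
#   torch.from_numpy(x_train).unsqueeze(1).shape -> torch.Size([240, 1])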

# Define the PyTorch model
class SineModel(nn.Module):
    def __init__(self):
        super(SineModel, self).__init__()
        self.fc1 = nn.Linear(1, 16)
        self.fc2 = nn.Linear(16, 16)
        self.fc3 = nn.Linear(16, 1)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.fc3(x)
        return x
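
# Sketch of the dropout variant from the to-do at the top of this file
# (illustrative only; not instantiated below). The 0.1 rate is the value
# the to-do suggests; dropout after each hidden activation is one common
# placement, not the only one.
class SineModelWithDropout(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(1, 16)
        self.fc2 = nn.Linear(16, 16)
        self.fc3 = nn.Linear(16, 1)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.1)  # Dropout layer from the to-do

    def forward(self, x):
        x = self.dropout(self.relu(self.fc1(x)))
        x = self.dropout(self.relu(self.fc2(x)))
        return self.fc3(x)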

model = SineModel()

# Define optimizer and loss function
optimizer = optim.Adam(model.parameters(), lr=0.001)  # lr = learning rate
loss_fn = nn.MSELoss()
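# nn.MSELoss computes mean((y_pred - y_true)**2) over the batch, so lower is
# better and a perfect fit would score 0.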

# Train the model
epochs = 500
batch_size = 64
train_losses = []
val_losses = []

for epoch in range(1, epochs + 1):
    # Training
    model.train()
    permutation = torch.randperm(x_train_tensor.size()[0])

    epoch_train_loss = 0.0
    for i in range(0, x_train_tensor.size()[0], batch_size):
        indices = permutation[i:i+batch_size]
        x_batch, y_batch = x_train_tensor[indices], y_train_tensor[indices]

        optimizer.zero_grad()
        y_pred = model(x_batch)
        loss = loss_fn(y_pred, y_batch)
        loss.backward()
        optimizer.step()

        epoch_train_loss += loss.item() * x_batch.size(0)

    train_losses.append(epoch_train_loss / x_train_tensor.size(0))

    # Validation
    model.eval()
    with torch.no_grad():
        y_val_pred = model(x_validate_tensor)
        val_loss = loss_fn(y_val_pred, y_validate_tensor).item()
        val_losses.append(val_loss)

    if epoch % 100 == 0:
        print(f'Epoch {epoch}/{epochs}, Training Loss: {train_losses[-1]:.4f}, Validation Loss: {val_loss:.4f}')
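
# The manual permutation/slicing above could equivalently use PyTorch's data
# utilities; a minimal sketch (same batch size, reshuffled each epoch), shown
# here for reference and not used by the rest of this script:
from torch.utils.data import TensorDataset, DataLoader
train_loader = DataLoader(TensorDataset(x_train_tensor, y_train_tensor),
                          batch_size=batch_size, shuffle=True)
# for x_batch, y_batch in train_loader:
#     ...same zero_grad / forward / backward / step as above...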

# Draw a graph of the loss, which is the distance between
# the predicted and actual values during training and validation.

epochs_range = range(1, len(train_losses) + 1)

# Exclude the first few epochs so the graph is easier to read
# (set SKIP above 0 to do so; 0 keeps every epoch)
SKIP = 0

plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)

plt.plot(epochs_range[SKIP:], train_losses[SKIP:], 'g.', label='Training loss')
plt.plot(epochs_range[SKIP:], val_losses[SKIP:], 'b.', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.subplot(1, 2, 2)

# Draw a graph of mean absolute error, which is another way of
# measuring the amount of error in the prediction.
# We need to recalculate MAE because it was not stored during training.
model.eval()
with torch.no_grad():
    train_mae = torch.mean(torch.abs(model(x_train_tensor) - y_train_tensor)).item()
    val_mae = torch.mean(torch.abs(model(x_validate_tensor) - y_validate_tensor)).item()
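# (To plot MAE for every epoch rather than a single point, accumulate these
# values inside the training loop, the same way train_losses and val_losses
# are collected above.)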

plt.plot([epochs_range[-1]], [train_mae], 'g.', label='Training MAE')
plt.plot([epochs_range[-1]], [val_mae], 'b.', label='Validation MAE')
plt.title('Training and validation mean absolute error')
plt.xlabel('Epochs (only final epoch shown for MAE)')
plt.ylabel('MAE')
plt.legend()

plt.tight_layout()
plt.show()

# Calculate and print the loss on our test dataset
model.eval()
with torch.no_grad():
    y_test_pred_tensor = model(x_test_tensor)
    test_loss = loss_fn(y_test_pred_tensor, y_test_tensor).item()
    test_mae = torch.mean(torch.abs(y_test_pred_tensor - y_test_tensor)).item()

print(f'Test Loss: {test_loss:.4f}')
print(f'Test MAE: {test_mae:.4f}')

# Make predictions based on our test dataset:
# y_test_pred_tensor has already been computed above when calculating test loss/MAE.

# Graph the predictions against the actual values
plt.clf()
plt.title('Comparison of predictions and actual values')
plt.plot(x_test, y_test, 'b.', label='Actual values')
plt.plot(x_test, y_test_pred_tensor.detach().numpy(), 'r.', label='PyTorch predicted')
plt.legend()
plt.show()

# PyTorch does not have a direct equivalent to TensorFlow Lite built in.
# For mobile/embedded deployment, the usual routes are TorchScript
# (torch.jit.trace / torch.jit.script) or ONNX export via torch.onnx.export.
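
# A minimal export sketch (illustrative; the file names are arbitrary):
model.eval()
example_input = torch.zeros(1, 1)  # one sample, one feature
traced = torch.jit.trace(model, example_input)
traced.save("sine_model_traced.pt")  # loadable via torch.jit.load, incl. from C++/mobile
# Alternatively: torch.onnx.export(model, example_input, "sine_model.onnx")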