import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split

# Step 1: Create Synthetic Data
np.random.seed(42)
X = np.linspace(-10, 10, 1000)
y = 2.5 * X + np.random.normal(0, 2, X.shape)  # Linear relation with noise

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Convert to PyTorch tensors
X_train = torch.tensor(X_train, dtype=torch.float32).view(-1, 1)
y_train = torch.tensor(y_train, dtype=torch.float32).view(-1, 1)
X_test = torch.tensor(X_test, dtype=torch.float32).view(-1, 1)
y_test = torch.tensor(y_test, dtype=torch.float32).view(-1, 1)

# Step 2: Define and Train a Neural Network Model
class SimpleNN(nn.Module):
    def __init__(self):
        super(SimpleNN, self).__init__()
        self.fc1 = nn.Linear(1, 10)
        self.fc2 = nn.Linear(10, 1)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# Initialize model, loss function, and optimizer
model = SimpleNN()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Training loop
epochs = 500
losses = []
for epoch in range(epochs):
    model.train()
    optimizer.zero_grad()
    outputs = model(X_train)
    loss = criterion(outputs, y_train)
    loss.backward()
    optimizer.step()
    losses.append(loss.item())
    if (epoch + 1) % 50 == 0:
        print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')

# Step 3: Plot the Results
# Plot the synthetic data and the model's predictions
model.eval()
with torch.no_grad():
    predicted = model(X_test).numpy()

plt.figure(figsize=(12, 6))

# Plot data and predictions
plt.subplot(1, 2, 1)
plt.scatter(X_test, y_test, label='Original data', alpha=0.5)
plt.scatter(X_test, predicted, label='Fitted line', alpha=0.5)
plt.title('Regression Results')
plt.xlabel('X')
plt.ylabel('y')
plt.legend()

# Plot training loss
plt.subplot(1, 2, 2)
plt.plot(losses)
plt.title('Training Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')

plt.tight_layout()
plt.show()
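
# Optional follow-up (not part of the original script): a minimal sketch of a
# quantitative check on the held-out test set, reusing the model, criterion, and
# tensors defined above. It reports the test-set mean squared error as a single
# number to complement the plots.
model.eval()
with torch.no_grad():
    test_mse = criterion(model(X_test), y_test).item()
print(f'Test MSE: {test_mse:.4f}')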