import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy import stats

from sklearn.datasets import make_regression


def gradient_descent(x, y, theta_1, theta_2, learning_rate, n_epochs):
    m = len(x)

    # Initial parameter vector [intercept, slope].
    theta = np.array([theta_1, theta_2], dtype=float)

    # Design matrix: a column of ones for the intercept, then x.
    X = np.column_stack((np.ones(m), x))

    J_thetas = np.zeros(n_epochs)
    gradient_J_thetas = np.zeros((n_epochs, 2))

    for i in range(n_epochs):
        x_dot_theta_minus_y = np.dot(X, theta) - y

        # Loss at iteration i: J(theta) = 0.5 * ||X.theta - y||^2
        J_thetas[i] = 0.5 * np.dot(x_dot_theta_minus_y, x_dot_theta_minus_y)
        print("the loss at iteration", i, "equals", J_thetas[i])

        # Gradient of J with respect to theta: X^T (X.theta - y)
        gradient_J_thetas[i] = np.dot(X.T, x_dot_theta_minus_y)
        print("the gradient at iteration", i, "equals", gradient_J_thetas[i])

        # Gradient descent update step.
        theta = theta - learning_rate * gradient_J_thetas[i]
        print("the new theta is:", theta)

    return theta, J_thetas
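

# A minimal usage sketch (an addition, not part of the original routine): it
# assumes a 1-D regression problem generated with make_regression; the initial
# thetas, learning_rate and n_epochs below are illustrative guesses, not tuned
# values from the original script.
if __name__ == "__main__":
    # Generate 100 noisy samples with a single feature.
    x_data, y_data = make_regression(n_samples=100, n_features=1,
                                     noise=10, random_state=0)
    x_data = x_data.ravel()

    # Run batch gradient descent starting from theta = (0, 0).
    theta_final, losses = gradient_descent(x_data, y_data,
                                           theta_1=0.0, theta_2=0.0,
                                           learning_rate=0.001, n_epochs=50)

    # Plot the fitted line over the data.
    sns.set()
    plt.scatter(x_data, y_data, s=10)
    xs = np.linspace(x_data.min(), x_data.max(), 100)
    plt.plot(xs, theta_final[0] + theta_final[1] * xs, color="red")
    plt.show()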