Spaces:
Runtime error
Runtime error
predict.py
Browse files — predict.py (+83, −0)
predict.py
ADDED
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
|
3 |
+
def predict_score(x1, x2):
    """Predict a score from *x1* (student count) and *x2* (temperature).

    Evaluates the fixed linear model ``y = Theta0 + Theta1*x1 + Theta2*x2``
    whose coefficients are hard-coded below.

    NOTE(review): these constants do not match the parameters produced by
    ``gradient_descent()`` in this file — presumably they come from an
    earlier training run; confirm they are the intended model.

    Returns the prediction as a plain Python float (via ``Tensor.item``).
    """
    theta0 = torch.tensor(-0.5738734424645411)
    theta1 = torch.tensor(2.1659122905141825)
    theta2 = torch.tensor(0.0)
    # Bug fix: the original computed `theta2 * 23` with a hard-coded literal,
    # silently ignoring the caller's x2.  Because theta2 is 0.0 the numeric
    # result is unchanged, but the model now actually uses both inputs.
    y_pred = theta0 + theta1 * x1 + theta2 * x2
    return y_pred.item()
|
9 |
+
|
10 |
+
def gradient_descent():
    """Fit ``y = Theta0 + Theta1*x1 + Theta2*x2`` to a tiny fixed data set by
    gradient descent (PyTorch autograd), then run an interactive prediction
    prompt.

    Side effects: prints training progress to stdout and reads from stdin in
    a loop, so this must run in an interactive session (it will raise
    ``EOFError`` when stdin is closed, e.g. on a headless server).
    """
    # Training data.  Bug fix: declared as float tensors — the originals were
    # integer tensors and only worked through implicit type promotion against
    # the float parameters (numeric results are identical).
    x1 = torch.tensor([50.0, 60.0, 70.0, 80.0, 90.0])
    x2 = torch.tensor([20.0, 21.0, 22.0, 23.0, 24.0])
    y_actual = torch.tensor([30.0, 35.0, 40.0, 45.0, 50.0])

    # Learning rate and maximum number of iterations.
    alpha = 0.01
    max_iters = 1000

    # Model parameters, tracked by autograd.
    Theta0 = torch.tensor(0.0, requires_grad=True)
    Theta1 = torch.tensor(0.0, requires_grad=True)
    Theta2 = torch.tensor(0.0, requires_grad=True)

    iter_count = 0
    # Bug fix: prev_cost is initialised before the loop.  The original only
    # assigned it at the bottom of the loop body and relied on short-circuit
    # evaluation (`iter_count > 0 and ...`) to dodge a NameError on the
    # first iteration.
    prev_cost = None

    # Loop until convergence or the iteration budget is exhausted.
    while iter_count < max_iters:
        # Forward pass: predicted output for the whole data set.
        y_pred = Theta0 + Theta1 * x1 + Theta2 * x2
        errors = y_pred - y_actual

        # Mean-squared-error cost; the 1/2 factor keeps the gradient tidy.
        cost = torch.sum(errors ** 2) / (2 * len(x1))

        # Progress report every 100 iterations (cost printed as a plain
        # float instead of the original raw tensor repr).
        if iter_count % 100 == 0:
            print("Iteration {}: Cost = {}, Theta0 = {}, Theta1 = {}, Theta2 = {}".format(
                iter_count, cost.item(), Theta0.item(), Theta1.item(), Theta2.item()))

        # Convergence: stop once the cost improves by less than 1e-4.
        if prev_cost is not None and abs(cost.item() - prev_cost) < 0.0001:
            print("Converged after {} iterations".format(iter_count))
            break

        # Backward pass: populate .grad on the three parameters.
        cost.backward()

        # Gradient-descent update, outside autograd tracking.
        with torch.no_grad():
            Theta0 -= alpha * Theta0.grad
            Theta1 -= alpha * Theta1.grad
            Theta2 -= alpha * Theta2.grad

            # Reset gradients for the next iteration (they accumulate
            # otherwise).
            Theta0.grad.zero_()
            Theta1.grad.zero_()
            Theta2.grad.zero_()

        iter_count += 1
        # Bug fix: store a plain float.  The original kept the cost *tensor*
        # (with its grad_fn) alive across iterations just to compare one
        # scalar value.
        prev_cost = cost.item()

    # Final report.
    print("Final values: Theta0 = {}, Theta1 = {}, Theta2 = {}".format(Theta0.item(), Theta1.item(), Theta2.item()))
    print("Final Cost: Cost = {}".format(cost.item()))
    print("Final values: y_pred = {}, y_actual = {}".format(y_pred, y_actual))

    # Interactive prediction interface.
    # NOTE(review): predict_score() ignores the parameters trained above and
    # uses its own hard-coded coefficients, so the training loop does not
    # influence these predictions — confirm this is intentional.
    while True:
        x1 = float(input("Enter the number of new students: "))
        x2 = float(input("Enter the number of temperature: "))
        predicted_rooms = predict_score(x1, x2)
        print("Predicted rooms:", predicted_rooms)
        print()

        choice = input("Do you want to predict again? (y/n): ")
        if choice.lower() != 'y':
            break
|
82 |
+
|
83 |
+
# Bug fix: guard the script entry point so that importing this module does
# not immediately start training and block on stdin.
if __name__ == "__main__":
    gradient_descent()
|