Create app.py
app.py
ADDED
@@ -0,0 +1,336 @@
import gradio as gr
import numpy as np
import pandas as pd
from typing import Dict
import time
from sklearn.metrics import accuracy_score, mean_squared_error, classification_report
import torch
import torch.nn as nn
import matplotlib.pyplot as plt

# Additional ML helper functions
def evaluate_ml_solution(y_true, y_pred, task_type='classification'):
    """Evaluate ML model predictions"""
    if task_type == 'classification':
        accuracy = accuracy_score(y_true, y_pred)
        report = classification_report(y_true, y_pred)
        return f"Accuracy: {accuracy:.4f}\n\nDetailed Report:\n{report}"
    else:
        mse = mean_squared_error(y_true, y_pred)
        rmse = np.sqrt(mse)
        return f"MSE: {mse:.4f}\nRMSE: {rmse:.4f}"

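For reference, a quick check of the helper above with toy labels (chosen purely for illustration; the classification report body is elided here):

    >>> evaluate_ml_solution([0, 1, 1, 0], [0, 1, 0, 0], task_type='classification')
    'Accuracy: 0.7500\n\nDetailed Report:\n...'
    >>> evaluate_ml_solution([2.0, 3.0], [2.5, 2.5], task_type='regression')
    'MSE: 0.2500\nRMSE: 0.5000'
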
# Extended problem set including ML problems
PROBLEM_DATA = {
    # Original Algorithm Problems
    "Valid Parentheses": {
        "type": "algorithm",
        "difficulty": "easy",
        "description": "Given a string s containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.",
        "test_cases": [
            {'input': "()", 'expected': True},
            {'input': "()[]{}", 'expected': True},
            {'input': "(]", 'expected': False}
        ],
        "sample_input": "()",
        "starter_code": """def solution(s: str) -> bool:
    # Write your solution here
    pass"""
    },

    # ML Classification Problem
    "Binary Classification": {
        "type": "ml_classification",
        "difficulty": "medium",
        "description": "Create a binary classifier for the provided dataset. Features include numerical values, target is binary (0/1).",
        "test_cases": [
            {
                'input': pd.DataFrame({
                    'feature1': [1.2, 2.3, 3.4, 4.5],
                    'feature2': [2.1, 3.2, 4.3, 5.4]
                }),
                'expected': np.array([0, 1, 1, 0])
            }
        ],
        "starter_code": """class MLSolution:
    def __init__(self):
        self.model = None

    def fit(self, X, y):
        # Implement training logic
        pass

    def predict(self, X):
        # Implement prediction logic
        return np.zeros(len(X))"""
    },

    # Neural Network Problem
    "Simple Neural Network": {
        "type": "deep_learning",
        "difficulty": "hard",
        "description": "Implement a simple neural network for binary classification using PyTorch.",
        "test_cases": [
            {
                'input': torch.randn(10, 5),  # 10 samples, 5 features
                'expected': torch.randint(0, 2, (10,))
            }
        ],
        "starter_code": """class NeuralNetwork(nn.Module):
    def __init__(self, input_size):
        super(NeuralNetwork, self).__init__()
        self.layer1 = nn.Linear(input_size, 64)
        self.layer2 = nn.Linear(64, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = torch.relu(self.layer1(x))
        x = self.sigmoid(self.layer2(x))
        return x"""
    },

    # Regression Problem
    "House Price Prediction": {
        "type": "ml_regression",
        "difficulty": "medium",
        "description": "Implement a regression model to predict house prices based on features like size, location, etc.",
        "test_cases": [
            {
                'input': pd.DataFrame({
                    'size': [1500, 2000, 2500],
                    'rooms': [3, 4, 5],
                    'location_score': [8, 7, 9]
                }),
                'expected': np.array([250000, 300000, 400000])
            }
        ],
        "starter_code": """class RegressionSolution:
    def __init__(self):
        self.model = None

    def fit(self, X, y):
        # Implement training logic
        pass

    def predict(self, X):
        # Implement prediction logic
        return np.zeros(len(X))"""
    }
}

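To illustrate the submission format the algorithm runner expects, a solution for "Valid Parentheses" that passes the three test cases above might look like this (an example, not part of the committed file):

    def solution(s: str) -> bool:
        # Match each closing bracket against the most recent opener on a stack
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for ch in s:
            if ch in pairs:
                if not stack or stack.pop() != pairs[ch]:
                    return False
            else:
                stack.append(ch)
        return not stack
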
def create_sample_data(problem_type: str) -> Dict:
    """Create sample datasets for ML problems"""
    if problem_type == 'ml_classification':
        X_train = pd.DataFrame(np.random.randn(100, 2), columns=['feature1', 'feature2'])
        y_train = np.random.randint(0, 2, 100)
        X_test = pd.DataFrame(np.random.randn(20, 2), columns=['feature1', 'feature2'])
        y_test = np.random.randint(0, 2, 20)
        return {'X_train': X_train, 'y_train': y_train, 'X_test': X_test, 'y_test': y_test}

    elif problem_type == 'ml_regression':
        X_train = pd.DataFrame(np.random.randn(100, 3),
                               columns=['size', 'rooms', 'location_score'])
        y_train = np.random.uniform(200000, 500000, 100)
        X_test = pd.DataFrame(np.random.randn(20, 3),
                              columns=['size', 'rooms', 'location_score'])
        y_test = np.random.uniform(200000, 500000, 20)
        return {'X_train': X_train, 'y_train': y_train, 'X_test': X_test, 'y_test': y_test}

    elif problem_type == 'deep_learning':
        # Generate sample data for neural network
        X_train = torch.randn(100, 5)  # 100 samples, 5 features
        y_train = torch.randint(0, 2, (100,))  # Binary classification
        X_test = torch.randn(20, 5)  # 20 samples, 5 features
        y_test = torch.randint(0, 2, (20,))  # Binary classification
        return {'X_train': X_train, 'y_train': y_train, 'X_test': X_test, 'y_test': y_test}

    return None

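A quick sanity check of the shapes returned for the classification case (illustrative only):

    data = create_sample_data('ml_classification')
    print(data['X_train'].shape, data['y_train'].shape)  # (100, 2) (100,)
    print(data['X_test'].shape, data['y_test'].shape)    # (20, 2) (20,)
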
def run_tests(problem_name: str, user_code: str) -> str:
    try:
        problem = PROBLEM_DATA[problem_name]

        if problem["type"] == "algorithm":
            # Execute algorithm problems
            namespace = {}
            exec(user_code, namespace)
            results = []

            for i, test in enumerate(problem["test_cases"], 1):
                try:
                    start_time = time.time()
                    output = namespace["solution"](test["input"])
                    execution_time = time.time() - start_time

                    passed = output == test["expected"]
                    results.append(
                        f"Test #{i}:\n"
                        f"Input: {test['input']}\n"
                        f"Expected: {test['expected']}\n"
                        f"Got: {output}\n"
                        f"Time: {execution_time:.6f}s\n"
                        f"Status: {'✓ PASSED' if passed else '✗ FAILED'}\n"
                    )
                except Exception as e:
                    results.append(f"Test #{i} Error: {str(e)}\n")

            return "\n".join(results)

        else:
            # Execute ML problems
            namespace = {"np": np, "pd": pd, "nn": nn, "torch": torch}
            exec(user_code, namespace)

            # Create sample data
            data = create_sample_data(problem["type"])
            if not data:
                return "Error: Invalid problem type"

            try:
                if problem["type"] in ["ml_classification", "ml_regression"]:
                    # Initialize and train model. The classification starter code defines
                    # MLSolution while the regression starter code defines RegressionSolution,
                    # so accept whichever class the submission provides.
                    solution_cls = namespace.get("MLSolution") or namespace.get("RegressionSolution")
                    if solution_cls is None:
                        return "Error: code must define an MLSolution or RegressionSolution class"
                    model = solution_cls()
                    model.fit(data["X_train"], data["y_train"])

                    # Make predictions
                    predictions = model.predict(data["X_test"])

                    # Evaluate
                    eval_result = evaluate_ml_solution(
                        data["y_test"],
                        predictions,
                        "classification" if problem["type"] == "ml_classification" else "regression"
                    )

                    return f"Model Evaluation:\n{eval_result}"

                elif problem["type"] == "deep_learning":
                    # Initialize neural network
                    model = namespace["NeuralNetwork"](data["X_train"].shape[1])
                    criterion = nn.BCELoss()  # Binary cross-entropy loss
                    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

                    # Convert data to tensors
                    X_train = data["X_train"].float()
                    y_train = data["y_train"].float().view(-1, 1)
                    X_test = data["X_test"].float()
                    y_test = data["y_test"].float().view(-1, 1)

                    # Train the model
                    for epoch in range(10):  # 10 epochs
                        optimizer.zero_grad()
                        outputs = model(X_train)
                        loss = criterion(outputs, y_train)
                        loss.backward()
                        optimizer.step()

                    # Evaluate the model
                    with torch.no_grad():
                        predictions = model(X_test)
                        predictions = (predictions > 0.5).float()  # Convert probabilities to binary predictions
                        accuracy = (predictions == y_test).float().mean()

                    return f"Neural Network Evaluation:\nAccuracy: {accuracy.item():.4f}"

            except Exception as e:
                return f"Error in ML execution: {str(e)}"

    except Exception as e:
        return f"Error in code execution: {str(e)}"

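For context, a submission that the ml_classification branch above could train and score might be the following sketch using scikit-learn's LogisticRegression (any estimator exposing fit/predict would work; the import is available because user code runs via exec in an ordinary Python namespace):

    from sklearn.linear_model import LogisticRegression

    class MLSolution:
        def __init__(self):
            # Plain logistic regression as a baseline binary classifier
            self.model = LogisticRegression()

        def fit(self, X, y):
            self.model.fit(X, y)

        def predict(self, X):
            return self.model.predict(X)
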
# Create Gradio interface with enhanced features
def create_interface():
    with gr.Blocks(title="Advanced LeetCode & ML Testing Platform") as iface:
        # All components and event handlers must be defined within this 'with' block
        gr.Markdown("# Advanced LeetCode & ML Testing Platform")

        with gr.Tabs():
            with gr.Tab("Problem Solving"):
                problem_dropdown = gr.Dropdown(
                    choices=list(PROBLEM_DATA.keys()),
                    label="Select Problem"
                )
                difficulty_display = gr.Textbox(label="Difficulty")
                problem_type = gr.Textbox(label="Problem Type")
                description_text = gr.Textbox(label="Description", lines=5)
                code_input = gr.Textbox(label="Your Code", lines=10, value="")
                results_output = gr.Textbox(label="Test Results", value="", lines=10)

                with gr.Row():
                    run_button = gr.Button("Run Tests")
                    clear_button = gr.Button("Clear Code")

                # Event handler for Run Tests button (inside Blocks context)
                run_button.click(
                    run_tests,
                    inputs=[problem_dropdown, code_input],
                    outputs=[results_output]
                )

                # Event handler for Clear Code button (inside Blocks context)
                clear_button.click(
                    lambda: "",
                    inputs=[],
                    outputs=[code_input]
                )

                # Event handler for problem selection (inside Blocks context)
                def update_problem_info(problem_name):
                    problem = PROBLEM_DATA[problem_name]
                    return (
                        problem["difficulty"],
                        problem["type"],
                        problem["description"],
                        problem["starter_code"],
                        ""  # Clear results
                    )

                problem_dropdown.change(
                    update_problem_info,
                    inputs=[problem_dropdown],
                    outputs=[
                        difficulty_display,
                        problem_type,
                        description_text,
                        code_input,
                        results_output
                    ]
                )

            with gr.Tab("Visualization"):
                with gr.Row():
                    plot_type = gr.Dropdown(
                        choices=["Learning Curve", "Confusion Matrix", "Feature Importance"],
                        label="Select Plot Type"
                    )
                    visualize_button = gr.Button("Generate Visualization")

                plot_output = gr.Plot(label="Visualization")

                # Event handler for visualization
                def generate_visualization(plot_type):
                    if plot_type == "Learning Curve":
                        # Example learning curve; return the Figure object so gr.Plot can render it
                        fig = plt.figure()
                        plt.plot([0, 1, 2, 3, 4], [0.8, 0.7, 0.6, 0.5, 0.4], label="Training Loss")
                        plt.plot([0, 1, 2, 3, 4], [0.9, 0.8, 0.7, 0.6, 0.5], label="Validation Loss")
                        plt.xlabel("Epochs")
                        plt.ylabel("Loss")
                        plt.title("Learning Curve")
                        plt.legend()
                        return fig
                    else:
                        return None

                visualize_button.click(
                    generate_visualization,
                    inputs=[plot_type],
                    outputs=[plot_output]
                )

    return iface


if __name__ == "__main__":
    iface = create_interface()
    iface.launch()
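Note that the Space also needs the imported libraries installed at build time; a plausible requirements.txt for this commit (an assumption, not included in the diff) would list:

    gradio
    numpy
    pandas
    scikit-learn
    torch
    matplotlib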