Spaces:
Sleeping
Sleeping
Create backend/app.py
Browse files- backend/app.py +85 -0
backend/app.py
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from flask import Flask, request, jsonify
import mlflow
import pandas as pd
# NOTE(review): these three wildcard imports export overlapping names
# (setup, compare_models, pull, ...). Each later import silently shadows
# the earlier ones, so unqualified calls below resolve to whichever
# module was imported LAST — confirm this is intended before relying on
# the apparent per-problem-type dispatch.
from pycaret.classification import *
from pycaret.regression import *
from pycaret.clustering import *
import json
import os
import groq

# Flask application exposing the /analyze and /train endpoints.
app = Flask(__name__)

# Initialize GROQ client. The key comes from the GROQ_API_KEY environment
# variable; os.getenv returns None when unset, so a missing key only
# surfaces at the first API call, not at startup — TODO confirm desired.
groq_client = groq.Client(api_key=os.getenv("GROQ_API_KEY"))

# MLflow Configuration — assumes a tracking server is already running
# locally on port 5000 (the app itself serves on 5001, see __main__).
mlflow.set_tracking_uri("http://127.0.0.1:5000")
mlflow.set_experiment("Neural-Vision Enhanced")
|
19 |
+
|
20 |
+
@app.route('/analyze', methods=['POST'])
def analyze():
    """Answer a user question about model metrics/data via the GROQ LLM.

    Expects a JSON body with:
        prompt  -- the user's question (string).
        context -- JSON-encoded string (or already-parsed object)
                   describing the data under discussion.
        metrics -- optional dict of model metrics.

    Returns:
        {"analysis": <LLM response text>} on success,
        {"error": <message>} with HTTP 500 on any failure.
    """
    try:
        # request.json is None when the body is absent/not JSON; default
        # to an empty dict so .get() lookups below cannot explode.
        data = request.json or {}
        prompt = data.get('prompt')

        # 'context' may be a JSON-encoded string or an already-parsed
        # object. The original json.loads(data.get('context')) raised
        # TypeError whenever the key was missing (json.loads(None)).
        raw_context = data.get('context')
        if isinstance(raw_context, str):
            context = json.loads(raw_context)
        elif raw_context is not None:
            context = raw_context
        else:
            context = {}

        metrics = data.get('metrics', {})

        # Create GROQ prompt with context
        system_prompt = f"""
        You are a data science assistant analyzing model metrics and data.
        Context: {json.dumps(context, indent=2)}
        Metrics: {json.dumps(metrics, indent=2)}
        """

        # Get GROQ response
        response = groq_client.chat.completions.create(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt}
            ],
            model="mixtral-8x7b-32768",
            temperature=0.7,
            max_tokens=1024
        )

        return jsonify({
            "analysis": response.choices[0].message.content
        })

    except Exception as e:
        # Endpoint boundary: surface any failure as a JSON 500 response.
        return jsonify({"error": str(e)}), 500
|
52 |
+
|
53 |
+
@app.route('/train', methods=['POST'])
def train_model():
    """Train the best PyCaret model for the posted data and log it to MLflow.

    Expects a JSON body with:
        data         -- records convertible to a DataFrame.
        problem_type -- "Classification", "Regression", or anything else
                        (treated as clustering).
        target       -- target column name (supervised problems only).

    Returns:
        {"model": <repr>, "metrics": <leaderboard dict>} on success,
        {"error": <message>} with HTTP 500 on failure.
    """
    try:
        data = request.json
        df = pd.DataFrame(data['data'])
        problem_type = data['problem_type']
        target = data.get('target')

        # Import the PyCaret module matching the problem type INSIDE the
        # branch. The file-level wildcard imports shadow each other (last
        # import wins), so the original unqualified setup()/compare_models()
        # calls did not actually dispatch per problem type.
        if problem_type == "Classification":
            from pycaret.classification import setup, compare_models, pull
            setup(df, target=target, session_id=42)
            best_model = compare_models()
        elif problem_type == "Regression":
            from pycaret.regression import setup, compare_models, pull
            setup(df, target=target, session_id=42)
            best_model = compare_models()
        else:
            # Clustering is unsupervised: no target, and pycaret.clustering
            # has no compare_models(); build a default k-means model instead.
            from pycaret.clustering import setup, create_model, pull
            setup(df, session_id=42)
            best_model = create_model('kmeans')

        metrics = pull().to_dict()

        # Log to MLflow. pull() returns a DataFrame, so `metrics` is a
        # nested dict; mlflow.log_metrics accepts only a flat
        # {name: float} mapping, so keep just the numeric leaf values.
        flat_metrics = {
            f"{col}_{row}": float(val)
            for col, rows in metrics.items()
            for row, val in rows.items()
            if isinstance(val, (int, float))
        }
        with mlflow.start_run():
            if flat_metrics:
                mlflow.log_metrics(flat_metrics)
            mlflow.sklearn.log_model(best_model, "model")

        return jsonify({
            "model": str(best_model),
            "metrics": metrics
        })

    except Exception as e:
        # Endpoint boundary: surface any failure as a JSON 500 response.
        return jsonify({"error": str(e)}), 500
|
83 |
+
|
84 |
+
if __name__ == '__main__':
    # Serve on localhost:5001 — port 5000 is occupied by the MLflow
    # tracking server this app points at.
    app.run(host='127.0.0.1', port=5001)
|