Update app.py
app.py CHANGED
```diff
@@ -2,6 +2,16 @@ import socket
 import subprocess
 import gradio as gr
 from openai import OpenAI
+import json
+import sys
+from io import StringIO
+import traceback
+import matplotlib
+
+matplotlib.use("Agg")  # Use non-interactive backend
+import matplotlib.pyplot as plt
+import base64
+from io import BytesIO
 
 
 subprocess.Popen("bash /home/user/app/start.sh", shell=True)
```
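The commit selects matplotlib's Agg backend before `matplotlib.pyplot` is imported, which is what allows figures to render in a headless Space with no display server. A minimal standalone sketch of that pattern (not part of app.py, just a check of the idiom):

```python
# Sketch: confirm the Agg backend renders a figure entirely off-screen.
import matplotlib

matplotlib.use("Agg")  # must run before importing pyplot
import matplotlib.pyplot as plt
from io import BytesIO

plt.plot([0, 1], [0, 1])
buf = BytesIO()
plt.savefig(buf, format="png")  # writes PNG bytes to the buffer
plt.close("all")
print(f"rendered {buf.getbuffer().nbytes} bytes without a display")
```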
````diff
@@ -9,12 +19,88 @@ subprocess.Popen("bash /home/user/app/start.sh", shell=True)
 client = OpenAI(base_url="http://0.0.0.0:8000/v1", api_key="sk-local", timeout=600)
 
 
+def execute_python_code(code):
+    """Execute Python code safely and return results"""
+    # Capture stdout
+    old_stdout = sys.stdout
+    sys.stdout = StringIO()
+
+    # Store any plots
+    plt.clf()  # Clear any existing plots
+
+    try:
+        # Execute the code
+        exec_globals = {
+            "plt": plt,
+            "matplotlib": matplotlib,
+            "__builtins__": __builtins__,
+            # Add other safe modules as needed
+            "json": json,
+            "math": __import__("math"),
+            "numpy": __import__("numpy"),  # if available
+            "pandas": __import__("pandas"),  # if available
+        }
+
+        exec(code, exec_globals)
+
+        # Get printed output
+        output = sys.stdout.getvalue()
+
+        # Check if there are any plots
+        plot_data = None
+        if plt.get_fignums():  # If there are active figures
+            buf = BytesIO()
+            plt.savefig(buf, format="png", bbox_inches="tight", dpi=150)
+            buf.seek(0)
+            plot_data = base64.b64encode(buf.read()).decode()
+            plt.close("all")  # Close all figures
+
+        sys.stdout = old_stdout
+
+        result = {"success": True, "output": output, "plot": plot_data}
+
+        return result
+
+    except Exception as e:
+        sys.stdout = old_stdout
+        error_msg = f"Error: {str(e)}\n{traceback.format_exc()}"
+        return {"success": False, "output": error_msg, "plot": None}
+
+
+def handle_function_call(function_name, arguments):
+    """Handle function calls from the model"""
+    if function_name == "browser_search":
+        # Implement your browser search logic here
+        query = arguments.get("query", "")
+        max_results = arguments.get("max_results", 5)
+        return f"Search results for '{query}' (max {max_results} results): [Implementation needed]"
+
+    elif function_name == "code_interpreter":
+        code = arguments.get("code", "")
+        if not code:
+            return "No code provided to execute."
+
+        result = execute_python_code(code)
+
+        if result["success"]:
+            response = f"Code executed successfully:\n\n```\n{result['output']}\n```"
+            if result["plot"]:
+                response += (
+                    f"\n\n[Plot generated - base64 data: {result['plot'][:50]}...]"
+                )
+            return response
+        else:
+            return f"Code execution failed:\n\n```\n{result['output']}\n```"
+
+    return f"Unknown function: {function_name}"
+
+
 def respond(
     message,
-    history: list[tuple[str, str]]=[],
+    history: list[tuple[str, str]] = [],
     system_message=None,
     max_tokens=None,
-    temperature=0.7
+    temperature=0.7,
 ):
     messages = []
     if system_message:
````
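Note that `execute_python_code` runs model-generated code with `exec` inside the Gradio process and hands it the full `__builtins__`, so the docstring's "safely" is aspirational: the executed code can read files, open sockets, or hang the app. A common hardening step is to run the snippet in a separate interpreter with a timeout. A minimal sketch of that alternative (the name `run_code_sandboxed` is hypothetical, not from this commit):

```python
# Sketch: run untrusted code in a separate interpreter with a timeout,
# instead of exec() in the app process. Hypothetical alternative helper,
# not what app.py does.
import subprocess
import sys

def run_code_sandboxed(code: str, timeout: float = 10.0) -> dict:
    try:
        proc = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        return {
            "success": proc.returncode == 0,
            "output": proc.stdout or proc.stderr,
        }
    except subprocess.TimeoutExpired:
        return {"success": False, "output": f"Timed out after {timeout}s"}
```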
```diff
@@ -63,22 +149,70 @@ def respond(
                         },
                     },
                 },
-                {
+                {
+                    "type": "function",
+                    "function": {
+                        "name": "code_interpreter",
+                        "description": (
+                            "Execute Python code and return the results. "
+                            "Can generate plots, perform calculations, and data analysis."
+                        ),
+                        "parameters": {
+                            "type": "object",
+                            "properties": {
+                                "code": {
+                                    "type": "string",
+                                    "description": "The Python code to execute.",
+                                },
+                            },
+                            "required": ["code"],
+                        },
+                    },
+                },
             ],
         )
 
         print("messages", messages)
         output = ""
+        function_calls_to_handle = []
+
         for chunk in stream:
             delta = chunk.choices[0].delta
 
+            # Handle function calls
+            if hasattr(delta, "tool_calls") and delta.tool_calls:
+                for tool_call in delta.tool_calls:
+                    if tool_call.function:
+                        function_calls_to_handle.append(
+                            {
+                                "name": tool_call.function.name,
+                                "arguments": json.loads(tool_call.function.arguments),
+                            }
+                        )
+
+            # Handle regular content
             try:
-                output += delta.reasoning_content
+                if hasattr(delta, "reasoning_content") and delta.reasoning_content:
+                    output += delta.reasoning_content
+                elif delta.content:
+                    output += delta.content
             except:
-                output += delta.content
+                if delta.content:
+                    output += delta.content
 
             yield output
 
+        # Handle any function calls that were made
+        if function_calls_to_handle:
+            for func_call in function_calls_to_handle:
+                func_result = handle_function_call(
+                    func_call["name"], func_call["arguments"]
+                )
+                output += (
+                    f"\n\n**Function Result ({func_call['name']}):**\n{func_result}"
+                )
+                yield output
+
     except Exception as e:
         print(f"[Error] {e}")
         yield "⚠️ Llama.cpp server error"
```
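One caveat in the new streaming loop: it calls `json.loads(tool_call.function.arguments)` on every delta, but OpenAI-compatible servers typically stream a function call's arguments as string fragments spread across many chunks, so parsing each fragment in isolation can raise `json.JSONDecodeError`. The usual fix is to buffer fragments keyed by `tool_call.index` and parse once the stream is exhausted. A sketch of that approach (`collect_tool_calls` is illustrative, not part of the commit; a real loop would interleave this with the content handling above):

```python
# Sketch: accumulate streamed tool-call fragments before parsing.
# Assumes an OpenAI-style stream where delta.tool_calls[i] carries an
# index plus partial function.name / function.arguments strings.
import json

def collect_tool_calls(stream):
    """Buffer streamed tool-call fragments; parse JSON only at the end."""
    calls = {}  # tool_call.index -> {"name": str, "arguments": str}
    for chunk in stream:
        delta = chunk.choices[0].delta
        for tc in getattr(delta, "tool_calls", None) or []:
            entry = calls.setdefault(tc.index, {"name": "", "arguments": ""})
            if tc.function and tc.function.name:
                entry["name"] = tc.function.name
            if tc.function and tc.function.arguments:
                entry["arguments"] += tc.function.arguments
    # Parse only after the stream is exhausted, when arguments are complete
    return [
        {"name": c["name"], "arguments": json.loads(c["arguments"] or "{}")}
        for c in calls.values()
    ]
```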
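Finally, `handle_function_call` only embeds a truncated base64 marker for plots (`[Plot generated - base64 data: ...]`). If the intent is to show the figure in the chat window, one option (an assumption about the intended UI, not something this commit does) is to emit the PNG as a markdown data URI, which a markdown-rendering chat component can display inline:

```python
# Sketch: turn execute_python_code's base64 PNG into inline markdown.
# Hypothetical helper; whether it renders depends on the chat UI's
# markdown/image handling.
def plot_markdown(plot_b64: str) -> str:
    return f"![plot](data:image/png;base64,{plot_b64})"
```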