Spaces:
Sleeping
Sleeping
Ilya
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -3,7 +3,6 @@ from huggingface_hub import InferenceClient
|
|
3 |
import sys
|
4 |
import io
|
5 |
import traceback
|
6 |
-
import threading
|
7 |
|
8 |
model_name = "Qwen/Qwen2.5-72B-Instruct"
|
9 |
client = InferenceClient(model_name)
|
@@ -12,7 +11,7 @@ def llm_inference(user_sample):
|
|
12 |
eos_token = "<|endoftext|>"
|
13 |
output = client.chat.completions.create(
|
14 |
messages=[
|
15 |
-
{"role": "system", "content": "You are a Python language guide. Write code on the user topic. Make sure that the code is runnable and doesn't close the shell window. If the input is code, correct it for mistakes."},
|
16 |
{"role": "user", "content": f"Write only python code without any explanation: {user_sample}"},
|
17 |
],
|
18 |
stream=False,
|
@@ -26,129 +25,37 @@ def llm_inference(user_sample):
|
|
26 |
response += choice['message']['content']
|
27 |
return response
|
28 |
|
29 |
-
# A class to handle the execution environment per user session
class CodeExecutor:
    """Runs one user's code on a background thread, capturing stdout and
    bridging input() calls to the web UI.

    Lifecycle: execute_code() starts the worker thread; provide_input()
    feeds a pending input() call; the UI polls via get_output() and
    is_execution_complete().
    """

    def __init__(self):
        self.local_vars = {}            # exec namespace; persists across runs
        self.output = ""                # accumulated text shown to the user
        self.input_buffer = []          # queued lines for input()
        self.waiting_for_input = False  # True while user code blocks on input()
        self.execution_thread = None
        self.execution_complete = False
        # Event replaces the previous busy-wait spin loop (which pegged a CPU
        # core) and means input provided *before* input() is reached is kept.
        self._input_ready = threading.Event()

    def custom_input(self, prompt=''):
        """Replacement for input(): echo the prompt, block until provide_input()."""
        self.output += prompt
        self.waiting_for_input = True
        # Loop guards against spurious wakeups and preserves inputs queued
        # ahead of time (the old spin-wait deadlocked in that case).
        while not self.input_buffer:
            self._input_ready.wait()
            self._input_ready.clear()
        self.waiting_for_input = False
        return self.input_buffer.pop(0)

    def execute_code(self, code):
        """Run `code` on a background thread with stdout redirected."""

        def run_code():
            # SECURITY: exec() of arbitrary user code — acceptable only in a
            # sandboxed deployment (e.g. a Space container), never on a
            # trusted host.
            old_stdout = sys.stdout
            sys.stdout = io.StringIO()
            # Expose our input() inside the exec namespace instead of
            # monkey-patching __builtins__, which is a dict (not a module)
            # outside __main__ and is shared by every thread in the process.
            self.local_vars.setdefault("input", self.custom_input)
            try:
                # One dict for globals AND locals so functions defined at the
                # top level of the user code can see each other (the old
                # ({}, local_vars) split broke mutual references).
                exec(code, self.local_vars, self.local_vars)
            except Exception as e:
                self.output += f"\nError: {e}\n{traceback.format_exc()}"
            finally:
                self.output += sys.stdout.getvalue()
                sys.stdout = old_stdout
                self.execution_complete = True

        self.execution_thread = threading.Thread(target=run_code)
        self.execution_thread.start()

    def provide_input(self, user_input):
        """Queue one line for the pending (or next) input() call."""
        self.input_buffer.append(user_input)
        self._input_ready.set()

    def get_output(self):
        """Return everything produced so far, draining the live stdout buffer."""
        # While the worker runs, sys.stdout is its StringIO; drain it so
        # repeated polls do not duplicate text.
        if sys.stdout and hasattr(sys.stdout, "getvalue"):
            self.output += sys.stdout.getvalue()
            sys.stdout.seek(0)
            sys.stdout.truncate(0)
        return self.output

    def is_execution_complete(self):
        """True once the worker thread has finished (success or error)."""
        return self.execution_complete
|
80 |
-
|
81 |
-
# One CodeExecutor per active session, keyed by session id.
executors = {}

def start_execution(code, session_id):
    """Create an executor for this session, launch the code, and reveal
    the input textbox and send button in the UI."""
    session_executor = CodeExecutor()
    executors[session_id] = session_executor
    session_executor.execute_code(code)
    return "Code is running...", gr.update(visible=True), gr.update(visible=True)
|
88 |
-
|
89 |
-
def provide_input(user_input, session_id):
    """Forward user-typed stdin to the session's running executor.

    Returns "" on success (clearing the textbox), or a status message
    when nothing is running for this session.
    """
    target = executors.get(session_id)
    if target is None or target.is_execution_complete():
        return "No execution in progress or execution has completed."
    target.provide_input(user_input)
    return ""
|
96 |
-
|
97 |
-
def poll_output(session_id):
    """Fetch accumulated output for a session and toggle the input widgets.

    Returns (text, input_box_update, send_button_update). The widgets stay
    visible while the code is still running; once it finishes, the executor
    is discarded and the widgets are hidden.
    """
    executor = executors.get(session_id)
    if not executor:
        return "No execution in progress.", gr.update(visible=False), gr.update(visible=False)
    text = executor.get_output()
    if executor.is_execution_complete():
        # Finished: drop the executor so later polls report idle.
        del executors[session_id]
        return text, gr.update(visible=False), gr.update(visible=False)
    return text, gr.update(visible=True), gr.update(visible=True)
|
108 |
-
|
109 |
def chat(user_input, history):
|
110 |
response = llm_inference(user_input)
|
111 |
-
history.append(
|
112 |
-
history.append({"role": "assistant", "content": response})
|
113 |
return history, history
|
114 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
115 |
with gr.Blocks() as demo:
|
116 |
gr.Markdown("# π Python Helper Chatbot")
|
117 |
with gr.Tab("Chat"):
|
118 |
-
chatbot = gr.Chatbot(
|
119 |
msg = gr.Textbox(placeholder="Type your message here...")
|
120 |
msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
|
121 |
with gr.Tab("Interpreter"):
|
122 |
-
gr.Markdown("### π₯οΈ
|
123 |
-
code_input = gr.Code(language="python"
|
124 |
run_button = gr.Button("Run Code")
|
125 |
-
|
126 |
-
|
127 |
-
send_input_button = gr.Button("Send Input")
|
128 |
-
session_id = gr.State()
|
129 |
-
|
130 |
-
def run_code(code):
|
131 |
-
import uuid
|
132 |
-
session = str(uuid.uuid4())
|
133 |
-
session_id.value = session
|
134 |
-
return_values = start_execution(code, session)
|
135 |
-
return return_values
|
136 |
-
|
137 |
-
def send_input(user_input):
|
138 |
-
session = session_id.value
|
139 |
-
provide_input(user_input, session)
|
140 |
-
return ""
|
141 |
-
|
142 |
-
def update_output():
|
143 |
-
session = session_id.value
|
144 |
-
output, input_visible, button_visible = poll_output(session)
|
145 |
-
return output, input_visible, button_visible
|
146 |
-
|
147 |
-
run_button.click(run_code, inputs=code_input, outputs=[output_box, input_box, send_input_button])
|
148 |
-
send_input_button.click(send_input, inputs=input_box, outputs=input_box)
|
149 |
-
demo.load(update_output, outputs=[output_box, input_box, send_input_button], every=1)
|
150 |
with gr.Tab("Logs"):
|
151 |
gr.Markdown("### π Logs")
|
152 |
log_output = gr.Textbox(label="Logs", lines=10, interactive=False)
|
153 |
|
154 |
-
demo.
|
|
|
3 |
import sys
|
4 |
import io
|
5 |
import traceback
|
|
|
6 |
|
7 |
model_name = "Qwen/Qwen2.5-72B-Instruct"
|
8 |
client = InferenceClient(model_name)
|
|
|
11 |
eos_token = "<|endoftext|>"
|
12 |
output = client.chat.completions.create(
|
13 |
messages=[
|
14 |
+
{"role": "system", "content": "You are a Python language guide. Write code on the user topic. Make sure that the code is runnable and doesn't close the shell window, so end with input() if the user request is simple. If the input is code, correct it for mistakes."},
|
15 |
{"role": "user", "content": f"Write only python code without any explanation: {user_sample}"},
|
16 |
],
|
17 |
stream=False,
|
|
|
25 |
response += choice['message']['content']
|
26 |
return response
|
27 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
28 |
def chat(user_input, history):
    """Run one chat turn: query the LLM and record the (user, bot) pair.

    Returns the history twice because the UI wires this handler to the
    Chatbot component for both outputs.
    """
    reply = llm_inference(user_input)
    history.append((user_input, reply))
    return history, history
|
32 |
|
33 |
+
def execute_code(code):
    """Execute a string of Python code and return everything it printed.

    On an exception, the partial stdout captured before the failure is
    kept and the error message plus full traceback is appended, so the
    user sees both what ran and why it stopped.
    """
    # SECURITY: exec() of arbitrary user input — only acceptable inside a
    # sandboxed/containerized deployment (as on Spaces), never on a
    # trusted host.
    old_stdout = sys.stdout
    redirected_output = sys.stdout = io.StringIO()
    try:
        exec(code, {})
        output = redirected_output.getvalue()
    except Exception as e:
        # Keep output printed before the crash (previously it was dropped).
        output = f"{redirected_output.getvalue()}Error: {e}\n{traceback.format_exc()}"
    finally:
        # Always restore stdout, even if exec raised.
        sys.stdout = old_stdout
    return output
|
44 |
+
|
45 |
# ---- UI layout and event wiring -------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# π Python Helper Chatbot")

    # Tab 1: LLM-backed chat. The handler appends to the Chatbot history.
    with gr.Tab("Chat"):
        chatbot = gr.Chatbot()
        msg = gr.Textbox(placeholder="Type your message here...")
        msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])

    # Tab 2: run a snippet and show its captured stdout.
    with gr.Tab("Interpreter"):
        gr.Markdown("### π₯οΈ Test Your Code")
        code_input = gr.Code(language="python")
        run_button = gr.Button("Run Code")
        code_output = gr.Textbox(label="Output")
        run_button.click(execute_code, inputs=code_input, outputs=code_output)

    # Tab 3: log view — NOTE(review): not wired to any event yet.
    with gr.Tab("Logs"):
        gr.Markdown("### π Logs")
        log_output = gr.Textbox(label="Logs", lines=10, interactive=False)

demo.launch()
|