# PythonHelper / app.py (Hugging Face Space by Ilya, commit fb45bad)
# Gradio app: LLM-backed Python helper chat plus an interactive code interpreter.
import builtins
import io
import sys
import threading
import traceback

import gradio as gr
from huggingface_hub import InferenceClient
# Hosted model used for every chat completion request.
model_name = "Qwen/Qwen2.5-72B-Instruct"
# Single shared client for the Hugging Face Inference API; reused by llm_inference.
client = InferenceClient(model_name)
def llm_inference(user_sample):
    """Ask the hosted LLM for runnable Python code on the user's topic.

    The system prompt pins the model to Python-guide behavior; the user
    prompt asks for code only, no prose.  Returns the concatenated text
    of all returned choices.
    """
    end_marker = "<|endoftext|>"
    completion = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "You are a Python language guide. Write code on the user topic. Make sure that the code is runnable and doesn't close the shell window. If the input is code, correct it for mistakes."},
            {"role": "user", "content": f"Write only python code without any explanation: {user_sample}"},
        ],
        stream=False,
        temperature=0.7,
        top_p=0.1,
        max_tokens=412,
        stop=[end_marker],
    )
    # Join the content of every choice (normally there is exactly one).
    return ''.join(choice['message']['content'] for choice in completion.choices)
class CodeExecutor:
    """Per-session sandbox-ish runner for user-submitted Python code.

    Executes code in a background thread, capturing everything written to
    stdout and bridging ``input()`` calls to text supplied asynchronously
    via :meth:`provide_input`.  One instance is created per user session.

    NOTE(review): ``exec`` on user code and process-global stdout/input
    swapping are inherently unsafe and not isolated between concurrent
    sessions — acceptable only for a demo Space.
    """

    def __init__(self):
        self.local_vars = {}            # persists names across exec() calls
        self.output = ""                # accumulated program output / errors
        self.input_buffer = []          # pending lines for custom_input
        self.waiting_for_input = False  # True while program is blocked on input()
        self.execution_thread = None
        self.execution_complete = False
        # Signals that provide_input() has delivered a line.  Replaces the
        # original busy-wait (`while flag: pass`), which pegged a CPU core
        # and deadlocked if input arrived before input() was called.
        self._input_ready = threading.Event()

    def custom_input(self, prompt=''):
        """Replacement for builtins.input: block until input is provided."""
        self.waiting_for_input = True
        self._input_ready.wait()
        self._input_ready.clear()
        self.waiting_for_input = False
        return self.input_buffer.pop(0)

    def execute_code(self, code):
        """Start executing ``code`` on a background thread (non-blocking)."""
        def run_code():
            # Capture stdout for the duration of the run.
            old_stdout = sys.stdout
            sys.stdout = io.StringIO()
            # Swap input() via the `builtins` module.  The original used
            # `__builtins__`, which is a dict (not a module) in imported
            # modules — a documented CPython implementation detail.
            old_input = builtins.input
            builtins.input = self.custom_input
            try:
                exec(code, {}, self.local_vars)  # SECURITY: untrusted code
            except Exception as e:
                self.output += f"\nError: {e}\n{traceback.format_exc()}"
            finally:
                self.output += sys.stdout.getvalue()
                sys.stdout = old_stdout
                builtins.input = old_input
                self.execution_complete = True

        # Daemon thread so a program stuck waiting for input can't block
        # interpreter shutdown.
        self.execution_thread = threading.Thread(target=run_code, daemon=True)
        self.execution_thread.start()

    def provide_input(self, user_input):
        """Deliver one line of input to a program blocked in input()."""
        self.input_buffer.append(user_input)
        self.waiting_for_input = False
        self._input_ready.set()

    def get_output(self):
        """Return all output accumulated so far."""
        return self.output

    def is_execution_complete(self):
        """True once the background thread has finished (or errored)."""
        return self.execution_complete
# Active CodeExecutor instances, keyed by session id string.
executors = {}
def start_execution(code, session_id):
    """Create a fresh executor for this session and launch ``code`` on it."""
    session_executor = CodeExecutor()
    executors[session_id] = session_executor
    session_executor.execute_code(code)
    return "Execution started. Waiting for output..."
def provide_input(user_input, session_id):
    """Forward user-typed input to the session's running program."""
    active = executors.get(session_id)
    # Guard: nothing to feed if there is no live executor for this session.
    if active is None or active.is_execution_complete():
        return "No execution in progress or execution has completed."
    active.provide_input(user_input)
    return "Input received."
def get_output(session_id):
    """Return accumulated output for a session; drop sessions once finished."""
    active = executors.get(session_id)
    if active is None:
        return "No execution in progress."
    captured = active.get_output()
    # Completed sessions are removed so the registry doesn't grow forever.
    if active.is_execution_complete():
        del executors[session_id]
    return captured
def chat(user_input, history):
    """Send the user's message to the LLM and append the exchange to history.

    Returns the history twice because the Gradio handler wires it to two
    outputs (both bound to the chatbot component).
    """
    reply = llm_inference(user_input)
    history.append((user_input, reply))
    return history, history
with gr.Blocks() as demo:
    gr.Markdown("# 🐍 Python Helper Chatbot")

    with gr.Tab("Chat"):
        chatbot = gr.Chatbot()
        msg = gr.Textbox(placeholder="Type your message here...")
        msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])

    with gr.Tab("Interpreter"):
        gr.Markdown("### πŸ–₯️ Interactive Code Interpreter")
        code_input = gr.Code(language="python", lines=20)
        run_button = gr.Button("Run Code")
        output_box = gr.Textbox(label="Output", lines=15)
        input_box = gr.Textbox(label="Program Input", placeholder="Enter input for your program here...")
        send_input_button = gr.Button("Send Input")
        refresh_button = gr.Button("Refresh Output")
        # gr.State has no .set()/.get() methods (the original crashed on
        # them); the session id must flow through event inputs/outputs.
        session_id = gr.State(value=None)

        def run_code(code):
            """Start a new execution session and remember its id in State."""
            from uuid import uuid4
            session = str(uuid4())
            start_execution(code, session)
            return "Code is running...", session

        def send_input(user_input, session):
            """Feed one line of input to the running program, then clear the box."""
            provide_input(user_input, session)
            return ""

        def update_output(session):
            """Poll the executor for output accumulated so far."""
            if session:
                return get_output(session)
            return "No execution in progress."

        run_button.click(run_code, inputs=code_input,
                         outputs=[output_box, session_id])
        send_input_button.click(send_input, inputs=[input_box, session_id],
                                outputs=input_box)
        # Explicit refresh instead of the original `.change(...).every(1)`,
        # which is not a valid event chain and never polled anything.
        refresh_button.click(update_output, inputs=session_id,
                             outputs=output_box)

    with gr.Tab("Logs"):
        gr.Markdown("### πŸ“œ Logs")
        log_output = gr.Textbox(label="Logs", lines=10, interactive=False)

demo.launch()