import gradio as gr
from huggingface_hub import InferenceClient
import sys
import io
import traceback
import threading
import builtins
import time
import uuid
model_name = "Qwen/Qwen2.5-72B-Instruct"
client = InferenceClient(model_name)
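
# Ask the hosted model for runnable Python; the system prompt instructs it
# to return code only, with no explanation.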
def llm_inference(user_sample):
    eos_token = "<|endoftext|>"
    output = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "You are a Python language guide. Write code on the user topic. Make sure that the code is runnable and doesn't close the shell window. If the input is code, correct it for mistakes."},
            {"role": "user", "content": f"Write only python code without any explanation: {user_sample}"},
        ],
        stream=False,
        temperature=0.7,
        top_p=0.1,
        max_tokens=412,
        stop=[eos_token],
    )
    response = ''
    for choice in output.choices:
        response += choice.message.content
    return response
# A class to handle the execution environment per user session
class CodeExecutor:
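    """Run user code in a background thread, capturing stdout and
    intercepting input() so the web UI can supply stdin interactively.

    Note: exec() of untrusted code is not a security sandbox; run this
    only in a trusted or containerized environment.
    """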
    def __init__(self):
        self.local_vars = {}
        self.output = ""
        self.input_buffer = []
        self.waiting_for_input = False
        self.execution_thread = None
        self.execution_complete = False

    def custom_input(self, prompt=''):
        # Echo the prompt into the captured output, then block until
        # provide_input() delivers a value from the UI thread.
        self.output += prompt
        self.waiting_for_input = True
        while self.waiting_for_input:
            time.sleep(0.05)  # sleep instead of busy-waiting a full core
        return self.input_buffer.pop(0)
    def execute_code(self, code):
        def run_code():
            # Redirect stdout so anything the user code prints is captured.
            old_stdout = sys.stdout
            sys.stdout = io.StringIO()
            # Swap the built-in input() for one that blocks on the UI.
            # (Use the builtins module: __builtins__ may be a dict.)
            old_input = builtins.input
            builtins.input = self.custom_input
            try:
                exec(code, {}, self.local_vars)
            except Exception as e:
                self.output += f"\nError: {e}\n{traceback.format_exc()}"
            finally:
                self.output += sys.stdout.getvalue()
                sys.stdout = old_stdout
                builtins.input = old_input
                self.execution_complete = True

        self.execution_thread = threading.Thread(target=run_code)
        self.execution_thread.start()
    def provide_input(self, user_input):
        self.input_buffer.append(user_input)
        self.waiting_for_input = False

    def get_output(self):
        # While the worker thread runs, sys.stdout is its StringIO buffer;
        # drain whatever was printed since the last poll.
        if sys.stdout and hasattr(sys.stdout, "getvalue"):
            self.output += sys.stdout.getvalue()
            sys.stdout.seek(0)
            sys.stdout.truncate(0)
        return self.output

    def is_execution_complete(self):
        return self.execution_complete
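
# Illustrative usage of CodeExecutor (not executed by the app; the same
# calls are driven by the Gradio events below):
#   ex = CodeExecutor()
#   ex.execute_code("name = input('Who? '); print('Hi', name)")
#   # ...once the worker thread is blocked inside custom_input():
#   ex.provide_input("Ada")
#   print(ex.get_output())

# One CodeExecutor per UI session, keyed by a uuid4 string.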
executors = {}
def start_execution(code, session_id):
    executor = CodeExecutor()
    executors[session_id] = executor
    executor.execute_code(code)
    return "Code is running...", gr.update(visible=True), gr.update(visible=True)
def provide_input(user_input, session_id):
    executor = executors.get(session_id)
    if executor and not executor.is_execution_complete():
        executor.provide_input(user_input)
        return ""
    else:
        return "No execution in progress or execution has completed."
def poll_output(session_id):
    executor = executors.get(session_id)
    if executor:
        output = executor.get_output()
        if executor.is_execution_complete():
            del executors[session_id]
            return output, gr.update(visible=False), gr.update(visible=False)
        else:
            return output, gr.update(visible=True), gr.update(visible=True)
    else:
        return "No execution in progress.", gr.update(visible=False), gr.update(visible=False)
def chat(user_input, history):
    response = llm_inference(user_input)
    history.append({"role": "user", "content": user_input})
    history.append({"role": "assistant", "content": response})
    return "", history
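
# UI: three tabs (Chat for the LLM, Interpreter for execution, Logs).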
with gr.Blocks() as demo:
gr.Markdown("# π Python Helper Chatbot")
with gr.Tab("Chat"):
chatbot = gr.Chatbot(type='messages')
msg = gr.Textbox(placeholder="Type your message here...")
msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
with gr.Tab("Interpreter"):
gr.Markdown("### π₯οΈ Interactive Code Interpreter")
code_input = gr.Code(language="python", lines=20)
run_button = gr.Button("Run Code")
output_box = gr.Textbox(label="Output", lines=15)
input_box = gr.Textbox(label="Program Input", placeholder="Enter input for your program here...")
send_input_button = gr.Button("Send Input")
session_id = gr.State()
def run_code(code):
import uuid
session = str(uuid.uuid4())
session_id.value = session
return_values = start_execution(code, session)
return return_values
def send_input(user_input):
session = session_id.value
provide_input(user_input, session)
return ""
def update_output():
session = session_id.value
output, input_visible, button_visible = poll_output(session)
return output, input_visible, button_visible
run_button.click(run_code, inputs=code_input, outputs=[output_box, input_box, send_input_button])
send_input_button.click(send_input, inputs=input_box, outputs=input_box)
demo.load(update_output, outputs=[output_box, input_box, send_input_button], every=1)
with gr.Tab("Logs"):
gr.Markdown("### π Logs")
log_output = gr.Textbox(label="Logs", lines=10, interactive=False)
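
# queue() is required for the every=1 polling event above.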
demo.queue().launch()