import gradio as gr
from huggingface_hub import InferenceClient
import sys
import io
import traceback

# Initialize the AI model
model_name = "Qwen/Qwen2.5-72B-Instruct"
client = InferenceClient(model_name)
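# Note (assumption): depending on the model and rate limits, the serverless
# Inference API may require authentication, e.g. InferenceClient(model_name,
# token=...) or an HF_TOKEN secret configured on the Space.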
def llm_inference(user_sample):
    eos_token = "<|endoftext|>"
    output = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "You are a Python language guide. Write code on the user topic. If the input is code, correct it for mistakes."},
            {"role": "user", "content": f"Write only python code without any explanation: {user_sample}"},
        ],
        stream=False,
        temperature=0.7,
        top_p=0.1,
        max_tokens=412,
        stop=[eos_token],
    )
    # Concatenate the content of all returned choices (usually just one)
    response = ""
    for choice in output.choices:
        response += choice.message.content
    return response
def chat(user_input, history):
    response = llm_inference(user_input)
    history.append((user_input, response))
    # Return the updated history and an empty string to clear the input box
    return history, ""
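# Security note: execute_code() below runs user-supplied code with exec() in the
# app process; that is acceptable for a personal demo, but it should be sandboxed
# (separate process, container, or restricted globals) before being exposed publicly.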
def execute_code(code):
    # Capture stdout so that print() output from the executed code is returned
    old_stdout = sys.stdout
    redirected_output = sys.stdout = io.StringIO()
    try:
        exec(code, {})
        output = redirected_output.getvalue()
    except Exception as e:
        output = f"Error: {e}\n{traceback.format_exc()}"
    finally:
        sys.stdout = old_stdout
    return output
def solve_math_task(math_task):
    # Generate Python code for the math task
    generated_code = llm_inference(f"Create a Python program to solve the following math problem:\n{math_task}")
    # Execute the generated code
    execution_result = execute_code(generated_code)
    return generated_code, execution_result
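# Assumption: the model is prompted to return bare Python, but it may still wrap
# its answer in Markdown fences (```python ... ```); if execution fails on such
# output, strip the fences before passing the code to execute_code().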
with gr.Blocks() as demo:
    gr.Markdown("# Python Helper Chatbot")
    with gr.Tab("Chat"):
        chatbot = gr.Chatbot()
        msg = gr.Textbox(placeholder="Type your message here...")
        msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, msg])
with gr.Tab("Interpreter"): | |
gr.Markdown("### π₯οΈ Test Your Code") | |
code_input = gr.Code(language="python") | |
run_button = gr.Button("Run Code") | |
code_output = gr.Textbox(label="Output") | |
run_button.click(execute_code, inputs=code_input, outputs=code_output) | |
with gr.Tab("Math Solver"): | |
gr.Markdown("### π Math Task Solver") | |
math_input = gr.Textbox(placeholder="Enter your mathematical task here...", lines=2) | |
solve_button = gr.Button("Solve Task") | |
with gr.Row(): | |
generated_code_output = gr.Code(label="Generated Python Code", language="python") | |
with gr.Row(): | |
execution_output = gr.Textbox(label="Execution Result", lines=10) | |
solve_button.click(solve_math_task, inputs=math_input, outputs=[generated_code_output, execution_output]) | |
with gr.Tab("Logs"): | |
gr.Markdown("### π Logs") | |
log_output = gr.Textbox(label="Logs", lines=10, interactive=False) | |
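        # Placeholder: nothing writes to this textbox yet; a logging callback
        # would need to be wired up separately.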
# Launch the Gradio app
demo.launch()
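# On a Hugging Face Space the app is launched automatically from app.py;
# locally, `python app.py` serves the UI on http://127.0.0.1:7860 by default.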