File size: 3,902 Bytes
298d7d8
 
 
 
 
39dd26a
298d7d8
99ced92
298d7d8
 
 
 
 
 
 
39dd26a
 
 
 
 
 
 
 
298d7d8
 
 
 
 
 
 
 
 
 
 
 
f66a341
 
 
 
 
 
 
 
 
 
 
 
39dd26a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
99ced92
39dd26a
 
 
99ced92
298d7d8
 
 
f66a341
298d7d8
 
99ced92
298d7d8
f66a341
 
298d7d8
f66a341
 
99ced92
298d7d8
 
 
 
99ced92
f66a341
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
import contextlib
import io
import re  # Import the regular expressions module
import sys
import traceback

import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the AI model.
# NOTE: constructing InferenceClient here means the client (and any implicit
# auth/token lookup) is created at import time of this module.
model_name = "Qwen/Qwen2.5-72B-Instruct"
client = InferenceClient(model_name)

def llm_inference(user_sample):
    """Send *user_sample* to the chat model and return the generated code.

    Parameters
    ----------
    user_sample : str
        The user's topic or code snippet, embedded into a code-only prompt.

    Returns
    -------
    str
        The concatenated ``message.content`` of every returned choice
        (empty string if the model returned no content).
    """
    eos_token = "<|endoftext|>"
    output = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": "You are a Python language guide. Write code on the user topic. If the input is code, correct it for mistakes."
            },
            {
                "role": "user",
                "content": f"Write only python code without any explanation: {user_sample}"
            },
        ],
        stream=False,
        temperature=0.7,
        top_p=0.1,
        max_tokens=412,
        stop=[eos_token]
    )
    # huggingface_hub returns dataclass-style output objects; the documented
    # access path is attribute access (choice.message.content), not dict
    # subscripting. Guard against None content on a choice.
    return "".join(choice.message.content or "" for choice in output.choices)

def execute_code(code):
    """Execute a string of Python code and capture what it prints.

    SECURITY NOTE: ``exec`` runs arbitrary code with the privileges of this
    process; this is only acceptable for trusted input or a sandboxed
    deployment.

    Parameters
    ----------
    code : str
        Python source to execute in a fresh, empty global namespace.

    Returns
    -------
    str
        Everything the code wrote to stdout, or an ``"Error: ..."`` message
        with a traceback if execution raised.
    """
    buffer = io.StringIO()
    try:
        # redirect_stdout restores the real sys.stdout automatically,
        # even if exec() raises — no manual save/restore needed.
        with contextlib.redirect_stdout(buffer):
            exec(code, {})
        return buffer.getvalue()
    except Exception as e:
        return f"Error: {e}\n{traceback.format_exc()}"

def is_math_task(user_input):
    """Heuristically decide whether *user_input* describes a math problem.

    True when the text contains a math-related keyword (matched
    case-insensitively) or a math operator/function token (matched on the
    raw input). Intentionally simple; may be replaced by NLP later.
    """
    keyword_tokens = ('calculate', 'compute', 'solve', 'integrate',
                      'differentiate', 'derivative', 'integral',
                      'factorial', 'sum', 'product')
    operator_tokens = ('+', '-', '*', '/', '^', '**',
                       'sqrt', 'sin', 'cos', 'tan', 'log', 'exp')
    lowered = user_input.lower()
    if any(token in lowered for token in keyword_tokens):
        return True
    return any(token in user_input for token in operator_tokens)

def chat(user_input, history):
    """Handle one chat turn.

    Inputs that look like math tasks are converted to generated Python
    code, stripped of markdown fences, executed, and reported with their
    output; everything else is answered directly by the model. The
    (user, assistant) pair is appended to *history*, which is returned
    twice because the Gradio callback wires it to two outputs.
    """
    if not is_math_task(user_input):
        # Plain conversation: forward the message straight to the model.
        assistant_response = llm_inference(user_input)
    else:
        # Ask the model for a solver program for this problem.
        generated_code = llm_inference(
            f"Create a Python program to solve the following math problem:\n{user_input}"
        )

        # Drop markdown code fences: first ```python / ``` openings
        # (with optional trailing newline), then any stray ``` left over.
        cleaned_code = re.sub(r"```(?:python)?\n?", "", generated_code).strip()
        cleaned_code = re.sub(r"```", "", cleaned_code).strip()

        # Run the cleaned program and capture its stdout / error report.
        execution_result = execute_code(cleaned_code)

        assistant_response = (
            f"**Generated Python Code:**\n```python\n{cleaned_code}\n```\n\n"
            f"**Execution Result:**\n```\n{execution_result}\n```"
        )

    history.append((user_input, assistant_response))
    return history, history

# --- Gradio UI definition (runs at import time) --------------------------
with gr.Blocks() as demo:
    gr.Markdown("# 🐍 Python Helper Chatbot")
    with gr.Tab("Chat"):
        # Chat tab: submitting the textbox routes through chat(), which
        # appends (user, assistant) pairs to the Chatbot state.
        chatbot = gr.Chatbot()
        msg = gr.Textbox(placeholder="Type your message here...")
        msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
    
    with gr.Tab("Interpreter"):
        # Interpreter tab: runs user-entered code via execute_code().
        gr.Markdown("### πŸ–₯️ Test Your Code")
        code_input = gr.Code(language="python")
        run_button = gr.Button("Run Code")
        code_output = gr.Textbox(label="Output")
        run_button.click(execute_code, inputs=code_input, outputs=code_output)
    
    with gr.Tab("Logs"):
        # NOTE(review): log_output is never written to by any callback —
        # this tab currently displays nothing. Wire it up or remove it.
        gr.Markdown("### πŸ“œ Logs")
        log_output = gr.Textbox(label="Logs", lines=10, interactive=False)

# Launch the Gradio app
demo.launch()