import gradio as gr
from huggingface_hub import InferenceClient
import os # For environment variables
# Initialize the Hugging Face Inference Client.
# A token is picked up from the HF_TOKEN environment variable or a cached
# `huggingface-cli login`, if one is available.
client = InferenceClient()
# Generator function that streams the AI response token by token
def generate_response(prompt_template, **kwargs):
    # Prompt templates are stored in environment variables (e.g. Space secrets),
    # looked up by name, then filled in with the user's inputs.
    template = os.getenv(prompt_template)
    if template is None:
        raise gr.Error(f"Missing prompt template environment variable: {prompt_template}")
    prompt = template.format(**kwargs)
    stream = client.chat.completions.create(
        model="Qwen/Qwen2.5-Math-1.5B-Instruct",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
        max_tokens=1024,
        top_p=0.8,
        stream=True,
    )
    # Accumulate the streamed deltas so the Markdown output grows in place
    response = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            response += delta
            yield response
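# Each tab's prompt template is expected as an environment variable whose
# placeholders match that tab's input keys. The wording below is only an
# illustrative example, not the template actually shipped with the Space:
#   PROMPT_SOLVE="Solve the following {difficulty}-level problem step by step, formatting the math in LaTeX: {problem}"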
# Custom CSS for the LaTeX/Markdown output area
custom_css = """
#latex-output {
    font-family: "Computer Modern", serif;
    font-size: 16px;
    line-height: 1.5;
}
"""

# Gradio app interface (the CSS must be passed to the Blocks constructor)
with gr.Blocks(css=custom_css) as app:
    gr.Markdown("## Mathematical Insight Tutor")
    gr.Markdown("An advanced AI-powered tutor to help you master math concepts with step-by-step explanations and LaTeX rendering.")
    def create_tab(tab_name, prompt_template, inputs):
        with gr.Tab(tab_name):
            input_fields = []
            for inp in inputs:
                if inp["type"] == "textbox":
                    input_fields.append(
                        gr.Textbox(lines=inp.get("lines", 1), label=inp["label"], placeholder=inp["placeholder"])
                    )
                elif inp["type"] == "dropdown":
                    input_fields.append(
                        gr.Dropdown(choices=inp["choices"], label=inp["label"])
                    )
                elif inp["type"] == "value":
                    input_fields.append(
                        gr.Textbox(label=inp["label"], placeholder=inp["placeholder"])
                    )

            # Button and output
            button = gr.Button(f"{tab_name} Execute")
            output = gr.Markdown(label="Output", elem_id="latex-output")

            # Gradio only streams when the event handler itself is a generator
            # function, so wrap generate_response instead of using a lambda.
            keys = [inp["key"] for inp in inputs]

            def on_click(*args):
                # Map the positional component values back to the template's named keys
                yield from generate_response(prompt_template, **dict(zip(keys, args)))

            # Link the button to the streaming handler
            button.click(
                fn=on_click,
                inputs=input_fields,
                outputs=output,
                api_name=f"{tab_name.lower().replace(' ', '_')}_execute",
            )
    # Tabs for functionalities
    create_tab(
        "Solve a Problem",
        "PROMPT_SOLVE",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
    create_tab(
        "Generate a Hint",
        "PROMPT_HINT",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem for Hint", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
    create_tab(
        "Verify Solution",
        "PROMPT_VERIFY",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "solution", "type": "value", "label": "Enter Your Solution", "placeholder": "e.g., x = 5"}
        ]
    )
    create_tab(
        "Generate Practice Question",
        "PROMPT_GENERATE",
        [
            {"key": "topic", "type": "textbox", "label": "Enter Math Topic", "placeholder": "e.g., Algebra, Calculus"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
    create_tab(
        "Explain Concept",
        "PROMPT_EXPLAIN",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
# Launch the app
app.launch(debug=True)
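# A rough sketch of calling one of the named endpoints from another machine with
# gradio_client; the Space id is a placeholder and the endpoint name assumes the
# "Solve a Problem" tab registered above:
#   from gradio_client import Client
#   tutor = Client("<username>/<space-name>")
#   job = tutor.submit("Solve for x: 2x + 5 = 15", "Beginner",
#                      api_name="/solve_a_problem_execute")
#   for partial in job:
#       print(partial)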