import gradio as gr
from huggingface_hub import InferenceClient
import os  # For reading prompt templates from environment variables

# Initialize the Hugging Face Inference Client.
# With no explicit token, it falls back to the HF_TOKEN environment variable
# or the token cached by `huggingface-cli login`.
client = InferenceClient()

# Generator function for streaming the AI response: yields the text
# accumulated so far after each streamed chunk.
def generate_response(prompt_template, **kwargs):
    # The prompt template lives in an environment variable (e.g. a Space secret)
    # whose name is passed in as `prompt_template`.
    template = os.getenv(prompt_template)
    if template is None:
        raise ValueError(f"Missing prompt template environment variable: {prompt_template}")
    prompt = template.format(**kwargs)
    stream = client.chat.completions.create(
        model="Qwen/Qwen2.5-Math-7B-Instruct",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
        max_tokens=1024,
        top_p=0.8,
        stream=True,
    )
    response = ""
    for chunk in stream:
        # `delta.content` can be None on some chunks, so default to "".
        response += chunk.choices[0].delta.content or ""
        yield response  # Yield the response accumulated so far

# Wrapper function for Gradio compatibility: consumes the stream and returns
# the final text. (Joining the yielded strings would duplicate content,
# because each yield is already cumulative.)
def stream_response(prompt_template, **kwargs):
    final = ""
    for partial in generate_response(prompt_template, **kwargs):
        final = partial
    return final
# Gradio app interface
with gr.Blocks() as app:
    gr.Markdown("## Mathematical Insight Tutor")
    gr.Markdown("An advanced AI-powered tutor to help you master math concepts.")

    # Helper that builds one tab: its input widgets, an action button, and an output box.
    def create_tab(tab_name, prompt_template, inputs):
        with gr.Tab(tab_name):
            input_fields = []
            for inp in inputs:
                if inp["type"] == "textbox":
                    input_fields.append(
                        gr.Textbox(lines=inp.get("lines", 1), label=inp["label"], placeholder=inp["placeholder"])
                    )
                elif inp["type"] == "dropdown":
                    input_fields.append(
                        gr.Dropdown(choices=inp["choices"], label=inp["label"])
                    )
                elif inp["type"] == "value":
                    input_fields.append(
                        gr.Textbox(label=inp["label"], placeholder=inp["placeholder"])
                    )
            # Button and output
            button = gr.Button(f"{tab_name} Execute")
            output = gr.Textbox(lines=10, label="Output", placeholder="Generated response will appear here.")
            # Link the button to the response wrapper; zip maps the positional
            # widget values back to the keyword names the prompt template expects.
            button.click(
                fn=lambda *args: stream_response(prompt_template, **dict(zip([inp["key"] for inp in inputs], args))),
                inputs=input_fields,
                outputs=output,
            )
    # Tabs for functionalities
    create_tab(
        "Solve a Problem",
        "PROMPT_SOLVE",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
    create_tab(
        "Generate a Hint",
        "PROMPT_HINT",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem for Hint", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
    create_tab(
        "Verify Solution",
        "PROMPT_VERIFY",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "solution", "type": "value", "label": "Enter Your Solution", "placeholder": "e.g., x = 5"}
        ]
    )
    create_tab(
        "Generate Practice Question",
        "PROMPT_GENERATE",
        [
            {"key": "topic", "type": "textbox", "label": "Enter Math Topic", "placeholder": "e.g., Algebra, Calculus"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
    create_tab(
        "Explain Concept",
        "PROMPT_EXPLAIN",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
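
# The prompt templates themselves are expected as environment variables /
# Space secrets named after the second argument of each create_tab call.
# The lines below are illustrative placeholders only (the real templates are
# not part of this file); format keys must match each tab's "key" fields:
#
#   PROMPT_SOLVE    = "Solve this {difficulty} problem step by step: {problem}"
#   PROMPT_HINT     = "Give a hint, not the full solution, for this {difficulty} problem: {problem}"
#   PROMPT_VERIFY   = "Check whether this solution is correct. Problem: {problem} Solution: {solution}"
#   PROMPT_GENERATE = "Write one {difficulty} practice question on: {topic}"
#   PROMPT_EXPLAIN  = "Explain the concepts behind this {difficulty} problem: {problem}"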
# Launch the app
app.launch(debug=True)
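
# To run locally (assumed setup, not specified in this Space): install
# `gradio` and `huggingface_hub`, export HF_TOKEN plus the PROMPT_* variables
# described above, then start this script with Python.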