import gradio as gr
from huggingface_hub import InferenceClient
import os # For environment variables
# Initialize the Hugging Face Inference Client
client = InferenceClient()
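# Note (assumption): on a private Space or for higher rate limits you would likely
# authenticate the client with a token stored as a Space secret, for example:
#   client = InferenceClient(token=os.getenv("HF_TOKEN"))  # "HF_TOKEN" is a hypothetical secret name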
# Generic function to generate an AI response with streaming
def generate_response(prompt_template, **kwargs):
    # Look up the prompt template, which is stored as an environment variable (Space secret)
    template = os.getenv(prompt_template)
    if template is None:
        yield f"Error: prompt template '{prompt_template}' is not set in the environment."
        return
    # Format the prompt with the provided arguments
    prompt = template.format(**kwargs)
    # Stream the response from the model
    stream = client.chat.completions.create(
        model="Qwen/QwQ-32B-Preview",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
        max_tokens=1024,
        top_p=0.8,
        stream=True
    )
    # Accumulate and yield chunks as they are generated
    response = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:  # the final chunk may carry no content
            response += delta
            yield response
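# Example (hypothetical) of what one of the prompt-template secrets might contain,
# since generate_response reads the template text from an environment variable:
#   PROMPT_SOLVE = "You are a math tutor. Solve this {difficulty}-level problem step by step: {problem}"
# The placeholder names must match the "key" fields passed to create_tab below.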
# Gradio app interface
with gr.Blocks() as app:
    gr.Markdown("## Mathematical Insight Tutor")
    gr.Markdown("An advanced AI-powered tutor to help you master math concepts.")

    # Helper to create a reusable tab with inputs, a button, and a streaming output
    def create_tab(tab_name, prompt_template, inputs):
        with gr.Tab(tab_name):
            input_fields = []
            # Create input components dynamically from the spec
            for inp in inputs:
                if inp["type"] == "textbox":
                    input_fields.append(
                        gr.Textbox(lines=inp.get("lines", 1), label=inp["label"], placeholder=inp["placeholder"])
                    )
                elif inp["type"] == "dropdown":
                    input_fields.append(
                        gr.Dropdown(choices=inp["choices"], label=inp["label"])
                    )
                elif inp["type"] == "value":
                    input_fields.append(
                        gr.Textbox(label=inp["label"], placeholder=inp["placeholder"])
                    )

            # Button and output
            button = gr.Button(f"{tab_name} Execute")
            output = gr.Markdown()

            # Wrap the generator so Gradio detects a streaming (generator) function;
            # a lambda returning a generator object would not stream.
            def on_click(*args):
                # Map the positional input values back to the template's keyword arguments
                kwargs = dict(zip([inp["key"] for inp in inputs], args))
                yield from generate_response(prompt_template, **kwargs)

            # Link the button to the streaming response generator
            # (note: click() has no "stream" argument; streaming comes from the generator itself)
            button.click(
                fn=on_click,
                inputs=input_fields,
                outputs=output
            )
    # Tabs for the various functionalities
    create_tab(
        "Solve a Problem",
        "PROMPT_SOLVE",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
    create_tab(
        "Generate a Hint",
        "PROMPT_HINT",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem for Hint", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
    create_tab(
        "Verify Solution",
        "PROMPT_VERIFY",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "solution", "type": "value", "label": "Enter Your Solution", "placeholder": "e.g., x = 5"}
        ]
    )
    create_tab(
        "Generate Practice Question",
        "PROMPT_GENERATE",
        [
            {"key": "topic", "type": "textbox", "label": "Enter Math Topic", "placeholder": "e.g., Algebra, Calculus"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
    create_tab(
        "Explain Concept",
        "PROMPT_EXPLAIN",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )
# Launch the app
app.launch(debug=True)
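# Note: streaming generator outputs go through Gradio's request queue. Recent Gradio
# releases enable the queue by default; on older versions you may need to call
# app.queue() before app.launch() (version assumption, check your installed Gradio).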