Update app.py
app.py CHANGED
@@ -5,11 +5,9 @@ import os # For environment variables
 # Initialize the Hugging Face Inference Client
 client = InferenceClient()
 
-#
+# Generator function for streaming AI response
 def generate_response(prompt_template, **kwargs):
-    # Format the prompt with provided arguments
     prompt = os.getenv(prompt_template).format(**kwargs)
-    # Stream the response from the model
     stream = client.chat.completions.create(
         model="Qwen/QwQ-32B-Preview",
         messages=[{"role": "user", "content": prompt}],
@@ -18,22 +16,23 @@ def generate_response(prompt_template, **kwargs):
         top_p=0.8,
         stream=True
     )
-    # Stream chunks as they are generated
     response = ""
     for chunk in stream:
         response += chunk.choices[0].delta.content
-        yield response
+        yield response  # Yield incrementally
+
+# Wrapper function for Gradio compatibility
+def stream_response(prompt_template, **kwargs):
+    return "".join(generate_response(prompt_template, **kwargs))
 
 # Gradio app interface
 with gr.Blocks() as app:
     gr.Markdown("## Mathematical Insight Tutor")
     gr.Markdown("An advanced AI-powered tutor to help you master math concepts.")
 
-    # Function to create a reusable tab with inputs and outputs
     def create_tab(tab_name, prompt_template, inputs):
         with gr.Tab(tab_name):
             input_fields = []
-            # Create input components dynamically
             for inp in inputs:
                 if inp["type"] == "textbox":
                     input_fields.append(
@@ -49,16 +48,15 @@ with gr.Blocks() as app:
             )
             # Button and output
             button = gr.Button(f"{tab_name} Execute")
-            output = gr.
-            # Link button to the response
+            output = gr.Textbox(lines=10, label="Output", placeholder="Generated response will appear here.")
+            # Link button to the response wrapper
             button.click(
-                fn=lambda *args:
+                fn=lambda *args: stream_response(prompt_template, **dict(zip([inp["key"] for inp in inputs], args))),
                 inputs=input_fields,
-                outputs=output
-                stream=True  # Enable streaming for Gradio
+                outputs=output
             )
 
-    # Tabs for
+    # Tabs for functionalities
     create_tab(
         "Solve a Problem",
         "PROMPT_SOLVE",
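On the Gradio side, click() streams automatically when fn is a generator function, so a wrapper like stream_response is not needed for streaming; and because generate_response yields cumulative text, "".join(...) over its yields repeats earlier fragments, while keeping only the last yielded value would give the complete answer. A sketch of wiring the generator directly, assuming a single-textbox tab and a hypothetical input named "problem":

import gradio as gr

def solve_problem(problem):
    # A generator function: Gradio detects this and streams each yield
    # into the output Textbox; click() itself takes no stream argument.
    yield from generate_response("PROMPT_SOLVE", problem=problem)

with gr.Blocks() as app:
    problem = gr.Textbox(label="Problem")
    output = gr.Textbox(lines=10, label="Output")
    button = gr.Button("Solve a Problem Execute")
    button.click(fn=solve_problem, inputs=problem, outputs=output)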