Update app.py
app.py
CHANGED
@@ -145,86 +145,146 @@ def run_inference_and_analysis(task, system_prompt, input_text, temperature, top
     params
 )

-# Gradio interface
 def gradio_interface():
-    with gr.Blocks(theme=gr.themes.Base(), title="
-            <div style="
-                <img src="https://raw.githubusercontent.com/pixeltable/pixeltable/main/docs/source/data/pixeltable-logo-large.png"
             </div>
-        )
-        gr.Markdown(
-            """
-            # Prompt Engineering and LLM Studio
-
-            This application demonstrates how [Pixeltable](https://github.com/pixeltable/pixeltable) can be used for rapid and incremental prompt engineering
-            and model comparison workflows. It showcases Pixeltable's ability to directly store, version, index,
-            and transform data while providing an interactive interface to experiment with different prompts and models.
-
-            Remember, effective prompt engineering often requires experimentation and iteration. Use this tool to systematically improve your prompts and understand how different inputs and parameters affect the LLM outputs.
-            """
-        )

         with gr.Row():
             with gr.Column():

-            with gr.Column():
-                with gr.Accordion("How does it work?", open=False):
-                    gr.Markdown(
-                        """
-                        1. **Define your task**: This helps you keep track of different experiments.
-                        2. **Set up your prompt**: Enter a system prompt in the "System Prompt" field. Write your specific input or question in the "Input Text" field
-                        3. **Adjust parameters (optional)**: Adjust temperature, top_p, token limits, etc., to control the model's output.
-                        4. **Run the analysis**: Click the "Run Inference and Analysis" button.
-                        5. **Review the results**: Compare the responses from both models and exmaine the scores.
-                        6. **Iterate and refine**: Based on the results, refine your prompt or adjust parameters.
-                        """
-                    )

-        with gr.Row():
             with gr.Column():

         with gr.Tabs():
-            with gr.
-                history = gr.
-                    headers=["Task", "System Prompt", "Input Text"
-                    wrap=True
                 )

-            with gr.
-                responses = gr.
-                    headers=["Timestamp", "Open-Mistral-Nemo
-                    wrap=True
                 )

-            with gr.
-                analysis = gr.
                     headers=[
                         "Timestamp",
                         "Open-Mistral-Nemo Sentiment",
@@ -234,74 +294,66 @@ def gradio_interface():
                         "Open-Mistral-Nemo Readability",
                         "Mistral-Medium Readability"
                     ],
-                    wrap=True
                 )
-
-            with gr.Tab("Model Parameters"):
-                params = gr.Dataframe(
-                    headers=[
-                        "Timestamp",
-                        "Temperature",
-                        "Top P",
-                        "Max Tokens",
-                        "Stop Sequences",
-                        "Random Seed",
-                        "Safe Prompt"
-                    ],
-                    wrap=True
-                )
-
-        with gr.Column():
-            omn_response = gr.Textbox(label="Open-Mistral-Nemo Response")
-            ml_response = gr.Textbox(label="Mistral-Medium Response")
-
-            with gr.Row():
-                large_sentiment = gr.Number(label="Mistral-Medium Sentiment")
-                open_sentiment = gr.Number(label="Open-Mistral-Nemo Sentiment")
-
-            with gr.Row():
-                large_keywords = gr.Textbox(label="Mistral-Medium Keywords")
-                open_keywords = gr.Textbox(label="Open-Mistral-Nemo Keywords")
-
-            with gr.Row():
-                large_readability = gr.Number(label="Mistral-Medium Readability")
-                open_readability = gr.Number(label="Open-Mistral-Nemo Readability")

         submit_btn.click(
             run_inference_and_analysis,
             inputs=[task, system_prompt, input_text, temperature, top_p, max_tokens, stop, random_seed, safe_prompt],
-            outputs=[omn_response, ml_response, large_sentiment, open_sentiment, large_keywords, open_keywords,
         )

     return demo

 # Launch the Gradio interface

     params
 )

 def gradio_interface():
+    with gr.Blocks(theme=gr.themes.Base(), title="Pixeltable LLM Studio") as demo:
+        # Enhanced Header with Branding
+        gr.HTML("""
+            <div style="text-align: center; padding: 20px; background: linear-gradient(to right, #4F46E5, #7C3AED);" class="shadow-lg">
+                <img src="https://raw.githubusercontent.com/pixeltable/pixeltable/main/docs/source/data/pixeltable-logo-large.png"
+                     alt="Pixeltable" style="max-width: 200px; margin-bottom: 15px;" />
+                <h1 style="color: white; font-size: 2.5rem; margin-bottom: 10px;">LLM Studio</h1>
+                <p style="color: #E5E7EB; font-size: 1.1rem;">
+                    Powered by Pixeltable's Unified AI Data Infrastructure
+                </p>
             </div>
+        """)

+        # Product Overview Cards
         with gr.Row():
             with gr.Column():
+                gr.HTML("""
+                    <div style="padding: 20px; background-color: white; border-radius: 10px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); margin: 10px;">
+                        <h3 style="color: #4F46E5; margin-bottom: 10px;">Why Pixeltable?</h3>
+                        <ul style="list-style-type: none; padding-left: 0;">
+                            <li style="margin-bottom: 8px;">Unified data management for AI workflows</li>
+                            <li style="margin-bottom: 8px;">Automatic versioning and lineage tracking</li>
+                            <li style="margin-bottom: 8px;">Seamless model integration and deployment</li>
+                            <li style="margin-bottom: 8px;">Advanced querying and analysis capabilities</li>
+                        </ul>
+                    </div>
+                """)

             with gr.Column():
+                gr.HTML("""
+                    <div style="padding: 20px; background-color: white; border-radius: 10px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); margin: 10px;">
+                        <h3 style="color: #4F46E5; margin-bottom: 10px;">Features</h3>
+                        <ul style="list-style-type: none; padding-left: 0;">
+                            <li style="margin-bottom: 8px;">Compare multiple LLM models side-by-side</li>
+                            <li style="margin-bottom: 8px;">Track and analyze model performance</li>
+                            <li style="margin-bottom: 8px;">Experiment with different prompts and parameters</li>
+                            <li style="margin-bottom: 8px;">Automatic analysis with sentiment and readability scores</li>
+                        </ul>
+                    </div>
+                """)
+
+        # Main Interface
+        with gr.Tabs() as tabs:
+            with gr.TabItem("Experiment", id=0):
+                with gr.Row():
+                    with gr.Column(scale=1):
+                        gr.HTML("""
+                            <div style="padding: 15px; background-color: #F3F4F6; border-radius: 8px; margin-bottom: 15px;">
+                                <h3 style="color: #4F46E5; margin-bottom: 10px;">Experiment Setup</h3>
+                                <p style="color: #6B7280; font-size: 0.9rem;">Configure your prompt engineering experiment below</p>
+                            </div>
+                        """)
+
+                        task = gr.Textbox(
+                            label="Task Category",
+                            placeholder="e.g., Sentiment Analysis, Text Generation, Summarization",
+                            elem_classes="input-style"
+                        )
+                        system_prompt = gr.Textbox(
+                            label="System Prompt",
+                            placeholder="Define the AI's role and task...",
+                            lines=3,
+                            elem_classes="input-style"
+                        )
+                        input_text = gr.Textbox(
+                            label="Input Text",
+                            placeholder="Enter your prompt or text to analyze...",
+                            lines=4,
+                            elem_classes="input-style"
+                        )
+
+                        with gr.Accordion("Advanced Settings", open=False):
+                            temperature = gr.Slider(minimum=0, maximum=1, value=0.7, step=0.1, label="Temperature")
+                            top_p = gr.Slider(minimum=0, maximum=1, value=0.9, step=0.1, label="Top P")
+                            max_tokens = gr.Number(label="Max Tokens", value=300)
+                            stop = gr.Textbox(label="Stop Sequences (comma-separated)")
+                            random_seed = gr.Number(label="Random Seed", value=None)
+                            safe_prompt = gr.Checkbox(label="Safe Prompt", value=False)
+
+                        submit_btn = gr.Button(
+                            "Run Analysis",
+                            variant="primary",
+                            scale=1,
+                            min_width=200
+                        )
+
+                    with gr.Column(scale=1):
+                        gr.HTML("""
+                            <div style="padding: 15px; background-color: #F3F4F6; border-radius: 8px; margin-bottom: 15px;">
+                                <h3 style="color: #4F46E5; margin-bottom: 10px;">Results</h3>
+                                <p style="color: #6B7280; font-size: 0.9rem;">Compare model outputs and analysis metrics</p>
+                            </div>
+                        """)
+
+                        with gr.Group():
+                            omn_response = gr.Textbox(
+                                label="Open-Mistral-Nemo Response",
+                                elem_classes="output-style"
+                            )
+                            ml_response = gr.Textbox(
+                                label="Mistral-Medium Response",
+                                elem_classes="output-style"
+                            )

+                        with gr.Group():
+                            with gr.Row():
+                                with gr.Column():
+                                    gr.HTML("<h4>Sentiment Analysis</h4>")
+                                    large_sentiment = gr.Number(label="Mistral-Medium")
+                                    open_sentiment = gr.Number(label="Open-Mistral-Nemo")
+
+                                with gr.Column():
+                                    gr.HTML("<h4>Readability Scores</h4>")
+                                    large_readability = gr.Number(label="Mistral-Medium")
+                                    open_readability = gr.Number(label="Open-Mistral-Nemo")

+                            gr.HTML("<h4>Key Terms</h4>")
+                            with gr.Row():
+                                large_keywords = gr.Textbox(label="Mistral-Medium Keywords")
+                                open_keywords = gr.Textbox(label="Open-Mistral-Nemo Keywords")

+            with gr.TabItem("History & Analysis", id=1):
                 with gr.Tabs():
+                    with gr.TabItem("Prompt History"):
+                        history = gr.DataFrame(
+                            headers=["Timestamp", "Task", "System Prompt", "Input Text"],
+                            wrap=True,
+                            elem_classes="table-style"
                         )

+                    with gr.TabItem("Model Responses"):
+                        responses = gr.DataFrame(
+                            headers=["Timestamp", "Open-Mistral-Nemo", "Mistral-Medium"],
+                            wrap=True,
+                            elem_classes="table-style"
                         )

+                    with gr.TabItem("Analysis Results"):
+                        analysis = gr.DataFrame(
                             headers=[
                                 "Timestamp",
                                 "Open-Mistral-Nemo Sentiment",
                                 ...
                                 "Open-Mistral-Nemo Readability",
                                 "Mistral-Medium Readability"
                             ],
+                            wrap=True,
+                            elem_classes="table-style"
                         )

+        # Footer with links and additional info
+        gr.HTML("""
+            <div style="text-align: center; padding: 20px; margin-top: 30px; border-top: 1px solid #E5E7EB;">
+                <div style="margin-bottom: 20px;">
+                    <h3 style="color: #4F46E5;">Built with Pixeltable</h3>
+                    <p style="color: #6B7280;">The unified data infrastructure for AI applications</p>
+                </div>
+                <div style="display: flex; justify-content: center; gap: 20px;">
+                    <a href="https://github.com/pixeltable/pixeltable" target="_blank"
+                       style="color: #4F46E5; text-decoration: none;">
+                        Documentation
+                    </a>
+                    <a href="https://github.com/pixeltable/pixeltable" target="_blank"
+                       style="color: #4F46E5; text-decoration: none;">
+                        GitHub
+                    </a>
+                    <a href="https://join.slack.com/t/pixeltablecommunity/shared_invite/zt-21fybjbn2-fZC_SJiuG6QL~Ai8T6VpFQ" target="_blank"
+                       style="color: #4F46E5; text-decoration: none;">
+                        Community
+                    </a>
+                </div>
+            </div>
+        """)

+        # Custom CSS
+        gr.HTML("""
+            <style>
+                .input-style {
+                    border: 1px solid #E5E7EB !important;
+                    border-radius: 8px !important;
+                    padding: 12px !important;
+                }
+                .output-style {
+                    background-color: #F9FAFB !important;
+                    border-radius: 8px !important;
+                    padding: 12px !important;
+                }
+                .table-style {
+                    border-collapse: collapse !important;
+                    width: 100% !important;
+                }
+                .table-style th {
+                    background-color: #F3F4F6 !important;
+                    padding: 12px !important;
+                }
+            </style>
+        """)
+
+        # Setup event handlers
         submit_btn.click(
             run_inference_and_analysis,
             inputs=[task, system_prompt, input_text, temperature, top_p, max_tokens, stop, random_seed, safe_prompt],
+            outputs=[omn_response, ml_response, large_sentiment, open_sentiment, large_keywords, open_keywords,
+                     large_readability, open_readability, history, responses, analysis, params]
         )
+
     return demo

# Launch the Gradio interface
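The launch step itself sits just below this hunk and is not part of the diff. A minimal sketch of how the returned Blocks app is typically started, assuming a standard `__main__` guard and Gradio's default `launch()` call (neither is taken from this commit):

if __name__ == "__main__":
    demo = gradio_interface()  # build the Blocks app assembled above
    demo.launch()              # Gradio's standard launch; add share/server options here if needed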