baconnier committed
Commit 5e4f469
1 Parent(s): bd73157

Update app.py

Files changed (1)
  1. app.py +230 -0
app.py CHANGED
@@ -281,6 +281,236 @@ class GradioInterface:
  self.interface.launch(share=share)
 
 
+custom_css = """
+.container {
+    border: 2px solid #2196F3;
+    border-radius: 10px;
+    padding: 12px;
+    margin: 6px;
+    background: white;
+    position: relative;
+    width: 100% !important;
+    max-width: 1200px !important;
+    margin: 0 auto 20px auto !important;
+}
+
+.container::before {
+    position: absolute;
+    top: -10px;
+    left: 20px;
+    background: white;
+    padding: 0 10px;
+    color: #2196F3;
+    font-weight: bold;
+    font-size: 1.2em;
+}
+
+.title-container {
+    width: fit-content !important;
+    margin: 0 auto !important;
+    padding: 2px 40px !important;
+    border: 1px solid #0066cc !important;
+    border-radius: 10px !important;
+    background-color: rgba(0, 102, 204, 0.05) !important;
+}
+
+.title-container * {
+    text-align: center;
+    margin: 0 !important;
+    line-height: 1.2 !important;
+}
+
+.title-container h1 {
+    font-size: 28px !important;
+    margin-bottom: 1px !important;
+}
+
+.title-container h3 {
+    font-size: 18px !important;
+    margin-bottom: 1px !important;
+}
+
+.title-container p {
+    font-size: 14px !important;
+    margin-bottom: 1px !important;
+}
+
+.input-container::before {
+    content: 'PROMPT REFINEMENT';
+}
+
+.analysis-container::before {
+    content: 'ANALYSIS';
+}
+
+.model-container::before {
+    content: 'MODEL APPLICATION';
+}
+
+.examples-container::before {
+    content: 'EXAMPLES';
+}
+
+/* Resizable textbox */
+.input-container textarea {
+    resize: vertical !important;
+    min-height: 100px !important;
+    max-height: 500px !important;
+    width: 100% !important;
+    border: 1px solid #ddd !important;
+    border-radius: 4px !important;
+    padding: 8px !important;
+    transition: all 0.3s ease !important;
+}
+
+.input-container textarea:focus {
+    border-color: #2196F3 !important;
+    box-shadow: 0 0 0 2px rgba(33, 150, 243, 0.1) !important;
+}
+
+/* Radio group styling */
+.radio-group {
+    background-color: rgba(0, 102, 204, 0.05) !important;
+    padding: 10px !important;
+    border-radius: 8px !important;
+    border: 1px solid rgba(0, 102, 204, 0.1) !important;
+    display: flex !important;
+    justify-content: center !important;
+    flex-wrap: wrap !important;
+    gap: 8px !important;
+    width: 100% !important;
+}
+
+.gradio-radio {
+    display: flex !important;
+    justify-content: center !important;
+    flex-wrap: wrap !important;
+    gap: 8px !important;
+}
+
+.gradio-radio label {
+    display: flex !important;
+    align-items: center !important;
+    padding: 6px 12px !important;
+    border: 1px solid #ddd !important;
+    border-radius: 4px !important;
+    cursor: pointer !important;
+    background: white !important;
+    margin: 4px !important;
+}
+
+.gradio-radio input[type="radio"]:checked + label {
+    background: rgba(0, 102, 204, 0.1) !important;
+    border-color: #0066cc !important;
+    color: #0066cc !important;
+    font-weight: bold !important;
+}
+
+/* Button styling */
+.gradio-button {
+    background-color: white !important;
+    color: #2196F3 !important;
+    border: 2px solid #2196F3 !important;
+    border-radius: 4px !important;
+    padding: 8px 16px !important;
+    margin: 10px 0 !important;
+    font-weight: bold !important;
+    transition: all 0.3s ease !important;
+}
+
+.gradio-button:hover {
+    background-color: #2196F3 !important;
+    color: white !important;
+    box-shadow: 0 2px 5px rgba(33, 150, 243, 0.3) !important;
+}
+
+/* Accordion styling */
+.gradio-accordion {
+    margin: 10px 0 !important;
+    border: none !important;
+}
+
+/* Container alignment */
+.gradio-container {
+    display: flex !important;
+    flex-direction: column !important;
+    align-items: center !important;
+    width: 100% !important;
+    max-width: 1200px !important;
+    margin: 0 auto !important;
+}
+
+/* Dropdown styling */
+.gradio-dropdown {
+    width: 100% !important;
+    max-width: 300px !important;
+}
+
+/* JSON container */
+.full-response-json {
+    margin-top: 20px !important;
+    padding: 10px !important;
+    background-color: rgba(0, 102, 204, 0.05) !important;
+    border-radius: 8px !important;
+}
+"""
+
+metaprompt_explanations = {
+    "star": "Use ECHO when you need a comprehensive, multi-stage approach for complex prompts. It's ideal for tasks requiring in-depth analysis, exploration of multiple alternatives, and synthesis of ideas. Choose this over others when you have time for a thorough refinement process and need to consider various aspects of the prompt.",
+    "done": "Opt for this when you want a structured approach with emphasis on role-playing and advanced techniques. It's particularly useful for tasks that benefit from diverse perspectives and complex reasoning. Prefer this over 'physics' when you need a more detailed, step-by-step refinement process.",
+    "physics": "Select this when you need a balance between structure and advanced techniques, with a focus on role-playing. It's similar to 'done' but may be more suitable for scientific or technical prompts. Choose this over 'done' for a slightly less complex approach.",
+    "morphosis": "Use this simplified approach for straightforward prompts or when time is limited. It focuses on essential improvements without complex techniques. Prefer this over other methods when you need quick, clear refinements without extensive analysis.",
+    "verse": "Choose this method when you need to analyze and improve a prompt's strengths and weaknesses, with a focus on information flow. It's particularly useful for enhancing the logical structure of prompts. Use this over 'morphosis' when you need more depth but less complexity than 'star'.",
+    "phor": "Employ this advanced approach when you need to combine multiple prompt engineering techniques. It's ideal for complex tasks requiring both clarity and sophisticated prompting methods. Select this over 'star' when you want a more flexible, technique-focused approach.",
+    "bolism": "Utilize this method when working with autoregressive language models and when the task requires careful reasoning before conclusions. It's best for prompts that need detailed output formatting. Choose this over others when the prompt's structure and reasoning order are crucial."
+}
+
+models = [
+    # Meta-Llama models (all support system)
+    "meta-llama/Meta-Llama-3-70B-Instruct",
+    "meta-llama/Meta-Llama-3-8B-Instruct",
+    "meta-llama/Llama-3.1-70B-Instruct",
+    "meta-llama/Llama-3.1-8B-Instruct",
+    "meta-llama/Llama-3.2-3B-Instruct",
+    "meta-llama/Llama-3.2-1B-Instruct",
+    "meta-llama/Llama-2-13b-chat-hf",
+    "meta-llama/Llama-2-7b-chat-hf",
+
+    # HuggingFaceH4 models (support system)
+    "HuggingFaceH4/zephyr-7b-beta",
+    "HuggingFaceH4/zephyr-7b-alpha",
+
+    # Qwen models (support system)
+    "Qwen/Qwen2.5-72B-Instruct",
+    "Qwen/Qwen2.5-1.5B",
+
+    # Google models (supports system)
+    "google/gemma-1.1-2b-it"
+]
+
+explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()])
+
+
+import os
+
+meta_info=""
+api_token = os.getenv('HF_API_TOKEN')
+if not api_token:
+    raise ValueError("HF_API_TOKEN not found in environment variables")
+
+metadone = os.getenv('metadone')
+prompt_refiner_model = os.getenv('prompt_refiner_model')
+echo_prompt_refiner = os.getenv('echo_prompt_refiner')
+metaprompt1 = os.getenv('metaprompt1')
+loic_metaprompt = os.getenv('loic_metaprompt')
+openai_metaprompt = os.getenv('openai_metaprompt')
+original_meta_prompt = os.getenv('original_meta_prompt')
+new_meta_prompt = os.getenv('new_meta_prompt')
+advanced_meta_prompt = os.getenv('advanced_meta_prompt')
+math_meta_prompt = os.getenv('metamath')
+autoregressive_metaprompt = os.getenv('autoregressive_metaprompt')
+
+
 if __name__ == '__main__':
     meta_info=""
     api_token = os.getenv('HF_API_TOKEN')
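
Note: this hunk only adds module-level definitions; the Gradio layout that consumes them is outside the diff. The following is a minimal, hypothetical sketch (not code from this commit) of how `custom_css`, `metaprompt_explanations`, `explanation_markdown`, and `models` could be wired into a `gr.Blocks` UI; the component layout, labels, and title text are assumptions, and the sketch relies on the module-level names defined above.

```python
# Hypothetical wiring sketch, NOT part of this commit.
# Assumes custom_css, metaprompt_explanations, explanation_markdown and models
# are the module-level values added in the diff above.
import gradio as gr

with gr.Blocks(css=custom_css) as demo:
    # Title block styled by the .title-container rules from custom_css
    with gr.Column(elem_classes=["container", "title-container"]):
        gr.Markdown("# Prompt Refiner")  # placeholder title, not from the commit

    # Input block styled by .input-container (gets the 'PROMPT REFINEMENT' badge)
    with gr.Column(elem_classes=["container", "input-container"]):
        prompt_text = gr.Textbox(label="Your prompt", lines=5)
        meta_prompt_choice = gr.Radio(
            choices=list(metaprompt_explanations.keys()),  # keys from the diff
            label="Refinement method",
            elem_classes=["radio-group"],
        )
        with gr.Accordion("Metaprompt explanations", open=False):
            gr.Markdown(explanation_markdown)  # bullet list built in the diff

    # Model block styled by .model-container (gets the 'MODEL APPLICATION' badge)
    with gr.Column(elem_classes=["container", "model-container"]):
        apply_model = gr.Dropdown(choices=models, value=models[0], label="Model")

demo.launch()
```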
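A second observation: the commit raises only when `HF_API_TOKEN` is missing, while the other `os.getenv` calls silently return `None` if a Space secret is unset. A small, hypothetical guard (again, not part of the commit) that fails fast uses exactly the secret names read in the diff above:

```python
import os

# Hypothetical guard, not in this commit: the names below are the arguments
# passed to os.getenv() in the added code above.
REQUIRED_SECRETS = [
    "prompt_refiner_model", "echo_prompt_refiner", "metadone", "metaprompt1",
    "loic_metaprompt", "openai_metaprompt", "original_meta_prompt",
    "new_meta_prompt", "advanced_meta_prompt", "metamath",
    "autoregressive_metaprompt",
]

missing = [name for name in REQUIRED_SECRETS if not os.getenv(name)]
if missing:
    raise ValueError(f"Missing environment variables/secrets: {', '.join(missing)}")
```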