yaleh committed on
Commit
aaa4147
·
1 Parent(s): ac0b1c8

Updated layout.

Browse files
app/gradio_sample_generator.py CHANGED
@@ -1,5 +1,6 @@
 
1
  import gradio as gr
2
- from langchain_community.chat_models import ChatOpenAI
3
  from meta_prompt.sample_generator import TaskDescriptionGenerator
4
 
5
  def process_json(input_json, model_name, generating_batch_size, temperature):
 
1
+ import json
2
  import gradio as gr
3
+ from langchain_openai import ChatOpenAI
4
  from meta_prompt.sample_generator import TaskDescriptionGenerator
5
 
6
  def process_json(input_json, model_name, generating_batch_size, temperature):
app/streamlit_tab_app.py CHANGED
@@ -487,6 +487,10 @@ def generate_callback():
487
  except Exception as e:
488
  st.error(f"Error: {e}")
489
 
 
 
 
 
490
  # Meta Prompt Config
491
 
492
  pre_config_sources = [
@@ -605,7 +609,10 @@ with tab_prompting:
605
  st.markdown("Generate the prompt with the above input-output pairs.")
606
 
607
  # Create options for the selectbox
608
- sample_options = [f"Sample {i}: {row['Input'][:30]}..." for i, row in data_editor_data.iterrows()]
 
 
 
609
 
610
  # Create the selectbox
611
  selected_sample = st.selectbox(
@@ -619,124 +626,137 @@ with tab_prompting:
619
  on_click=generate_callback,
620
  type="primary", use_container_width=True)
621
 
622
- col1, col2 = st.columns(2)
623
 
624
- with col1:
625
- with st.expander("Advanced Inputs"):
626
- initial_system_message = st.text_area(
627
- "Initial System Message",
628
- key="initial_system_message"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
629
  )
630
 
631
- col1_1, col1_2 = st.columns(2)
632
- with col1_1:
633
- pull_sample_description_button = st.button("Pull Scope Description", key="pull_sample_description",
634
- on_click=pull_sample_description)
635
- with col1_2:
636
- st.button("Pull Output", key="copy_system_message",
637
- on_click=copy_system_message)
638
- initial_acceptance_criteria = st.text_area(
639
- "Acceptance Criteria",
640
- key="initial_acceptance_criteria"
641
  )
642
- st.button("Pull Output", key="copy_acceptance_criteria",
643
- on_click=copy_acceptance_criteria)
644
-
645
- # New expander for model settings
646
- with st.expander("Model Settings"):
647
- model_tab = st.selectbox("Select Model Type", ["Simple", "Advanced", "Expert"], key="model_tab")
648
-
649
- if model_tab == "Simple":
650
- simple_model_name_input = st.selectbox(
651
- "Model Name",
652
- config.llms.keys(),
653
- index=0,
654
- )
655
- elif model_tab == "Advanced":
656
- advanced_optimizer_model_name_input = st.selectbox(
657
- "Optimizer Model Name",
658
- config.llms.keys(),
659
- index=0,
660
- )
661
- advanced_executor_model_name_input = st.selectbox(
662
- "Executor Model Name",
663
- config.llms.keys(),
664
- index=1,
665
- )
666
- else: # Expert
667
- expert_prompt_initial_developer_model_name_input = st.selectbox(
668
- "Initial Developer Model Name",
669
- config.llms.keys(),
670
- index=0,
671
- )
672
- expert_prompt_initial_developer_temperature_input = st.slider(
673
- "Initial Developer Temperature", 0.0, 1.0, 0.1, 0.1
674
- )
675
-
676
- expert_prompt_acceptance_criteria_model_name_input = st.selectbox(
677
- "Acceptance Criteria Model Name",
678
- config.llms.keys(),
679
- index=0,
680
- )
681
- expert_prompt_acceptance_criteria_temperature_input = st.slider(
682
- "Acceptance Criteria Temperature", 0.0, 1.0, 0.1, 0.1
683
- )
684
-
685
- expert_prompt_developer_model_name_input = st.selectbox(
686
- "Developer Model Name", config.llms.keys(), index=0
687
- )
688
- expert_prompt_developer_temperature_input = st.slider(
689
- "Developer Temperature", 0.0, 1.0, 0.1, 0.1
690
- )
691
-
692
- expert_prompt_executor_model_name_input = st.selectbox(
693
- "Executor Model Name", config.llms.keys(), index=1
694
- )
695
- expert_prompt_executor_temperature_input = st.slider(
696
- "Executor Temperature", 0.0, 1.0, 0.1, 0.1
697
- )
698
-
699
- expert_prompt_output_history_analyzer_model_name_input = st.selectbox(
700
- "Output History Analyzer Model Name",
701
- config.llms.keys(),
702
- index=0,
703
- )
704
- expert_prompt_output_history_analyzer_temperature_input = st.slider(
705
- "Output History Analyzer Temperature", 0.0, 1.0, 0.1, 0.1
706
- )
707
-
708
- expert_prompt_analyzer_model_name_input = st.selectbox(
709
- "Analyzer Model Name", config.llms.keys(), index=0
710
- )
711
- expert_prompt_analyzer_temperature_input = st.slider(
712
- "Analyzer Temperature", 0.0, 1.0, 0.1, 0.1
713
- )
714
-
715
- expert_prompt_suggester_model_name_input = st.selectbox(
716
- "Suggester Model Name", config.llms.keys(), index=0
717
- )
718
- expert_prompt_suggester_temperature_input = st.slider(
719
- "Suggester Temperature", 0.0, 1.0, 0.1, 0.1
720
- )
721
-
722
- prompt_template_group_input = st.selectbox(
723
- "Prompt Template Group", config.prompt_templates.keys(), index=0
724
  )
725
 
726
- recursion_limit_input = st.number_input("Recursion Limit", 1, 100, 16, 1)
727
- max_output_age_input = st.number_input("Max Output Age", 1, 10, 2, 1)
728
- aggressive_exploration_input = st.checkbox("Aggressive Exploration", False)
 
 
 
729
 
730
- with col2:
731
- system_message_output = st.text_area("System Message",
732
- key="system_message_output",
733
- height=100)
 
 
734
 
735
- acceptance_criteria_output = st.text_area(
736
- "Acceptance Criteria",
737
- key="acceptance_criteria_output",
738
- height=100)
739
- st.text_area("Output", st.session_state.output, height=100)
740
- st.text_area("Analysis", st.session_state.analysis, height=100)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
741
 
742
- st.json(st.session_state.chat_log, expanded=False)
 
487
  except Exception as e:
488
  st.error(f"Error: {e}")
489
 
490
+ def clear_advanced_inputs():
491
+ st.session_state.initial_system_message = ""
492
+ st.session_state.initial_acceptance_criteria = ""
493
+
494
  # Meta Prompt Config
495
 
496
  pre_config_sources = [
 
609
  st.markdown("Generate the prompt with the above input-output pairs.")
610
 
611
  # Create options for the selectbox
612
+ try:
613
+ sample_options = [f"Sample {i}: {row['Input'][:30]}..." for i, row in data_editor_data.iterrows()]
614
+ except Exception as e:
615
+ sample_options = []
616
 
617
  # Create the selectbox
618
  selected_sample = st.selectbox(
 
626
  on_click=generate_callback,
627
  type="primary", use_container_width=True)
628
 
 
629
 
630
+ with st.expander("Advanced Inputs"):
631
+ initial_system_message = st.text_area(
632
+ "Initial System Message",
633
+ key="initial_system_message",
634
+ height=200,
635
+ placeholder="Enter the initial system message. It will be used as the base message for the prompt."
636
+ )
637
+
638
+ col1_1, col1_2 = st.columns(2)
639
+ with col1_1:
640
+ pull_sample_description_button = st.button("Pull Scope Description", key="pull_sample_description",
641
+ on_click=pull_sample_description)
642
+ with col1_2:
643
+ st.button("Pull Output", key="copy_system_message",
644
+ on_click=copy_system_message)
645
+ initial_acceptance_criteria = st.text_area(
646
+ "Acceptance Criteria",
647
+ key="initial_acceptance_criteria",
648
+ height=200,
649
+ placeholder="Enter the acceptance criteria. It will be used to evaluate the output."
650
+ )
651
+ st.button("Pull Output", key="copy_acceptance_criteria",
652
+ on_click=copy_acceptance_criteria)
653
+
654
+ st.button("Clear", on_click=clear_advanced_inputs)
655
+
656
+ # New expander for model settings
657
+ with st.expander("Model Settings"):
658
+ model_tab = st.selectbox("Select Model Type", ["Simple", "Advanced", "Expert"], key="model_tab")
659
+
660
+ if model_tab == "Simple":
661
+ simple_model_name_input = st.selectbox(
662
+ "Model Name",
663
+ config.llms.keys(),
664
+ index=0,
665
+ )
666
+ elif model_tab == "Advanced":
667
+ advanced_optimizer_model_name_input = st.selectbox(
668
+ "Optimizer Model Name",
669
+ config.llms.keys(),
670
+ index=0,
671
+ )
672
+ advanced_executor_model_name_input = st.selectbox(
673
+ "Executor Model Name",
674
+ config.llms.keys(),
675
+ index=1,
676
+ )
677
+ else: # Expert
678
+ expert_prompt_initial_developer_model_name_input = st.selectbox(
679
+ "Initial Developer Model Name",
680
+ config.llms.keys(),
681
+ index=0,
682
+ )
683
+ expert_prompt_initial_developer_temperature_input = st.slider(
684
+ "Initial Developer Temperature", 0.0, 1.0, 0.1, 0.1
685
  )
686
 
687
+ expert_prompt_acceptance_criteria_model_name_input = st.selectbox(
688
+ "Acceptance Criteria Model Name",
689
+ config.llms.keys(),
690
+ index=0,
 
 
 
 
 
 
691
  )
692
+ expert_prompt_acceptance_criteria_temperature_input = st.slider(
693
+ "Acceptance Criteria Temperature", 0.0, 1.0, 0.1, 0.1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
694
  )
695
 
696
+ expert_prompt_developer_model_name_input = st.selectbox(
697
+ "Developer Model Name", config.llms.keys(), index=0
698
+ )
699
+ expert_prompt_developer_temperature_input = st.slider(
700
+ "Developer Temperature", 0.0, 1.0, 0.1, 0.1
701
+ )
702
 
703
+ expert_prompt_executor_model_name_input = st.selectbox(
704
+ "Executor Model Name", config.llms.keys(), index=1
705
+ )
706
+ expert_prompt_executor_temperature_input = st.slider(
707
+ "Executor Temperature", 0.0, 1.0, 0.1, 0.1
708
+ )
709
 
710
+ expert_prompt_output_history_analyzer_model_name_input = st.selectbox(
711
+ "Output History Analyzer Model Name",
712
+ config.llms.keys(),
713
+ index=0,
714
+ )
715
+ expert_prompt_output_history_analyzer_temperature_input = st.slider(
716
+ "Output History Analyzer Temperature", 0.0, 1.0, 0.1, 0.1
717
+ )
718
+
719
+ expert_prompt_analyzer_model_name_input = st.selectbox(
720
+ "Analyzer Model Name", config.llms.keys(), index=0
721
+ )
722
+ expert_prompt_analyzer_temperature_input = st.slider(
723
+ "Analyzer Temperature", 0.0, 1.0, 0.1, 0.1
724
+ )
725
+
726
+ expert_prompt_suggester_model_name_input = st.selectbox(
727
+ "Suggester Model Name", config.llms.keys(), index=0
728
+ )
729
+ expert_prompt_suggester_temperature_input = st.slider(
730
+ "Suggester Temperature", 0.0, 1.0, 0.1, 0.1
731
+ )
732
+
733
+ prompt_template_group_input = st.selectbox(
734
+ "Prompt Template Group", config.prompt_templates.keys(), index=0
735
+ )
736
+
737
+ recursion_limit_input = st.number_input("Recursion Limit", 1, 100, 16, 1)
738
+ max_output_age_input = st.number_input("Max Output Age", 1, 10, 2, 1)
739
+ aggressive_exploration_input = st.checkbox("Aggressive Exploration", False)
740
+
741
+ system_message_output = st.text_area(
742
+ "System Message",
743
+ key="system_message_output",
744
+ height=200,
745
+ placeholder="The generated system message will be shown here."
746
+ )
747
+ acceptance_criteria_output = st.text_area(
748
+ "Acceptance Criteria",
749
+ key="acceptance_criteria_output",
750
+ height=200,
751
+ placeholder="The (generated) acceptance criteria will be shown here."
752
+ )
753
+ output_output = st.text_area(
754
+ "Output", st.session_state.output, height=200,
755
+ placeholder="The output generated by the system message will be shown here."
756
+ )
757
+ analysis_output = st.text_area(
758
+ "Analysis", st.session_state.analysis, height=200,
759
+ placeholder="The analysis of the output will be shown here."
760
+ )
761
 
762
+ st.json(st.session_state.chat_log, expanded=False)
meta_prompt/sample_generator.py CHANGED
@@ -131,7 +131,7 @@ BRIEFS_PROMPT = [
131
 
132
  EXAMPLES_FROM_BRIEFS_PROMPT = [
133
  ("system", """{{
134
- "prompt": "Given the task type description, brief descriptions for new examples, and JSON example(s), generate {generating_batch_size} more input/output examples for this task type, strictly based on the brief descriptions. Ensure that the new examples are consistent with the brief descriptions and do not introduce any new information not present in the briefs. Output in JSON format, following `output_format`.",
135
  "output_format": "{{
136
  "examples": [
137
  {{
 
131
 
132
  EXAMPLES_FROM_BRIEFS_PROMPT = [
133
  ("system", """{{
134
+ "prompt": "Given the task type description, brief descriptions for new examples, and JSON example(s), generate {generating_batch_size} more input/output examples for this task type, strictly following the brief descriptions and task type description. Ensure that the new examples are consistent with the brief descriptions and do not introduce any new information not present in the briefs. Output in JSON format, following `output_format`. Validate the generated new examples with the task type description and brief descriptions.",
135
  "output_format": "{{
136
  "examples": [
137
  {{