diff --git a/.gradio/certificate.pem b/.gradio/certificate.pem
new file mode 100644
index 0000000000000000000000000000000000000000..b85c8037f6b60976b2546fdbae88312c5246d9a3
--- /dev/null
+++ b/.gradio/certificate.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
diff --git a/.history/app_20250403101057.py b/.history/app_20250403101057.py
new file mode 100644
index 0000000000000000000000000000000000000000..289a5b645f687496860867560bac9640f8649d0a
--- /dev/null
+++ b/.history/app_20250403101057.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+# Note: only User is used directly in this predefined demo; the remaining reactagent imports are unused here.
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
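+# Demo flow gates, set in order as the user advances:
+# example loaded -> elements extracted -> idea generated -> agent started -> agent complete.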
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+# Placeholder titles for the two bundled examples; replaced below with the full example texts built by load_example().
+example_text = [
+    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
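+# Illustrative sketch (not part of the original file): judging from the fields this demo reads,
+# example/example_data.json is keyed by example id and shaped roughly as below. The concrete
+# values and file names are assumptions for illustration only.
+#
+# {
+#   "1": {
+#     "title": "Dataset and Baseline for Automatic Student Feedback Analysis",
+#     "abstract": "...",
+#     "research_tasks": "...",
+#     "research_gaps": "...",
+#     "keywords": "...",
+#     "recent_works": ["...", "..."],
+#     "hypothesis": "...",
+#     "experiment_plan": "...",
+#     "code_init": "code_init_1.py",
+#     "code_final": "code_final_1.py"
+#   }
+# }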
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+    return paper_text, "", "", "", "", "", ""  # paper text plus blanks for the six remaining output fields wired in gr.Examples
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+    if (not state_extract or not state_generate or not state_example):
+        return "", "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+    if (not state_extract or not state_generate or not state_agent or not state_example):
+        return "", "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+        msg += previous_response + "\nUser feedback: " + user_feedback + "\n\n"
+        response_info = process_steps[step_index]
+        response = info_to_message(response_info)  # Convert the step dictionary to a formatted string
+        response += "Please provide feedback based on the history, response entries, and observations, along with any questions: "
+ step_index += 1
+ msg += response
+ else:
+ state_complete = True
+ response = "Agent Finished."
+
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""  # show the final code once the agent has finished
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Predefined process steps the ExperimentAgent walks through as feedback is submitted
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
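+# Illustrative example (not part of the original file): for a flat entry such as
+# {"Action": "Retrieve Model", "Observation": "CNN and BiLSTM retrieved."},
+# info_to_message emits, for each key, a 64-character dashed separator, then the key
+# and its value on separate lines:
+#
+#   ----------------------------------------------------------------
+#   Action:
+#   Retrieve Model
+#   ----------------------------------------------------------------
+#   Observation:
+#   CNN and BiLSTM retrieved.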
+
+# Currently unused: gr.Examples is wired to load_example_and_set_index instead.
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents [Paper Link](https://www.arxiv.org/abs/2408.14033)")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+                paper_text_input = gr.Textbox(value="", lines=10, label="Research Paper Text")
+                extract_button = gr.Button("Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+                    generate_button = gr.Button("Generate Research Hypothesis & Experiment Plan")
+                with gr.Group():
+                    gr.Markdown("### Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+            run_on_click=True,
+            label="Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+                start_exp_agent = gr.Button("Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+                    log = gr.Textbox(label="Execution Log", lines=20, interactive=False)
+                    code_display = gr.Code(label="Implementation", language="python", interactive=False)
+
+ with gr.Column():
+                response = gr.Textbox(label="ExperimentAgent Response", lines=30, interactive=False)
+                feedback = gr.Textbox(placeholder="N/A", label="User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+ # Start research agent
+    start_exp_agent.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch(share=True)
\ No newline at end of file
diff --git a/.history/app_20250403110505.py b/.history/app_20250403110505.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e0210084faa76829ed1ebf45bacf19e33dc267a
--- /dev/null
+++ b/.history/app_20250403110505.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+ msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
+ response_info = process_steps[step_index]
+ response = info_to_message(response_info) # Convert dictionary to formatted string
+ response += "Please provide feedback based on the history, response entries, and observation, and questions: "
+ step_index += 1
+ msg += response
+ else:
+ state_complete = True
+ response = "Agent Finished."
+
+ return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
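+# Canned evaluation output displayed by the final "Evaluation" step.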
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Predefined process steps that the scripted ExperimentAgent demo walks through
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
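+
+# info_to_message formats one process step (a dict of section name -> text) into the
+# delimited block shown in the ExperimentAgent response box.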
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents [Paper Link](https://www.arxiv.org/abs/2408.14033)")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+    with gr.Tab("Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+                paper_text_input = gr.Textbox(value="", lines=10, label="Research Paper Text")
+                extract_button = gr.Button("Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+                    generate_button = gr.Button("Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+        run_on_click=True,
+        label="Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+    with gr.Tab("Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+                start_exp_agent = gr.Button("Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+                with gr.Group():
+                    gr.Markdown("### Implementation + Execution Log")
+                    log = gr.Textbox(label="Execution Log", lines=20, interactive=False)
+                    code_display = gr.Code(label="Implementation", language="python", interactive=False)
+
+ with gr.Column():
+                response = gr.Textbox(label="ExperimentAgent Response", lines=30, interactive=False)
+                feedback = gr.Textbox(placeholder="N/A", label="User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+ # Start research agent
+    start_exp_agent.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Launch the Gradio demo
+if __name__ == "__main__":
+ step_index = 0
+ app.launch(share=True)
\ No newline at end of file
diff --git a/.history/app_20250403111235.py b/.history/app_20250403111235.py
new file mode 100644
index 0000000000000000000000000000000000000000..44c72de06088fb1abbcaa950c946547dda807ea9
--- /dev/null
+++ b/.history/app_20250403111235.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
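+# state_example / state_extract / state_generate / state_agent gate the demo so each
+# stage only runs after the previous one (example loaded -> elements extracted ->
+# idea generated -> agent started); state_complete marks the scripted run as finished.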
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
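+# Illustrative (assumed) shape of example/example_data.json -- one entry per example id,
+# with the fields read below; the code_init/code_final file names here are placeholders:
+# {
+#   "1": {"title": "...", "abstract": "...", "research_tasks": "...", "research_gaps": "...",
+#         "keywords": "...", "recent_works": ["...", "..."], "hypothesis": "...",
+#         "experiment_plan": "...", "code_init": "<init_script>.py", "code_final": "<final_script>.py"},
+#   "2": { ... }
+# }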
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
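+# Overwrites the title-only list above: gr.Examples needs the full Title/Abstract text so
+# that load_example_and_set_index can map a clicked example back to its index via .index().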
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
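+# In this demo, Phase 1 does not call an LLM: extraction and generation simply look up the
+# canned fields stored in example_data for the currently selected example (index_ex).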
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
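+# Phases 2 & 3 are likewise scripted: starting the agent shows the example's initial code and
+# a predefined action log, and each feedback submission replays the next entry from
+# process_steps rather than invoking the real ResearchAgent/Environment.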
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+        msg += previous_response + "\nUser feedback: " + user_feedback + "\n\n"
+        response_info = process_steps[step_index]
+        response = info_to_message(response_info)  # Convert dictionary to formatted string
+        response += "Please provide feedback based on the history, response entries, observations, and questions: "
+ step_index += 1
+ msg += response
+ else:
+ state_complete = True
+ response = "Agent Finished."
+
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""  # show the final code once the scripted run is complete
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Predefined process steps that submit_feedback replays, one step per feedback submission
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
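+# For example, info_to_message({"Action": "Evaluation", "Observation": "Train MSE: 0.5"}) yields a
+# block where each key gets a 64-dash rule followed by "key:" and its value on the next line.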
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents [Paper Link](https://www.arxiv.org/abs/2408.14033)")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
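+    # These gr.State values carry the generated idea across tabs; the change event wired on
+    # hypothesis_state below pre-fills the Stage 2 & 3 inputs via load_phase_2_inputs.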
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+ generate_button = gr.Button("βοΈ Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+        run_on_click=True,
+        label="Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+            start_exp_agent = gr.Button("Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+ log = gr.Textbox(label="π Execution Log", lines=20, interactive=False)
+ code_display = gr.Code(label="π§βπ» Implementation", language="python", interactive=False)
+
+ with gr.Column():
+ response = gr.Textbox(label="π€ ExperimentAgent Response", lines=30, interactive=False)
+ feedback = gr.Textbox(placeholder="N/A", label="π§βπ¬ User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+ # Start research agent
+    start_exp_agent.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch(share=True)
\ No newline at end of file
diff --git a/.history/app_20250403111437.py b/.history/app_20250403111437.py
new file mode 100644
index 0000000000000000000000000000000000000000..39acfbbb7b3d5eb09d2172d34d7158065d527d2f
--- /dev/null
+++ b/.history/app_20250403111437.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+        msg += previous_response + "\nUser feedback: " + user_feedback + "\n\n"
+        response_info = process_steps[step_index]
+        response = info_to_message(response_info)  # Convert dictionary to formatted string
+        response += "Please provide feedback based on the history, response entries, observations, and questions: "
+ step_index += 1
+ msg += response
+ else:
+ state_complete = True
+ response = "Agent Finished."
+
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""  # show the final code once the scripted run is complete
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Predefined process steps that submit_feedback replays, one step per feedback submission
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents [Paper Link](https://www.arxiv.org/abs/2408.14033)")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+ generate_button = gr.Button("βοΈ Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+        run_on_click=True,
+        label="Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+            start_exp_agent = gr.Button("Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+ log = gr.Textbox(label="π Execution Log", lines=20, interactive=False)
+ code_display = gr.Code(label="π§βπ» Implementation", language="python", interactive=False)
+
+ with gr.Column():
+ response = gr.Textbox(label="π€ ExperimentAgent Response", lines=30, interactive=False)
+ feedback = gr.Textbox(placeholder="N/A", label="π§βπ¬ User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+ # Start research agent
+    start_exp_agent.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch(share=True)
\ No newline at end of file
diff --git a/.history/app_20250403111446.py b/.history/app_20250403111446.py
new file mode 100644
index 0000000000000000000000000000000000000000..39acfbbb7b3d5eb09d2172d34d7158065d527d2f
--- /dev/null
+++ b/.history/app_20250403111446.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+        msg += previous_response + "\nUser feedback: " + user_feedback + "\n\n"
+        response_info = process_steps[step_index]
+        response = info_to_message(response_info)  # Convert dictionary to formatted string
+        response += "Please provide feedback based on the history, response entries, observations, and questions: "
+ step_index += 1
+ msg += response
+ else:
+ state_complete = True
+ response = "Agent Finished."
+
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""  # show the final code once the scripted run is complete
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Predefined process steps that submit_feedback replays, one step per feedback submission
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents [Paper Link](https://www.arxiv.org/abs/2408.14033)")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+ generate_button = gr.Button("βοΈ Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+            run_on_click=True,
+ label="β¬οΈ Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+            start_exp_agent = gr.Button("βοΈ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+ log = gr.Textbox(label="π Execution Log", lines=20, interactive=False)
+ code_display = gr.Code(label="π§βπ» Implementation", language="python", interactive=False)
+
+ with gr.Column():
+ response = gr.Textbox(label="π€ ExperimentAgent Response", lines=30, interactive=False)
+ feedback = gr.Textbox(placeholder="N/A", label="π§βπ¬ User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+ # Start research agent
+    start_exp_agent.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch(share=True)
\ No newline at end of file
diff --git a/.history/app_20250403111513.py b/.history/app_20250403111513.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3920ca08e9c02b9d760d9c106d69fb00c13528f
--- /dev/null
+++ b/.history/app_20250403111513.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+ msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
+ response_info = process_steps[step_index]
+ response = info_to_message(response_info) # Convert dictionary to formatted string
+ response += "Please provide feedback based on the history, response entries, and observation, and questions: "
+ step_index += 1
+ msg += response
+ else:
+ state_complete = True
+ response = "Agent Finished."
+
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Initialize the global step_index and history
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents [Paper Link](https://www.arxiv.org/abs/2408.14033)")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+ generate_button = gr.Button("βοΈ Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+            run_on_click=True,
+ label="β¬οΈ Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+            start_exp_agent = gr.Button("βοΈ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+ log = gr.Textbox(label="π Execution Log", lines=20, interactive=False)
+ code_display = gr.Code(label="π§βπ» Implementation", language="python", interactive=False)
+
+ with gr.Column():
+ response = gr.Textbox(label="π€ ExperimentAgent Response", lines=30, interactive=False)
+ feedback = gr.Textbox(placeholder="N/A", label="π§βπ¬ User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+ # Start research agent
+    start_exp_agent.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch(share=True)
\ No newline at end of file
diff --git a/.history/app_20250403111519.py b/.history/app_20250403111519.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3920ca08e9c02b9d760d9c106d69fb00c13528f
--- /dev/null
+++ b/.history/app_20250403111519.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+ msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
+ response_info = process_steps[step_index]
+ response = info_to_message(response_info) # Convert dictionary to formatted string
+ response += "Please provide feedback based on the history, response entries, and observation, and questions: "
+ step_index += 1
+ msg += response
+ else:
+ state_complete = True
+ response = "Agent Finished."
+
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Initialize the global step_index and history
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents [Paper Link](https://www.arxiv.org/abs/2408.14033)")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+ generate_button = gr.Button("βοΈ Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+            run_on_click=True,
+ label="β¬οΈ Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+            start_exp_agent = gr.Button("βοΈ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+ log = gr.Textbox(label="π Execution Log", lines=20, interactive=False)
+ code_display = gr.Code(label="π§βπ» Implementation", language="python", interactive=False)
+
+ with gr.Column():
+ response = gr.Textbox(label="π€ ExperimentAgent Response", lines=30, interactive=False)
+ feedback = gr.Textbox(placeholder="N/A", label="π§βπ¬ User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+ # Start research agent
+    start_exp_agent.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch(share=True)
\ No newline at end of file
diff --git a/.history/app_20250403131001.py b/.history/app_20250403131001.py
new file mode 100644
index 0000000000000000000000000000000000000000..eced334d4d3d72d2022042ba730b93b3e693ed10
--- /dev/null
+++ b/.history/app_20250403131001.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+ msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
+ response_info = process_steps[step_index]
+ response = info_to_message(response_info) # Convert dictionary to formatted string
+ response += "Please provide feedback based on the history, response entries, and observation, and questions: "
+ step_index += 1
+ msg += response
+ else:
+ state_complete = True
+ response = "Agent Finished."
+
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Initialize the global step_index and history
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# [MLR- Copilot: Machine Learning Research based on LLM Agents](https://www.arxiv.org/abs/2408.14033)")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+ generate_button = gr.Button("βοΈ Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+            run_on_click=True,
+ label="β¬οΈ Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+            start_exp_agent = gr.Button("βοΈ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+ log = gr.Textbox(label="π Execution Log", lines=20, interactive=False)
+ code_display = gr.Code(label="π§βπ» Implementation", language="python", interactive=False)
+
+ with gr.Column():
+ response = gr.Textbox(label="π€ ExperimentAgent Response", lines=30, interactive=False)
+ feedback = gr.Textbox(placeholder="N/A", label="π§βπ¬ User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+ # Start research agent
+    start_exp_agent.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch(share=True)
\ No newline at end of file
diff --git a/.history/app_20250403131149.py b/.history/app_20250403131149.py
new file mode 100644
index 0000000000000000000000000000000000000000..02e25cf8795d1a21b4b9a5b61f8caf536938d934
--- /dev/null
+++ b/.history/app_20250403131149.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+ msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
+ response_info = process_steps[step_index]
+ response = info_to_message(response_info) # Convert dictionary to formatted string
+ response += "Please provide feedback based on the history, response entries, and observation, and questions: "
+ step_index += 1
+ msg += response
+ else:
+ state_complete = True
+ response = "Agent Finished."
+
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Initialize the global step_index and history
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# [MLR- Copilot: Machine Learning Research based on LLM Agents](https://www.arxiv.org/abs/2408.14033)")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+ generate_button = gr.Button("βοΈ Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+            run_on_click=True,
+ label="β¬οΈ Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+            start_exp_agent = gr.Button("βοΈ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+ log = gr.Textbox(label="π Execution Log", lines=20, interactive=False)
+ code_display = gr.Code(label="π§βπ» Implementation", language="python", interactive=False)
+
+ with gr.Column():
+ response = gr.Textbox(label="π€ ExperimentAgent Response", lines=30, interactive=False)
+ feedback = gr.Textbox(placeholder="N/A", label="π§βπ¬ User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+ # Start research agent
+    start_exp_agent.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch()
\ No newline at end of file
diff --git a/.history/app_20250403131255.py b/.history/app_20250403131255.py
new file mode 100644
index 0000000000000000000000000000000000000000..02e25cf8795d1a21b4b9a5b61f8caf536938d934
--- /dev/null
+++ b/.history/app_20250403131255.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
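+    # Demo mode: return the example's pre-written starter code and a canned action log
+    # instead of invoking the real ExperimentAgent.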
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+        msg += previous_response + "\nUser feedback: " + user_feedback + "\n\n"
+        response_info = process_steps[step_index]
+        response = info_to_message(response_info)  # Convert the step dictionary to a formatted string
+        response += "Please provide feedback based on the history, the response entries, and the observations and questions above: "
+        step_index += 1
+        msg += response
+    else:
+        state_complete = True
+        response = "Agent Finished."
+
+    # Show the final implementation once the scripted agent finishes, otherwise keep showing the initial code.
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Scripted sequence of agent steps that submit_feedback replays one entry at a time
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# [MLR- Copilot: Machine Learning Research based on LLM Agents](https://www.arxiv.org/abs/2408.14033)")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+ generate_button = gr.Button("βοΈ Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+        run_on_click=True,
+ label="β¬οΈ Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+ start_exp_agnet = gr.Button("βοΈ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+ log = gr.Textbox(label="π Execution Log", lines=20, interactive=False)
+ code_display = gr.Code(label="π§βπ» Implementation", language="python", interactive=False)
+
+ with gr.Column():
+ response = gr.Textbox(label="π€ ExperimentAgent Response", lines=30, interactive=False)
+ feedback = gr.Textbox(placeholder="N/A", label="π§βπ¬ User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+    # Start / reset the experiment agent
+ start_exp_agnet.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch()
\ No newline at end of file
diff --git a/.history/app_20250403131329.py b/.history/app_20250403131329.py
new file mode 100644
index 0000000000000000000000000000000000000000..9252972be6f9b4cb4e917f0cf6aaffdf6e179f63
--- /dev/null
+++ b/.history/app_20250403131329.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
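+    # Demo mode: return the example's pre-written starter code and a canned action log
+    # instead of invoking the real ExperimentAgent.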
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+        msg += previous_response + "\nUser feedback: " + user_feedback + "\n\n"
+        response_info = process_steps[step_index]
+        response = info_to_message(response_info)  # Convert the step dictionary to a formatted string
+        response += "Please provide feedback based on the history, the response entries, and the observations and questions above: "
+        step_index += 1
+        msg += response
+    else:
+        state_complete = True
+        response = "Agent Finished."
+
+    # Show the final implementation once the scripted agent finishes, otherwise keep showing the initial code.
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Scripted sequence of agent steps that submit_feedback replays one entry at a time
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+ generate_button = gr.Button("βοΈ Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+        run_on_click=True,
+ label="β¬οΈ Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+ start_exp_agnet = gr.Button("βοΈ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+ log = gr.Textbox(label="π Execution Log", lines=20, interactive=False)
+ code_display = gr.Code(label="π§βπ» Implementation", language="python", interactive=False)
+
+ with gr.Column():
+ response = gr.Textbox(label="π€ ExperimentAgent Response", lines=30, interactive=False)
+ feedback = gr.Textbox(placeholder="N/A", label="π§βπ¬ User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+    # Start / reset the experiment agent
+ start_exp_agnet.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch()
\ No newline at end of file
diff --git a/.history/app_20250403131335.py b/.history/app_20250403131335.py
new file mode 100644
index 0000000000000000000000000000000000000000..9252972be6f9b4cb4e917f0cf6aaffdf6e179f63
--- /dev/null
+++ b/.history/app_20250403131335.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
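+    # Demo mode: return the example's pre-written starter code and a canned action log
+    # instead of invoking the real ExperimentAgent.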
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+        msg += previous_response + "\nUser feedback: " + user_feedback + "\n\n"
+        response_info = process_steps[step_index]
+        response = info_to_message(response_info)  # Convert the step dictionary to a formatted string
+        response += "Please provide feedback based on the history, the response entries, and the observations and questions above: "
+        step_index += 1
+        msg += response
+    else:
+        state_complete = True
+        response = "Agent Finished."
+
+    # Show the final implementation once the scripted agent finishes, otherwise keep showing the initial code.
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Scripted sequence of agent steps that submit_feedback replays one entry at a time
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+ generate_button = gr.Button("βοΈ Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+        run_on_click=True,
+ label="β¬οΈ Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+ start_exp_agnet = gr.Button("βοΈ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+ log = gr.Textbox(label="π Execution Log", lines=20, interactive=False)
+ code_display = gr.Code(label="π§βπ» Implementation", language="python", interactive=False)
+
+ with gr.Column():
+ response = gr.Textbox(label="π€ ExperimentAgent Response", lines=30, interactive=False)
+ feedback = gr.Textbox(placeholder="N/A", label="π§βπ¬ User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+    # Start / reset the experiment agent
+ start_exp_agnet.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch()
\ No newline at end of file
diff --git a/.history/app_20250403131446.py b/.history/app_20250403131446.py
new file mode 100644
index 0000000000000000000000000000000000000000..77db7ad88fa3a83c9c8dc02baecbc488533549cc
--- /dev/null
+++ b/.history/app_20250403131446.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
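+    # Demo mode: return the example's pre-written starter code and a canned action log
+    # instead of invoking the real ExperimentAgent.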
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+        msg += previous_response + "\nUser feedback: " + user_feedback + "\n\n"
+        response_info = process_steps[step_index]
+        response = info_to_message(response_info)  # Convert the step dictionary to a formatted string
+        response += "Please provide feedback based on the history, the response entries, and the observations and questions above: "
+        step_index += 1
+        msg += response
+    else:
+        state_complete = True
+        response = "Agent Finished."
+
+    # Show the final implementation once the scripted agent finishes, otherwise keep showing the initial code.
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Scripted sequence of agent steps that submit_feedback replays one entry at a time
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use [Github Software](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+ generate_button = gr.Button("βοΈ Generate Research Hypothesis & Experiment Plan")
+ with gr.Group():
+ gr.Markdown("### π Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+        run_on_click=True,
+ label="β¬οΈ Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+ with gr.Tab("π§ͺ Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+ start_exp_agnet = gr.Button("βοΈ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+ log = gr.Textbox(label="π Execution Log", lines=20, interactive=False)
+ code_display = gr.Code(label="π§βπ» Implementation", language="python", interactive=False)
+
+ with gr.Column():
+ response = gr.Textbox(label="π€ ExperimentAgent Response", lines=30, interactive=False)
+ feedback = gr.Textbox(placeholder="N/A", label="π§βπ¬ User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+    # Start / reset the experiment agent
+ start_exp_agnet.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch()
\ No newline at end of file
diff --git a/.history/app_20250403131524.py b/.history/app_20250403131524.py
new file mode 100644
index 0000000000000000000000000000000000000000..408b95ce6d001d967659a4411624366a7e923717
--- /dev/null
+++ b/.history/app_20250403131524.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
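+    # Demo mode: return the example's pre-written starter code and a canned action log
+    # instead of invoking the real ExperimentAgent.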
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+        msg += previous_response + "\nUser feedback: " + user_feedback + "\n\n"
+        response_info = process_steps[step_index]
+        response = info_to_message(response_info)  # Convert the step dictionary to a formatted string
+        response += "Please provide feedback based on the history, the response entries, and the observations and questions above: "
+        step_index += 1
+        msg += response
+    else:
+        state_complete = True
+        response = "Agent Finished."
+
+    # Show the final implementation once the scripted agent finishes, otherwise keep showing the initial code.
+    return msg, response, example_data[index_ex]['code_final'] if state_complete else example_data[index_ex]['code_init'], ""
+
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Scripted sequence of agent steps that submit_feedback replays one entry at a time
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+ with gr.Tab("π‘Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+ paper_text_input = gr.Textbox(value="", lines=10, label="π Research Paper Text")
+ extract_button = gr.Button("π Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+                    generate_button = gr.Button("Generate Research Hypothesis & Experiment Plan")
+                with gr.Group():
+                    gr.Markdown("### Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+            run_on_click=True,
+            label="Click an example to load"
+ )
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+    with gr.Tab("Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+                start_exp_agnet = gr.Button("Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+                    log = gr.Textbox(label="Execution Log", lines=20, interactive=False)
+                    code_display = gr.Code(label="Implementation", language="python", interactive=False)
+
+ with gr.Column():
+                response = gr.Textbox(label="ExperimentAgent Response", lines=30, interactive=False)
+                feedback = gr.Textbox(placeholder="N/A", label="User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+ # Start research agent
+ start_exp_agnet.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch()
\ No newline at end of file
diff --git a/.history/app_20250403135543.py b/.history/app_20250403135543.py
new file mode 100644
index 0000000000000000000000000000000000000000..408b95ce6d001d967659a4411624366a7e923717
--- /dev/null
+++ b/.history/app_20250403135543.py
@@ -0,0 +1,324 @@
+import gradio as gr
+from pathlib import Path
+from reactagent.environment import Environment
+from reactagent.agents.agent_research import ResearchAgent
+from reactagent.runner import create_parser
+from reactagent import llm
+from reactagent.users.user import User
+import os
+import json
+
+
+# Global variables to store session state
+env = None
+agent = None
+state_example = False
+state_extract = False
+state_generate = False
+state_agent = False
+state_complete = False
+index_ex = "1"
+
+example_text = [
+ "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
+ "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
+]
+
+# Load example JSON file
+def load_example_data():
+ with open("example/example_data.json", "r") as json_file:
+ example_data = json.load(json_file)
+
+ for idx in example_data.keys():
+ try:
+ file = example_data[idx]["code_init"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_init"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ try:
+ file = example_data[idx]["code_final"]
+ with open(os.path.join("example", file), "r") as f:
+ example_data[idx]["code_final"] = f.read()
+ except FileNotFoundError:
+ print(f"File not found: {file}. Skipping key: {idx}")
+ return example_data
+
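+# Shape of example/example_data.json assumed by the loaders in this file (inferred from
+# the field accesses below; values abbreviated for illustration):
+#   {
+#     "1": {"title": "...", "abstract": "...", "research_tasks": "...", "research_gaps": "...",
+#           "keywords": "...", "recent_works": ["..."], "hypothesis": "...",
+#           "experiment_plan": "...", "code_init": "<filename>", "code_final": "<filename>"},
+#     "2": {...}
+#   }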
+example_data = load_example_data()
+
+# Function to handle the selection of an example and populate the respective fields
+def load_example(example_id):
+ global index_ex
+ index_ex = str(example_id)
+ example = example_data[index_ex]
+ paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
+ return paper_text
+
+example_text = [load_example(1), load_example(2)]
+
+# Function to handle example clicks
+def load_example_and_set_index(paper_text_input):
+ global index_ex, state_example
+ state_example = True
+ index_ex = str(example_text.index(paper_text_input) + 1)
+ paper_text = load_example(index_ex)
+
+ return paper_text, "", "", "", "", "", ""
+
+
+
+########## Phase 1 ##############
+
+def extract_research_elements(paper_text):
+ global state_extract, index_ex, state_example
+ if not state_example or paper_text == "":
+ return "", "", "", ""
+ state_extract = True
+ if paper_text != load_example(index_ex):
+ return "", "", "", ""
+ example = example_data[index_ex]
+ tasks = example['research_tasks']
+ gaps = example['research_gaps']
+ keywords = example['keywords']
+ recent_works = "\n".join(example['recent_works'])
+ return tasks, gaps, keywords, recent_works
+
+
+# Step 2: Generate Research Hypothesis and Experiment Plan
+def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
+ if (not state_extract or not state_example or paper_text == ""):
+ return "", "", "", ""
+ global state_generate, index_ex
+ state_generate = True
+ hypothesis = example_data[index_ex]['hypothesis']
+ experiment_plan = example_data[index_ex]['experiment_plan']
+ return hypothesis, experiment_plan, hypothesis, experiment_plan
+
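+# The module-level state_* flags gate the demo flow: each stage returns empty outputs
+# unless the previous stages (example click -> extract -> generate -> start agent) have run.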
+########## Phase 2 & 3 ##############
+def start_experiment_agent(hypothesis, plan):
+ if (not state_extract or not state_generate or not state_example):
+ return "", "", ""
+ global state_agent, step_index, state_complete
+ state_agent = True
+ step_index = 0
+ state_complete = False
+ # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
+ return example_data[index_ex]['code_init'], predefined_action_log, "", ""
+
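+# Note: in this predefined demo the agent is not actually executed. start_experiment_agent
+# returns the canned starter code plus predefined_action_log, and submit_feedback below
+# replays entries from process_steps rather than calling the ResearchAgent/Environment
+# classes imported at the top of the file.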
+def submit_feedback(user_feedback, history, previous_response):
+ if (not state_extract or not state_generate or not state_agent or not state_example):
+ return "", "", ""
+ global step_index, state_complete
+ step_index += 1
+ msg = history
+ if step_index < len(process_steps):
+        msg += previous_response + "\nUser feedback: " + user_feedback + "\n\n"
+ response_info = process_steps[step_index]
+ response = info_to_message(response_info) # Convert dictionary to formatted string
+ response += "Please provide feedback based on the history, response entries, and observation, and questions: "
+ step_index += 1
+ msg += response
+ else:
+ state_complete = True
+ response = "Agent Finished."
+
+ return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""
+
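+# Stepping sketch (for illustration): each Submit click increments step_index twice
+# (once before and once after reading process_steps), so successive submissions surface
+# process_steps[1], [3] and [5]; when the list is exhausted the response becomes
+# "Agent Finished." and state_complete is set. Note the code panel shows code_final
+# while steps remain and code_init once state_complete is True.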
+def load_phase_2_inputs(hypothesis, plan):
+ return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
+
+
+
+predefined_action_log = """
+[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
+[Action]: Inspect Script (train.py)
+Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
+Objective: Understand the training script, including data processing, [...]
+[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
+[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
+"""
+
+
+predefined_observation = """
+Epoch [1/10],
+Train MSE: 0.543,
+Test MSE: 0.688
+Epoch [2/10],
+Train MSE: 0.242,
+Test MSE: 0.493\n
+"""
+
+# Predefined action/observation steps replayed by the demo agent
+process_steps = [
+ {
+ "Action": "Inspect Script Lines (train.py)",
+ "Observation": (
+ "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
+ "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
+ "to calculate RMSE for different dimensions. Placeholder functions train_model and "
+ "predict exist without implementations."
+ ),
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The script executed successfully. Generated embeddings using the BERT model. Completed "
+ "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
+ ),
+ },
+ {
+ "Action": "Edit Script (train.py)",
+ "Observation": (
+ "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
+ "The edited train.py now has clearly defined functions"
+ "for data loading (load_data), model definition (build_model), "
+ "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
+ ),
+ },
+ {
+ "Action": "Retrieve Model",
+ "Observation": "CNN and BiLSTM retrieved.",
+ },
+ {
+ "Action": "Execute Script (train.py)",
+ "Observation": (
+ "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
+ "the decrease in loss indicates improved model performance."
+ )
+ },
+ {
+ "Action": "Evaluation",
+ "Observation": predefined_observation,
+ }
+]
+def info_to_message(info):
+ msg = ""
+ for k, v in info.items():
+ if isinstance(v, dict):
+ tempv = v
+ v = ""
+ for k2, v2 in tempv.items():
+ v += f"{k2}:\n {v2}\n"
+ v = User.indent_text(v, 2)
+ msg += '-' * 64
+ msg += '\n'
+ msg += f"{k}:\n{v}\n"
+ return msg
+
+
+def handle_example_click(example_index):
+ global index_ex
+ index_ex = example_index
+ return load_example(index_ex) # Simply return the text to display it in the textbox
+
+# Gradio Interface
+with gr.Blocks(theme=gr.themes.Default()) as app:
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
+ gr.Markdown("### ")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+
+
+
+ gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchersβ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
+
+
+
+
+ # Use state variables to store generated hypothesis and experiment plan
+ hypothesis_state = gr.State("")
+ experiment_plan_state = gr.State("")
+
+ ########## Phase 1: Research Idea Generation Tab ##############
+    with gr.Tab("Stage 1: Research Idea Generation"):
+ gr.Markdown("### Extract Research Elements and Generate Research Ideas")
+
+ with gr.Row():
+ with gr.Column():
+                paper_text_input = gr.Textbox(value="", lines=10, label="Research Paper Text")
+                extract_button = gr.Button("Extract Research Elements")
+ with gr.Row():
+ tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
+ gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
+ keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
+ recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
+ with gr.Column():
+ with gr.Row(): # Move the button to the top
+                    generate_button = gr.Button("Generate Research Hypothesis & Experiment Plan")
+                with gr.Group():
+                    gr.Markdown("### Research Idea")
+ with gr.Row():
+ hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
+ experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+ gr.Examples(
+ examples=example_text,
+ inputs=[paper_text_input],
+ outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
+ fn=load_example_and_set_index,
+            run_on_click=True,
+            label="Click an example to load"
+ )
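+        # With run_on_click=True, selecting an example calls load_example_and_set_index,
+        # which reloads the paper text and clears the downstream fields so the
+        # Extract / Generate buttons start from a clean state.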
+
+ # Step 1: Extract Research Elements
+ extract_button.click(
+ fn=extract_research_elements,
+ inputs=paper_text_input,
+ outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+ )
+
+ generate_button.click(
+ fn=generate_and_store,
+ inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
+ outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
+ )
+
+
+
+ ########## Phase 2 & 3: Experiment implementation and execution ##############
+    with gr.Tab("Stage 2 & Stage 3: Experiment implementation and execution"):
+ gr.Markdown("### Interact with the ExperimentAgent")
+
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ gr.Markdown("### π Generated Research Idea")
+ with gr.Row():
+ idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
+ plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
+
+ with gr.Column():
+                start_exp_agnet = gr.Button("Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
+ with gr.Group():
+ gr.Markdown("### Implementation + Execution Log")
+                    log = gr.Textbox(label="Execution Log", lines=20, interactive=False)
+                    code_display = gr.Code(label="Implementation", language="python", interactive=False)
+
+ with gr.Column():
+                response = gr.Textbox(label="ExperimentAgent Response", lines=30, interactive=False)
+                feedback = gr.Textbox(placeholder="N/A", label="User Feedback", lines=3, interactive=True)
+ submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
+
+ hypothesis_state.change(
+ fn=load_phase_2_inputs,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[idea_input, plan_input, code_display]
+ )
+
+ # Start research agent
+ start_exp_agnet.click(
+ fn=start_experiment_agent,
+ inputs=[hypothesis_state, experiment_plan_state],
+ outputs=[code_display, log, response, feedback]
+ )
+
+ submit_button.click(
+ fn=submit_feedback,
+ inputs=[feedback, log, response],
+ outputs=[log, response, code_display, feedback]
+ )
+
+# Test
+if __name__ == "__main__":
+ step_index = 0
+ app.launch()
\ No newline at end of file
diff --git a/app.py b/app.py
index 289a5b645f687496860867560bac9640f8649d0a..408b95ce6d001d967659a4411624366a7e923717 100644
--- a/app.py
+++ b/app.py
@@ -213,10 +213,10 @@ def handle_example_click(example_index):
# Gradio Interface
with gr.Blocks(theme=gr.themes.Default()) as app:
- gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents [Paper Link](https://www.arxiv.org/abs/2408.14033)")
+ gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
gr.Markdown("### ")
- gr.Markdown("## This UI is for predefined example demo only.")
- gr.Markdown("## To reproduce the results please use software in [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
+ gr.Markdown("## This UI is for predefined example demo only.")
+ gr.Markdown("## To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/).")
@@ -321,4 +321,4 @@ with gr.Blocks(theme=gr.themes.Default()) as app:
# Test
if __name__ == "__main__":
step_index = 0
- app.launch(share=True)
\ No newline at end of file
+ app.launch()
\ No newline at end of file
diff --git a/reactagent/__pycache__/__init__.cpython-310.pyc b/reactagent/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9585820087bf591177854b3d9897906409f390d4
Binary files /dev/null and b/reactagent/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/__pycache__/__init__.cpython-38.pyc b/reactagent/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8e81ee8680fe25543864c22416e5bdbd935d33f
Binary files /dev/null and b/reactagent/__pycache__/__init__.cpython-38.pyc differ
diff --git a/reactagent/__pycache__/environment.cpython-310.pyc b/reactagent/__pycache__/environment.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f94b2f13dd90c457b08b7989892e17e613bcced
Binary files /dev/null and b/reactagent/__pycache__/environment.cpython-310.pyc differ
diff --git a/reactagent/__pycache__/environment.cpython-38.pyc b/reactagent/__pycache__/environment.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..680b45c60700f808723ec65cadb2434770a0252c
Binary files /dev/null and b/reactagent/__pycache__/environment.cpython-38.pyc differ
diff --git a/reactagent/__pycache__/high_level_actions.cpython-310.pyc b/reactagent/__pycache__/high_level_actions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b918c1775dbe8d8ae19b424f454bc6c413665429
Binary files /dev/null and b/reactagent/__pycache__/high_level_actions.cpython-310.pyc differ
diff --git a/reactagent/__pycache__/high_level_actions.cpython-38.pyc b/reactagent/__pycache__/high_level_actions.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..43a78a0e062a4f06e9e046e4e3c081c3d818972e
Binary files /dev/null and b/reactagent/__pycache__/high_level_actions.cpython-38.pyc differ
diff --git a/reactagent/__pycache__/llm.cpython-310.pyc b/reactagent/__pycache__/llm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28ef68bbdad204f708d3e9b1df1e96e041786f0f
Binary files /dev/null and b/reactagent/__pycache__/llm.cpython-310.pyc differ
diff --git a/reactagent/__pycache__/llm.cpython-38.pyc b/reactagent/__pycache__/llm.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a70f08552342ee5fadff1f8b5fd803b19635943a
Binary files /dev/null and b/reactagent/__pycache__/llm.cpython-38.pyc differ
diff --git a/reactagent/__pycache__/low_level_actions.cpython-310.pyc b/reactagent/__pycache__/low_level_actions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c56d0df2166063b5394443a957fb2d86fa6a72c
Binary files /dev/null and b/reactagent/__pycache__/low_level_actions.cpython-310.pyc differ
diff --git a/reactagent/__pycache__/low_level_actions.cpython-38.pyc b/reactagent/__pycache__/low_level_actions.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b65e898823eff50b70f55560955caaa719dab5c
Binary files /dev/null and b/reactagent/__pycache__/low_level_actions.cpython-38.pyc differ
diff --git a/reactagent/__pycache__/p2m_actions.cpython-310.pyc b/reactagent/__pycache__/p2m_actions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ba0c43247fff57f3961d43ceb0371ae080e4189
Binary files /dev/null and b/reactagent/__pycache__/p2m_actions.cpython-310.pyc differ
diff --git a/reactagent/__pycache__/prepare_task.cpython-310.pyc b/reactagent/__pycache__/prepare_task.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6446a0546fb97be6234528e58ca7490a4464886b
Binary files /dev/null and b/reactagent/__pycache__/prepare_task.cpython-310.pyc differ
diff --git a/reactagent/__pycache__/runner.cpython-310.pyc b/reactagent/__pycache__/runner.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d3d0e019a85189007cce46bd3476150987939db
Binary files /dev/null and b/reactagent/__pycache__/runner.cpython-310.pyc differ
diff --git a/reactagent/__pycache__/schema.cpython-310.pyc b/reactagent/__pycache__/schema.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8fc371cfcfc81e1ae81515d415a488fcd6ca7f29
Binary files /dev/null and b/reactagent/__pycache__/schema.cpython-310.pyc differ
diff --git a/reactagent/__pycache__/schema.cpython-38.pyc b/reactagent/__pycache__/schema.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2912dc7bbe3e00e30df2d565af8836e79384fb96
Binary files /dev/null and b/reactagent/__pycache__/schema.cpython-38.pyc differ
diff --git a/reactagent/agents/__pycache__/__init__.cpython-310.pyc b/reactagent/agents/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3bd5ed01d6b6eb073d7a0fe90847a8f35dd7200d
Binary files /dev/null and b/reactagent/agents/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/agents/__pycache__/agent.cpython-310.pyc b/reactagent/agents/__pycache__/agent.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d11b0eaed3062702838fc47c4bc54bc0d231a86
Binary files /dev/null and b/reactagent/agents/__pycache__/agent.cpython-310.pyc differ
diff --git a/reactagent/agents/__pycache__/agent_research.cpython-310.pyc b/reactagent/agents/__pycache__/agent_research.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..053b7ec530f9e5b8c162eb3f1560b7f5db8d0ab8
Binary files /dev/null and b/reactagent/agents/__pycache__/agent_research.cpython-310.pyc differ
diff --git a/reactagent/agents/__pycache__/format.cpython-310.pyc b/reactagent/agents/__pycache__/format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..903f2e5ecd85fd13039293c52958c50dc3685435
Binary files /dev/null and b/reactagent/agents/__pycache__/format.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7e65e19995fdba4b4d673c8b9f4252f8ef1236a3
Binary files /dev/null and b/reactagent/prompt2model/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_generator/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/dataset_generator/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d342f5f452461723ca72472238a459b87dc136c0
Binary files /dev/null and b/reactagent/prompt2model/dataset_generator/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_generator/__pycache__/base.cpython-310.pyc b/reactagent/prompt2model/dataset_generator/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a41e29a3fad836ba99e774440c292169b140297
Binary files /dev/null and b/reactagent/prompt2model/dataset_generator/__pycache__/base.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_generator/__pycache__/mock.cpython-310.pyc b/reactagent/prompt2model/dataset_generator/__pycache__/mock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e787e587c693ec8dfea2932d5626e0b00fd7061
Binary files /dev/null and b/reactagent/prompt2model/dataset_generator/__pycache__/mock.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_generator/__pycache__/prompt_based.cpython-310.pyc b/reactagent/prompt2model/dataset_generator/__pycache__/prompt_based.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..42f9264cf9cf9e0c14bd8e14ae6058002c570f1b
Binary files /dev/null and b/reactagent/prompt2model/dataset_generator/__pycache__/prompt_based.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_generator/__pycache__/prompt_template.cpython-310.pyc b/reactagent/prompt2model/dataset_generator/__pycache__/prompt_template.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd7f6e42e2a68660c39ee68fd62abd20abc7f4ac
Binary files /dev/null and b/reactagent/prompt2model/dataset_generator/__pycache__/prompt_template.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_processor/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/dataset_processor/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7bd168223fcc363076eddd48ef5e1786c73ce072
Binary files /dev/null and b/reactagent/prompt2model/dataset_processor/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_processor/__pycache__/base.cpython-310.pyc b/reactagent/prompt2model/dataset_processor/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..65ea4d216b2d784f730e69ae1bcd6733e8d9e57f
Binary files /dev/null and b/reactagent/prompt2model/dataset_processor/__pycache__/base.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_processor/__pycache__/mock.cpython-310.pyc b/reactagent/prompt2model/dataset_processor/__pycache__/mock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8dba31107ebc8291e02c5db60d850c41d8a9ef4a
Binary files /dev/null and b/reactagent/prompt2model/dataset_processor/__pycache__/mock.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_processor/__pycache__/textualize.cpython-310.pyc b/reactagent/prompt2model/dataset_processor/__pycache__/textualize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..44ff5fb9931a7b218d4b1e9079773e004fbcc344
Binary files /dev/null and b/reactagent/prompt2model/dataset_processor/__pycache__/textualize.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_retriever/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/dataset_retriever/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e004df9909848cc88431740ce93bcf03ec32507d
Binary files /dev/null and b/reactagent/prompt2model/dataset_retriever/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_retriever/__pycache__/base.cpython-310.pyc b/reactagent/prompt2model/dataset_retriever/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6ac90e419e274271ab7570ddac70c4c7e5160685
Binary files /dev/null and b/reactagent/prompt2model/dataset_retriever/__pycache__/base.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_retriever/__pycache__/column_selection_prompt.cpython-310.pyc b/reactagent/prompt2model/dataset_retriever/__pycache__/column_selection_prompt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ad8c46b3ca1f8200990127217f77dbdfa8c58936
Binary files /dev/null and b/reactagent/prompt2model/dataset_retriever/__pycache__/column_selection_prompt.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_retriever/__pycache__/description_dataset_retriever.cpython-310.pyc b/reactagent/prompt2model/dataset_retriever/__pycache__/description_dataset_retriever.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c28c22a72fd7d2572f01765de4e0d33d03be229
Binary files /dev/null and b/reactagent/prompt2model/dataset_retriever/__pycache__/description_dataset_retriever.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_retriever/__pycache__/mock.cpython-310.pyc b/reactagent/prompt2model/dataset_retriever/__pycache__/mock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aab52b915885832b664e05636c5310cc3a27ba9a
Binary files /dev/null and b/reactagent/prompt2model/dataset_retriever/__pycache__/mock.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_retriever/__pycache__/reranking_prompt.cpython-310.pyc b/reactagent/prompt2model/dataset_retriever/__pycache__/reranking_prompt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e09f60e16b5c8128a6d4db4ec4b28758343cfdf
Binary files /dev/null and b/reactagent/prompt2model/dataset_retriever/__pycache__/reranking_prompt.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_retriever/__pycache__/task_expansion_prompt.cpython-310.pyc b/reactagent/prompt2model/dataset_retriever/__pycache__/task_expansion_prompt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e39742de215e343b38ce356eae87f6994b9782f
Binary files /dev/null and b/reactagent/prompt2model/dataset_retriever/__pycache__/task_expansion_prompt.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_transformer/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/dataset_transformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37df394a369eef2cfc144398aeb9f3e04e0f5c9d
Binary files /dev/null and b/reactagent/prompt2model/dataset_transformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_transformer/__pycache__/base.cpython-310.pyc b/reactagent/prompt2model/dataset_transformer/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..57dd7cab0f295f60a3b1495f01fefb0820e11a10
Binary files /dev/null and b/reactagent/prompt2model/dataset_transformer/__pycache__/base.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_transformer/__pycache__/prompt_based.cpython-310.pyc b/reactagent/prompt2model/dataset_transformer/__pycache__/prompt_based.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e7c5c180a39c6281d2e3722cb26b26c02246e0bb
Binary files /dev/null and b/reactagent/prompt2model/dataset_transformer/__pycache__/prompt_based.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/dataset_transformer/__pycache__/prompt_template.cpython-310.pyc b/reactagent/prompt2model/dataset_transformer/__pycache__/prompt_template.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..30b53b65e4b11056f7e8a832c5ced732fdae0e36
Binary files /dev/null and b/reactagent/prompt2model/dataset_transformer/__pycache__/prompt_template.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_evaluator/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/model_evaluator/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a76de8ce695c9086f4c2cbcbd3d909b59c8a534
Binary files /dev/null and b/reactagent/prompt2model/model_evaluator/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_evaluator/__pycache__/base.cpython-310.pyc b/reactagent/prompt2model/model_evaluator/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..115a18c35f58253a31173615a219cdeba53ea6e5
Binary files /dev/null and b/reactagent/prompt2model/model_evaluator/__pycache__/base.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_evaluator/__pycache__/mock.cpython-310.pyc b/reactagent/prompt2model/model_evaluator/__pycache__/mock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6922967f29943375205edd5faec9567f1cededad
Binary files /dev/null and b/reactagent/prompt2model/model_evaluator/__pycache__/mock.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_evaluator/__pycache__/seq2seq.cpython-310.pyc b/reactagent/prompt2model/model_evaluator/__pycache__/seq2seq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..138a09a6a4b4fc75678b7391b682c61caa1cb5d8
Binary files /dev/null and b/reactagent/prompt2model/model_evaluator/__pycache__/seq2seq.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_executor/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/model_executor/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c1a6ec18393aba94cbf2b107b078ff3ff6094337
Binary files /dev/null and b/reactagent/prompt2model/model_executor/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_executor/__pycache__/base.cpython-310.pyc b/reactagent/prompt2model/model_executor/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5342268b15a4c46ebc28b686f187f5f5edcc56eb
Binary files /dev/null and b/reactagent/prompt2model/model_executor/__pycache__/base.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_executor/__pycache__/generate.cpython-310.pyc b/reactagent/prompt2model/model_executor/__pycache__/generate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9d293e8b392b685748df1e996dbb78d02ccb255
Binary files /dev/null and b/reactagent/prompt2model/model_executor/__pycache__/generate.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_executor/__pycache__/mock.cpython-310.pyc b/reactagent/prompt2model/model_executor/__pycache__/mock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9d0516cc58ace5e0238ef0de51dc1fdd548d2d2
Binary files /dev/null and b/reactagent/prompt2model/model_executor/__pycache__/mock.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_retriever/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/model_retriever/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7ffffb41d3284977e331ebd4bcc3cb79e9f04ad
Binary files /dev/null and b/reactagent/prompt2model/model_retriever/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_retriever/__pycache__/base.cpython-310.pyc b/reactagent/prompt2model/model_retriever/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e53fba0caa165e53035b7b9b3b96cc01c5b38277
Binary files /dev/null and b/reactagent/prompt2model/model_retriever/__pycache__/base.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_retriever/__pycache__/description_based_retriever.cpython-310.pyc b/reactagent/prompt2model/model_retriever/__pycache__/description_based_retriever.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26e468eb977b79558c06710d379779a35389568c
Binary files /dev/null and b/reactagent/prompt2model/model_retriever/__pycache__/description_based_retriever.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_retriever/__pycache__/generate_hypothetical_document.cpython-310.pyc b/reactagent/prompt2model/model_retriever/__pycache__/generate_hypothetical_document.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..060ef8b9f07a723ba38e5ab6f5aa2f2b11f42899
Binary files /dev/null and b/reactagent/prompt2model/model_retriever/__pycache__/generate_hypothetical_document.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_retriever/__pycache__/mock.cpython-310.pyc b/reactagent/prompt2model/model_retriever/__pycache__/mock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a1698dfb9b81cdf9ddfd6a3c9159023c688161f
Binary files /dev/null and b/reactagent/prompt2model/model_retriever/__pycache__/mock.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_trainer/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/model_trainer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4bf85d2cdd446a36cd33c298c62a424d00e4fcc4
Binary files /dev/null and b/reactagent/prompt2model/model_trainer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_trainer/__pycache__/base.cpython-310.pyc b/reactagent/prompt2model/model_trainer/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5473bd8529ffc87233c3e27b48fe65cddc4ae98
Binary files /dev/null and b/reactagent/prompt2model/model_trainer/__pycache__/base.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_trainer/__pycache__/callback.cpython-310.pyc b/reactagent/prompt2model/model_trainer/__pycache__/callback.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28c06ed629398ac47a10202baa23d3853fc7793a
Binary files /dev/null and b/reactagent/prompt2model/model_trainer/__pycache__/callback.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_trainer/__pycache__/generate.cpython-310.pyc b/reactagent/prompt2model/model_trainer/__pycache__/generate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d2eedbb5e76292d4ee93be44dfe4d7ef603d732
Binary files /dev/null and b/reactagent/prompt2model/model_trainer/__pycache__/generate.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/model_trainer/__pycache__/mock.cpython-310.pyc b/reactagent/prompt2model/model_trainer/__pycache__/mock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..306d38bb376ccef16fd7c7803f0e4fefa7f0624e
Binary files /dev/null and b/reactagent/prompt2model/model_trainer/__pycache__/mock.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/prompt_parser/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/prompt_parser/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79029c645d161fa245632bb2d3d541210f95373f
Binary files /dev/null and b/reactagent/prompt2model/prompt_parser/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/prompt_parser/__pycache__/base.cpython-310.pyc b/reactagent/prompt2model/prompt_parser/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc42e67bdc2dd2b98a0d1485c3aca56c22e1c4bc
Binary files /dev/null and b/reactagent/prompt2model/prompt_parser/__pycache__/base.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/prompt_parser/__pycache__/instr_parser.cpython-310.pyc b/reactagent/prompt2model/prompt_parser/__pycache__/instr_parser.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3c5d0f2573671bc4c02f419eecf21595351bd8ee
Binary files /dev/null and b/reactagent/prompt2model/prompt_parser/__pycache__/instr_parser.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/prompt_parser/__pycache__/instr_parser_prompt.cpython-310.pyc b/reactagent/prompt2model/prompt_parser/__pycache__/instr_parser_prompt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6cffe00d587d79b43cc1046f58f981e9be169556
Binary files /dev/null and b/reactagent/prompt2model/prompt_parser/__pycache__/instr_parser_prompt.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/prompt_parser/__pycache__/mock.cpython-310.pyc b/reactagent/prompt2model/prompt_parser/__pycache__/mock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5af96709aaf3a7c08c995e497a54fd00de07f554
Binary files /dev/null and b/reactagent/prompt2model/prompt_parser/__pycache__/mock.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/utils/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09416390b3c60c37d18ef5953a921e7347fccf86
Binary files /dev/null and b/reactagent/prompt2model/utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/utils/__pycache__/api_tools.cpython-310.pyc b/reactagent/prompt2model/utils/__pycache__/api_tools.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8df5a8f842e32b9e1458b53772cbd4935bc67d0
Binary files /dev/null and b/reactagent/prompt2model/utils/__pycache__/api_tools.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/utils/__pycache__/dataset_utils.cpython-310.pyc b/reactagent/prompt2model/utils/__pycache__/dataset_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2e6b57e57743a63cc08961dfd082c81944e54ea
Binary files /dev/null and b/reactagent/prompt2model/utils/__pycache__/dataset_utils.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/utils/__pycache__/logging_utils.cpython-310.pyc b/reactagent/prompt2model/utils/__pycache__/logging_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c6334a690da59287e562a881a59328875bed48f
Binary files /dev/null and b/reactagent/prompt2model/utils/__pycache__/logging_utils.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/utils/__pycache__/parse_responses.cpython-310.pyc b/reactagent/prompt2model/utils/__pycache__/parse_responses.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3e83a19c8dac3937f1314a0bdb8d3948a99d809
Binary files /dev/null and b/reactagent/prompt2model/utils/__pycache__/parse_responses.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/utils/__pycache__/rng.cpython-310.pyc b/reactagent/prompt2model/utils/__pycache__/rng.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed202a12174401b592c4632410b27d17ae5f5433
Binary files /dev/null and b/reactagent/prompt2model/utils/__pycache__/rng.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/utils/tevatron_utils/__pycache__/__init__.cpython-310.pyc b/reactagent/prompt2model/utils/tevatron_utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c101f61c84b4aa7a2617c17bff2cb5f20723d99
Binary files /dev/null and b/reactagent/prompt2model/utils/tevatron_utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/utils/tevatron_utils/__pycache__/encode.cpython-310.pyc b/reactagent/prompt2model/utils/tevatron_utils/__pycache__/encode.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..632e317fe0193e690b971d2a5642aa18c0cbd2b4
Binary files /dev/null and b/reactagent/prompt2model/utils/tevatron_utils/__pycache__/encode.cpython-310.pyc differ
diff --git a/reactagent/prompt2model/utils/tevatron_utils/__pycache__/retrieve.cpython-310.pyc b/reactagent/prompt2model/utils/tevatron_utils/__pycache__/retrieve.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cfe6fb21fbfb622f911f3ad5e45b146b1b044a78
Binary files /dev/null and b/reactagent/prompt2model/utils/tevatron_utils/__pycache__/retrieve.cpython-310.pyc differ
diff --git a/reactagent/users/__pycache__/console_user.cpython-310.pyc b/reactagent/users/__pycache__/console_user.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e39848ef7f5276a3a77bf3d8ce365317fae703fc
Binary files /dev/null and b/reactagent/users/__pycache__/console_user.cpython-310.pyc differ
diff --git a/reactagent/users/__pycache__/user.cpython-310.pyc b/reactagent/users/__pycache__/user.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc7ab574c4599ed117c877141d3916511308bf4f
Binary files /dev/null and b/reactagent/users/__pycache__/user.cpython-310.pyc differ