Commit: rm extract

Files changed:
- .history/app_20250404171116.py +329 -0
- .history/app_20250404171127.py +329 -0
- .history/app_20250404171152.py +329 -0
- .history/app_20250404171213.py +329 -0
- .history/app_20250404171307.py +329 -0
- .history/app_20250404171308.py +329 -0
- .history/app_20250404171328.py +329 -0
- .history/app_20250404171503.py +329 -0
- .history/app_20250404171504.py +329 -0
- .history/app_20250404171505.py +329 -0
- .history/app_20250404171523.py +329 -0
- .history/app_20250404172015.py +342 -0
- .history/app_20250404172040.py +340 -0
- .history/app_20250404172125.py +333 -0
- .history/app_20250404172143.py +333 -0
- .history/app_20250404172217.py +333 -0
- .history/app_20250404172253.py +333 -0
- .history/app_20250404173244.py +333 -0
- .history/app_20250404173256.py +333 -0
- .history/app_20250404173257.py +333 -0
- .history/app_20250404174324.py +341 -0
- .history/app_20250404174352.py +341 -0
- .history/app_20250404174355.py +340 -0
- .history/app_20250404174357.py +339 -0
- .history/app_20250404174402.py +339 -0
- .history/app_20250404174438.py +341 -0
- .history/app_20250404174439.py +341 -0
- .history/app_20250404174544.py +335 -0
- .history/app_20250404174545.py +335 -0
- .history/app_20250404174823.py +335 -0
- .history/app_20250404174828.py +335 -0
- .history/app_20250404174831.py +335 -0
- .history/app_20250404174832.py +335 -0
- .history/app_20250404175024.py +335 -0
- .history/app_20250404175025.py +335 -0
- .history/app_20250404175029.py +335 -0
- .history/app_20250404175030.py +335 -0
- .history/app_20250404175127.py +335 -0
- .history/app_20250404175128.py +335 -0
- .history/app_20250404175236.py +335 -0
- .history/app_20250404175239.py +335 -0
- .history/app_20250404175250.py +335 -0
- .history/app_20250404175251.py +335 -0
- app.py +19 -13
.history/app_20250404171116.py
ADDED
@@ -0,0 +1,329 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]

# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    return paper_text

example_text = [load_example(1), load_example(2)]

# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    paper_text = load_example(index_ex)

    return paper_text, "", "", "", "", "", ""



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info)  # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex)  # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")


    gr.Markdown("## ⬇️ Click an example at bottom to start·!")


    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row():  # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)


    # Step 1: Extract Research Elements
    extract_button.click(
        fn=extract_research_elements,
        inputs=paper_text_input,
        outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
    )

    generate_button.click(
        fn=generate_and_store,
        inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
        outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
    )

    gr.Examples(
        examples=example_text,
        inputs=[paper_text_input],
        outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
        fn=load_example_and_set_index,
        run_on_click=True,
        # label="⬇️ Click an example to load"
    )




    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

    hypothesis_state.change(
        fn=load_phase_2_inputs,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[idea_input, plan_input, code_display]
    )

    # Start research agent
    start_exp_agnet.click(
        fn=start_experiment_agent,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[code_display, log, response, feedback]
    )

    submit_button.click(
        fn=submit_feedback,
        inputs=[feedback, log, response],
        outputs=[log, response, code_display, feedback]
    )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
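
Note: the snapshot above reads example/example_data.json at import time and, for each entry, replaces the code_init and code_final paths with the contents of the named files. That JSON is not part of this diff; the sketch below is only inferred from the keys the handlers access, and every value and file name in it is a placeholder, not the real demo data.

# Sketch (not from this diff): the shape app.py appears to expect in example/example_data.json.
# Keys come from the handlers above; all values and file names are placeholders.
example_data_sketch = {
    "1": {
        "title": "Dataset and Baseline for Automatic Student Feedback Analysis",
        "abstract": "<paper abstract>",
        "research_tasks": "<research task definition>",
        "research_gaps": "<research gaps of current works>",
        "keywords": "<paper keywords>",
        "recent_works": ["<recent work 1>", "<recent work 2>"],
        "hypothesis": "<predefined research hypothesis>",
        "experiment_plan": "<predefined experiment plan>",
        "code_init": "code_init_1.py",    # path under example/, replaced by the file's contents at load time
        "code_final": "code_final_1.py"   # path under example/, replaced by the file's contents at load time
    }
}

Because every Stage 2/3 response is scripted from process_steps, the feedback loop can also be traced without the UI. The driver below is a minimal sketch, assuming the snapshot above is importable as app and that the reactagent package and the example/ data are available (the import runs load_example_data):

# Hypothetical driver for the scripted agent loop; names are taken from the snapshot above.
import app

app.state_example = app.state_extract = app.state_generate = True  # pretend Phase 1 was completed
code, history, response, _ = app.start_experiment_agent("<hypothesis>", "<plan>")

while "Agent Finished." not in response:
    # Each call advances two entries in process_steps and appends to the running log.
    history, response, code, _ = app.submit_feedback("looks good", history, response)

print(response)  # "Agent Finished." once process_steps is exhausted
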
.history/app_20250404171127.py
ADDED
@@ -0,0 +1,329 @@
(content identical to .history/app_20250404171116.py above)
.history/app_20250404171152.py
ADDED
@@ -0,0 +1,329 @@
(identical to the snapshots above, except the stray "·" is removed from the "Click an example at bottom to start!" Markdown line)
.history/app_20250404171213.py
ADDED
@@ -0,0 +1,329 @@
(as far as shown, identical to .history/app_20250404171152.py above; the diff is truncated here)
|
188 |
+
"the decrease in loss indicates improved model performance."
|
189 |
+
)
|
190 |
+
},
|
191 |
+
{
|
192 |
+
"Action": "Evaluation",
|
193 |
+
"Observation": predefined_observation,
|
194 |
+
}
|
195 |
+
]
|
196 |
+
def info_to_message(info):
|
197 |
+
msg = ""
|
198 |
+
for k, v in info.items():
|
199 |
+
if isinstance(v, dict):
|
200 |
+
tempv = v
|
201 |
+
v = ""
|
202 |
+
for k2, v2 in tempv.items():
|
203 |
+
v += f"{k2}:\n {v2}\n"
|
204 |
+
v = User.indent_text(v, 2)
|
205 |
+
msg += '-' * 64
|
206 |
+
msg += '\n'
|
207 |
+
msg += f"{k}:\n{v}\n"
|
208 |
+
return msg
|
209 |
+
|
210 |
+
|
211 |
+
def handle_example_click(example_index):
|
212 |
+
global index_ex
|
213 |
+
index_ex = example_index
|
214 |
+
return load_example(index_ex) # Simply return the text to display it in the textbox
|
215 |
+
|
216 |
+
# Gradio Interface
|
217 |
+
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
|
218 |
+
gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
|
219 |
+
gr.Markdown("### ")
|
220 |
+
gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
|
221 |
+
gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")
|
222 |
+
|
223 |
+
|
224 |
+
|
225 |
+
gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
|
226 |
+
|
227 |
+
|
228 |
+
gr.Markdown("## ⬇️ Click an example at bottom to start!")
|
229 |
+
|
230 |
+
|
231 |
+
# Use state variables to store generated hypothesis and experiment plan
|
232 |
+
hypothesis_state = gr.State("")
|
233 |
+
experiment_plan_state = gr.State("")
|
234 |
+
|
235 |
+
########## Phase 1: Research Idea Generation Tab ##############
|
236 |
+
with gr.Tab("💡Stage 1: Research Idea Generation"):
|
237 |
+
gr.Markdown("### Extract Research Elements and Generate Research Ideas")
|
238 |
+
with gr.Row():
|
239 |
+
with gr.Column():
|
240 |
+
paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)
|
241 |
+
|
242 |
+
extract_button = gr.Button("🔍 Extract Research Elements")
|
243 |
+
with gr.Row():
|
244 |
+
tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
|
245 |
+
gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
|
246 |
+
keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
|
247 |
+
recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
|
248 |
+
with gr.Column():
|
249 |
+
with gr.Row(): # Move the button to the top
|
250 |
+
generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
|
251 |
+
with gr.Group():
|
252 |
+
gr.Markdown("### 🌟 Research Idea")
|
253 |
+
with gr.Row():
|
254 |
+
hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
|
255 |
+
experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
|
256 |
+
|
257 |
+
|
258 |
+
# Step 1: Extract Research Elements
|
259 |
+
extract_button.click(
|
260 |
+
fn=extract_research_elements,
|
261 |
+
inputs=paper_text_input,
|
262 |
+
outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
|
263 |
+
)
|
264 |
+
|
265 |
+
generate_button.click(
|
266 |
+
fn=generate_and_store,
|
267 |
+
inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
|
268 |
+
outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
|
269 |
+
)
|
270 |
+
|
271 |
+
gr.Examples(
|
272 |
+
examples=example_text,
|
273 |
+
inputs=[paper_text_input],
|
274 |
+
outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
|
275 |
+
fn=load_example_and_set_index,
|
276 |
+
run_on_click = True,
|
277 |
+
# label="⬇️ Click an example to load"
|
278 |
+
)
|
279 |
+
|
280 |
+
|
281 |
+
|
282 |
+
|
283 |
+
########## Phase 2 & 3: Experiment implementation and execution ##############
|
284 |
+
with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
|
285 |
+
gr.Markdown("### Interact with the ExperimentAgent")
|
286 |
+
|
287 |
+
with gr.Row():
|
288 |
+
with gr.Column():
|
289 |
+
with gr.Group():
|
290 |
+
gr.Markdown("### 🌟 Generated Research Idea")
|
291 |
+
with gr.Row():
|
292 |
+
idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
|
293 |
+
plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
|
294 |
+
|
295 |
+
with gr.Column():
|
296 |
+
start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
|
297 |
+
with gr.Group():
|
298 |
+
gr.Markdown("### Implementation + Execution Log")
|
299 |
+
log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
|
300 |
+
code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)
|
301 |
+
|
302 |
+
with gr.Column():
|
303 |
+
response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
|
304 |
+
feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
|
305 |
+
submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
|
306 |
+
|
307 |
+
hypothesis_state.change(
|
308 |
+
fn=load_phase_2_inputs,
|
309 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
310 |
+
outputs=[idea_input, plan_input, code_display]
|
311 |
+
)
|
312 |
+
|
313 |
+
# Start research agent
|
314 |
+
start_exp_agnet.click(
|
315 |
+
fn=start_experiment_agent,
|
316 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
317 |
+
outputs=[code_display, log, response, feedback]
|
318 |
+
)
|
319 |
+
|
320 |
+
submit_button.click(
|
321 |
+
fn=submit_feedback,
|
322 |
+
inputs=[feedback, log, response],
|
323 |
+
outputs=[log, response, code_display, feedback]
|
324 |
+
)
|
325 |
+
|
326 |
+
# Test
|
327 |
+
if __name__ == "__main__":
|
328 |
+
step_index = 0
|
329 |
+
app.launch(share=True)
|
.history/app_20250404171307.py
ADDED
@@ -0,0 +1,329 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]

# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    return paper_text

example_text = [load_example(1), load_example(2)]

# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    paper_text = load_example(index_ex)

    return paper_text, "", "", "", "", "", ""



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info) # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex) # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")


    gr.Markdown("## ⬇️ Click an example at bottom to start!")


    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                # with gr.Row():
                # tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                # gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                # keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                # recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row(): # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)


        # Step 1: Extract Research Elements
        extract_button.click(
            fn=extract_research_elements,
            inputs=paper_text_input,
            outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            # label="⬇️ Click an example to load"
        )




    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

    hypothesis_state.change(
        fn=load_phase_2_inputs,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[idea_input, plan_input, code_display]
    )

    # Start research agent
    start_exp_agnet.click(
        fn=start_experiment_agent,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[code_display, log, response, feedback]
    )

    submit_button.click(
        fn=submit_feedback,
        inputs=[feedback, log, response],
        outputs=[log, response, code_display, feedback]
    )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)

.history/app_20250404171308.py
ADDED
@@ -0,0 +1,329 @@
.history/app_20250404171328.py
ADDED
@@ -0,0 +1,329 @@
|
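Note on the UI wiring shown in the snapshot above: everything is declared inside one gr.Blocks context; gr.State components (hypothesis_state, experiment_plan_state) carry the generated idea from the Stage 1 tab to the Stage 2/3 tab, Button.click bindings map callback inputs to output components, and gr.Examples(run_on_click=True) routes a clicked example through load_example_and_set_index. The following minimal sketch illustrates that wiring pattern in isolation; it is not part of the committed files, and load_demo_example / generate_summary are invented stand-ins for the app's real callbacks.

import gradio as gr

# Invented stand-in callbacks for illustration only.
def load_demo_example(paper_text):
    # With run_on_click=True, clicking an example routes its value through this fn;
    # the returned tuple repopulates the listed output components.
    return paper_text, ""

def generate_summary(paper_text):
    summary = f"Summary of: {paper_text[:40]}..."
    # Return the value twice: once for display, once for the State component.
    return summary, summary

with gr.Blocks() as demo:
    summary_state = gr.State("")  # persists across tabs, like hypothesis_state above
    with gr.Tab("Stage 1"):
        paper = gr.Textbox(label="Paper text", interactive=False)
        summary = gr.Textbox(label="Summary", interactive=False)
        generate_btn = gr.Button("Generate")
        generate_btn.click(fn=generate_summary, inputs=paper,
                           outputs=[summary, summary_state])
        gr.Examples(examples=["Paper 1: ...", "Paper 2: ..."],
                    inputs=[paper], outputs=[paper, summary],
                    fn=load_demo_example, run_on_click=True)
    with gr.Tab("Stage 2"):
        mirrored = gr.Textbox(label="Summary carried over", interactive=False)
        # Updating the State in Stage 1 fires this listener and fills the Stage 2 view,
        # mirroring hypothesis_state.change(...) in the snapshot above.
        summary_state.change(fn=lambda s: s, inputs=summary_state, outputs=mirrored)

if __name__ == "__main__":
    demo.launch()

Keeping shared values in gr.State rather than in module-level globals is what lets the second tab react through the change listener as soon as the first tab produces a result.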
.history/app_20250404171503.py
ADDED
@@ -0,0 +1,329 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]

# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    return paper_text

example_text = [load_example(1), load_example(2)]

# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    paper_text = load_example(index_ex)

    return paper_text, "", "", "", "", "", ""


########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info)  # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."


predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]

def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex)  # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")

    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")

    gr.Markdown("## ⬇️ Click an example at bottom to start!")

    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row():  # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)

        # Step 1: Extract Research Elements
        extract_button.click(
            fn=extract_research_elements,
            inputs=paper_text_input,
            outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            # label="⬇️ Click an example to load"
        )

    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑‍💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑‍🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

        hypothesis_state.change(
            fn=load_phase_2_inputs,
            inputs=[hypothesis_state, experiment_plan_state],
            outputs=[idea_input, plan_input, code_display]
        )

        # Start research agent
        start_exp_agnet.click(
            fn=start_experiment_agent,
            inputs=[hypothesis_state, experiment_plan_state],
            outputs=[code_display, log, response, feedback]
        )

        submit_button.click(
            fn=submit_feedback,
            inputs=[feedback, log, response],
            outputs=[log, response, code_display, feedback]
        )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
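For reference, the Stage 2/3 interaction in these snapshots is scripted rather than produced by a live agent: start_experiment_agent resets a global step_index, and each Submit click advances it through the hard-coded process_steps list, formatting the next entry with info_to_message. The sketch below reduces that replay loop to its essentials so it can be run without the UI; it is illustrative only (not part of the committed files) and omits the state guards, the double index increment, and the code_init/code_final switch of the committed version.

# Hard-coded steps in the same shape as the app's process_steps entries.
process_steps = [
    {"Action": "Inspect Script Lines (train.py)", "Observation": "Placeholder functions found."},
    {"Action": "Edit Script (train.py)", "Observation": "train_model and predict implemented."},
    {"Action": "Evaluation", "Observation": "Train MSE: 0.242, Test MSE: 0.493"},
]
step_index = 0

def info_to_message(info):
    # Render one step dictionary as a delimited text block, like the app's helper.
    parts = []
    for key, value in info.items():
        parts.append("-" * 64)
        parts.append(f"{key}:\n{value}\n")
    return "\n".join(parts)

def submit_feedback(user_feedback, history, previous_response):
    # Append the previous response and the user's feedback, then reveal the next step.
    global step_index
    msg = history + previous_response + "\nUser feedback: " + user_feedback + "\n\n"
    if step_index < len(process_steps):
        response = info_to_message(process_steps[step_index])
        step_index += 1
    else:
        response = "Agent Finished."
    return msg, response

if __name__ == "__main__":
    log, reply = "", ""
    for fb in ["looks good", "proceed", "ok", "done"]:
        log, reply = submit_feedback(fb, log, reply)
        print(reply)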
.history/app_20250404171504.py
ADDED
@@ -0,0 +1,329 @@
.history/app_20250404171505.py
ADDED
@@ -0,0 +1,329 @@
.history/app_20250404171523.py
ADDED
@@ -0,0 +1,329 @@
|
178 |
+
),
|
179 |
+
},
|
180 |
+
{
|
181 |
+
"Action": "Retrieve Model",
|
182 |
+
"Observation": "CNN and BiLSTM retrieved.",
|
183 |
+
},
|
184 |
+
{
|
185 |
+
"Action": "Execute Script (train.py)",
|
186 |
+
"Observation": (
|
187 |
+
"The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
|
188 |
+
"the decrease in loss indicates improved model performance."
|
189 |
+
)
|
190 |
+
},
|
191 |
+
{
|
192 |
+
"Action": "Evaluation",
|
193 |
+
"Observation": predefined_observation,
|
194 |
+
}
|
195 |
+
]
|
196 |
+
def info_to_message(info):
|
197 |
+
msg = ""
|
198 |
+
for k, v in info.items():
|
199 |
+
if isinstance(v, dict):
|
200 |
+
tempv = v
|
201 |
+
v = ""
|
202 |
+
for k2, v2 in tempv.items():
|
203 |
+
v += f"{k2}:\n {v2}\n"
|
204 |
+
v = User.indent_text(v, 2)
|
205 |
+
msg += '-' * 64
|
206 |
+
msg += '\n'
|
207 |
+
msg += f"{k}:\n{v}\n"
|
208 |
+
return msg
|
209 |
+
|
210 |
+
|
211 |
+
def handle_example_click(example_index):
|
212 |
+
global index_ex
|
213 |
+
index_ex = example_index
|
214 |
+
return load_example(index_ex) # Simply return the text to display it in the textbox
|
215 |
+
|
216 |
+
# Gradio Interface
|
217 |
+
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
|
218 |
+
gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
|
219 |
+
gr.Markdown("### ")
|
220 |
+
gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
|
221 |
+
gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")
|
222 |
+
|
223 |
+
|
224 |
+
|
225 |
+
gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
|
226 |
+
|
227 |
+
|
228 |
+
gr.Markdown("## ⬇️ Click an example at bottom to start!")
|
229 |
+
|
230 |
+
|
231 |
+
# Use state variables to store generated hypothesis and experiment plan
|
232 |
+
hypothesis_state = gr.State("")
|
233 |
+
experiment_plan_state = gr.State("")
|
234 |
+
|
235 |
+
########## Phase 1: Research Idea Generation Tab ##############
|
236 |
+
with gr.Tab("💡Stage 1: Research Idea Generation"):
|
237 |
+
gr.Markdown("### Extract Research Elements and Generate Research Ideas")
|
238 |
+
with gr.Row():
|
239 |
+
with gr.Column():
|
240 |
+
paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)
|
241 |
+
|
242 |
+
# extract_button = gr.Button("🔍 Extract Research Elements")
|
243 |
+
with gr.Row():
|
244 |
+
tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
|
245 |
+
gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
|
246 |
+
keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
|
247 |
+
recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
|
248 |
+
with gr.Column():
|
249 |
+
with gr.Row(): # Move the button to the top
|
250 |
+
generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
|
251 |
+
with gr.Group():
|
252 |
+
gr.Markdown("### 🌟 Research Idea")
|
253 |
+
with gr.Row():
|
254 |
+
hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
|
255 |
+
experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
|
256 |
+
|
257 |
+
|
258 |
+
# Step 1: Extract Research Elements
|
259 |
+
# extract_button.click(
|
260 |
+
# fn=extract_research_elements,
|
261 |
+
# inputs=paper_text_input,
|
262 |
+
# outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
|
263 |
+
# )
|
264 |
+
|
265 |
+
generate_button.click(
|
266 |
+
fn=generate_and_store,
|
267 |
+
inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
|
268 |
+
outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
|
269 |
+
)
|
270 |
+
|
271 |
+
gr.Examples(
|
272 |
+
examples=example_text,
|
273 |
+
inputs=[paper_text_input],
|
274 |
+
outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
|
275 |
+
fn=load_example_and_set_index,
|
276 |
+
run_on_click = True,
|
277 |
+
# label="⬇️ Click an example to load"
|
278 |
+
)
|
279 |
+
|
280 |
+
|
281 |
+
|
282 |
+
|
283 |
+
########## Phase 2 & 3: Experiment implementation and execution ##############
|
284 |
+
with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
|
285 |
+
gr.Markdown("### Interact with the ExperimentAgent")
|
286 |
+
|
287 |
+
with gr.Row():
|
288 |
+
with gr.Column():
|
289 |
+
with gr.Group():
|
290 |
+
gr.Markdown("### 🌟 Generated Research Idea")
|
291 |
+
with gr.Row():
|
292 |
+
idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
|
293 |
+
plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
|
294 |
+
|
295 |
+
with gr.Column():
|
296 |
+
start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
|
297 |
+
with gr.Group():
|
298 |
+
gr.Markdown("### Implementation + Execution Log")
|
299 |
+
log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
|
300 |
+
code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)
|
301 |
+
|
302 |
+
with gr.Column():
|
303 |
+
response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
|
304 |
+
feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
|
305 |
+
submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
|
306 |
+
|
307 |
+
hypothesis_state.change(
|
308 |
+
fn=load_phase_2_inputs,
|
309 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
310 |
+
outputs=[idea_input, plan_input, code_display]
|
311 |
+
)
|
312 |
+
|
313 |
+
# Start research agent
|
314 |
+
start_exp_agnet.click(
|
315 |
+
fn=start_experiment_agent,
|
316 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
317 |
+
outputs=[code_display, log, response, feedback]
|
318 |
+
)
|
319 |
+
|
320 |
+
submit_button.click(
|
321 |
+
fn=submit_feedback,
|
322 |
+
inputs=[feedback, log, response],
|
323 |
+
outputs=[log, response, code_display, feedback]
|
324 |
+
)
|
325 |
+
|
326 |
+
# Test
|
327 |
+
if __name__ == "__main__":
|
328 |
+
step_index = 0
|
329 |
+
app.launch(share=True)
|
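Aside (not part of the diff): the loaders above touch only a handful of keys in example/example_data.json. The sketch below is a minimal guess at the entry shape they expect; the key names come from the code above, while every value and file name is an invented placeholder.

# Illustrative only — key names inferred from load_example_data/load_example and the
# Phase 1/2 handlers above; all values and file names are placeholders, not real data.
example_data_sketch = {
    "1": {
        "title": "Dataset and Baseline for Automatic Student Feedback Analysis",
        "abstract": "…",
        "research_tasks": "…",
        "research_gaps": "…",
        "keywords": "…",
        "recent_works": ["…", "…"],
        "hypothesis": "…",
        "experiment_plan": "…",
        "code_init": "code_init_1.py",    # replaced by that file's contents at load time
        "code_final": "code_final_1.py",  # replaced by that file's contents at load time
    }
}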
.history/app_20250404172015.py ADDED @@ -0,0 +1,342 @@
This autosave snapshot (342 lines) is identical to .history/app_20250404171116.py above except for load_example, which here additionally inlines the extraction checks (the 13 extra lines shift the rest of the file down accordingly):

def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return paper_text
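Aside (not part of the diff): every snapshot in this commit ships the same info_to_message helper. Below is a minimal, self-contained sketch of the log format it produces for one process_steps entry; the nested-dict branch is dropped and the entry itself is made up for illustration.

# Illustrative only: a stripped-down copy of info_to_message from the listings above,
# applied to one hypothetical process_steps entry to show the log layout it builds.
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        msg += '-' * 64 + '\n'   # separator line
        msg += f"{k}:\n{v}\n"    # "Key:" followed by its value
    return msg

step = {"Action": "Execute Script (train.py)",
        "Observation": "Train MSE: 0.242, Test MSE: 0.493"}
print(info_to_message(step))
# ----------------------------------------------------------------
# Action:
# Execute Script (train.py)
# ----------------------------------------------------------------
# Observation:
# Train MSE: 0.242, Test MSE: 0.493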
.history/app_20250404172040.py ADDED @@ -0,0 +1,340 @@
This autosave snapshot (340 lines) is identical to .history/app_20250404171116.py above except for load_example, which here keeps the inlined extraction logic but drops the empty-input early return (the 11 extra lines shift the rest of the file down accordingly):

def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    global state_extract, index_ex, state_example
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return paper_text
.history/app_20250404172125.py ADDED @@ -0,0 +1,333 @@
This autosave snapshot (333 lines) differs from .history/app_20250404171116.py above only in load_example, which now also returns the extracted elements (the four extra lines account for the different line count); the portion rendered here runs through line 313:

def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return paper_text, tasks, gaps, keywords, recent_works
|
314 |
+
outputs=[idea_input, plan_input, code_display]
|
315 |
+
)
|
316 |
+
|
317 |
+
# Start research agent
|
318 |
+
start_exp_agnet.click(
|
319 |
+
fn=start_experiment_agent,
|
320 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
321 |
+
outputs=[code_display, log, response, feedback]
|
322 |
+
)
|
323 |
+
|
324 |
+
submit_button.click(
|
325 |
+
fn=submit_feedback,
|
326 |
+
inputs=[feedback, log, response],
|
327 |
+
outputs=[log, response, code_display, feedback]
|
328 |
+
)
|
329 |
+
|
330 |
+
# Test
|
331 |
+
if __name__ == "__main__":
|
332 |
+
step_index = 0
|
333 |
+
app.launch(share=True)
|
.history/app_20250404172143.py
ADDED
@@ -0,0 +1,333 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]

# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return [paper_text, tasks, gaps, keywords, recent_works]

example_text = [load_example(1), load_example(2)]

# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    paper_text = load_example(index_ex)

    return paper_text, "", "", "", "", "", ""



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info) # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex) # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")


    gr.Markdown("## ⬇️ Click an example at bottom to start!")


    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row(): # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)


        # Step 1: Extract Research Elements
        # extract_button.click(
        #     fn=extract_research_elements,
        #     inputs=paper_text_input,
        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        # )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            # label="⬇️ Click an example to load"
        )




    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

    hypothesis_state.change(
        fn=load_phase_2_inputs,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[idea_input, plan_input, code_display]
    )

    # Start research agent
    start_exp_agnet.click(
        fn=start_experiment_agent,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[code_display, log, response, feedback]
    )

    submit_button.click(
        fn=submit_feedback,
        inputs=[feedback, log, response],
        outputs=[log, response, code_display, feedback]
    )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
.history/app_20250404172217.py
ADDED
@@ -0,0 +1,333 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]

# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return [paper_text, tasks, gaps, keywords, recent_works]

example_text = [load_example(1), load_example(2)]

# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    example = load_example(index_ex)

    return example



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info) # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex) # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")


    gr.Markdown("## ⬇️ Click an example at bottom to start!")


    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row(): # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)


        # Step 1: Extract Research Elements
        # extract_button.click(
        #     fn=extract_research_elements,
        #     inputs=paper_text_input,
        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        # )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            # label="⬇️ Click an example to load"
        )




    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

    hypothesis_state.change(
        fn=load_phase_2_inputs,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[idea_input, plan_input, code_display]
    )

    # Start research agent
    start_exp_agnet.click(
        fn=start_experiment_agent,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[code_display, log, response, feedback]
    )

    submit_button.click(
        fn=submit_feedback,
        inputs=[feedback, log, response],
        outputs=[log, response, code_display, feedback]
    )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
.history/app_20250404172253.py
ADDED
@@ -0,0 +1,333 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]

# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return [paper_text, tasks, gaps, keywords, recent_works]

example_text = [load_example(1), load_example(2)]

# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    example = load_example(index_ex)

    return example



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info) # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex) # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")


    gr.Markdown("## ⬇️ Click an example at bottom to start!")


    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row(): # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)


        # Step 1: Extract Research Elements
        # extract_button.click(
        #     fn=extract_research_elements,
        #     inputs=paper_text_input,
        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        # )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            # label="⬇️ Click an example to load"
        )




    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

    hypothesis_state.change(
        fn=load_phase_2_inputs,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[idea_input, plan_input, code_display]
    )

    # Start research agent
    start_exp_agnet.click(
        fn=start_experiment_agent,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[code_display, log, response, feedback]
    )

    submit_button.click(
        fn=submit_feedback,
        inputs=[feedback, log, response],
        outputs=[log, response, code_display, feedback]
    )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
.history/app_20250404173244.py
ADDED
@@ -0,0 +1,333 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]

# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return paper_text, tasks, gaps, keywords, recent_works

example_text = [load_example(1), load_example(2)]

# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    example = load_example(index_ex)

    return example



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info) # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex) # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")


    gr.Markdown("## ⬇️ Click an example at bottom to start!")


    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row(): # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)


        # Step 1: Extract Research Elements
        # extract_button.click(
        #     fn=extract_research_elements,
        #     inputs=paper_text_input,
        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        # )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            # label="⬇️ Click an example to load"
        )




    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

    hypothesis_state.change(
        fn=load_phase_2_inputs,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[idea_input, plan_input, code_display]
    )

    # Start research agent
    start_exp_agnet.click(
        fn=start_experiment_agent,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[code_display, log, response, feedback]
    )

    submit_button.click(
        fn=submit_feedback,
        inputs=[feedback, log, response],
        outputs=[log, response, code_display, feedback]
    )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
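The loader at the top of this file reads example/example_data.json and then replaces the code_init and code_final fields with the contents of the referenced files under example/. The repository's actual JSON is not shown in this diff, so the snippet below is only a sketch of the shape load_example_data() appears to expect: the keys are the ones the code indexes, while the values and file names are placeholders.

import json

# Hypothetical minimal example_data.json matching the fields the app reads:
# title, abstract, research_tasks, research_gaps, keywords, recent_works,
# hypothesis, experiment_plan, code_init, code_final.
minimal_example = {
    "1": {
        "title": "Dataset and Baseline for Automatic Student Feedback Analysis",
        "abstract": "Placeholder abstract text.",
        "research_tasks": "Placeholder task definition.",
        "research_gaps": "Placeholder research gaps.",
        "keywords": "student feedback, dataset, baseline",
        "recent_works": ["Recent work A", "Recent work B"],  # joined with "\n" by the app
        "hypothesis": "Placeholder hypothesis.",
        "experiment_plan": "Placeholder experiment plan.",
        "code_init": "code_init_1.py",    # assumed file under example/, read at startup
        "code_final": "code_final_1.py",  # assumed file under example/, read at startup
    }
}

with open("example_data.sketch.json", "w") as f:
    json.dump(minimal_example, f, indent=2)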
.history/app_20250404173256.py
ADDED
@@ -0,0 +1,333 @@
(Full 333-line file, identical to .history/app_20250404173244.py above except that load_example, at line 59, returns the fields as a list rather than a tuple:

    return [paper_text, tasks, gaps, keywords, recent_works]
)
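The scripted agent loop in these files renders each entry of process_steps through info_to_message before appending it to the running log in submit_feedback. The standalone sketch below shows that formatting; it is simplified to string-valued steps only (the original also indents nested dict values via User.indent_text), and the sample step is copied from the "Retrieve Model" entry defined in the file.

# Simplified copy of the formatter used in these history files.
def info_to_message(info: dict) -> str:
    msg = ""
    for k, v in info.items():
        msg += "-" * 64 + "\n"   # dashed separator before each entry
        msg += f"{k}:\n{v}\n"    # "Action:" / "Observation:" blocks
    return msg

step = {"Action": "Retrieve Model", "Observation": "CNN and BiLSTM retrieved."}
print(info_to_message(step))
# This dashed Action/Observation block is what submit_feedback appends to the
# execution log on each user-feedback turn.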
.history/app_20250404173257.py
ADDED
@@ -0,0 +1,333 @@
(Identical to .history/app_20250404173256.py above; no further changes in this snapshot.)
.history/app_20250404174324.py
ADDED
@@ -0,0 +1,341 @@
Apart from the block below, the shown portion of this file (lines 1-271) matches .history/app_20250404173256.py above, with line numbers shifted by the newly inserted lines; the diff excerpt ends inside the commented-out extract_button wiring. The substantive change replaces the module-level example_text assignment:

# WRONG: example_text = [load_example(1), load_example(2)]

# ✅ FIXED:
example_text = []
for i in ["1", "2"]:
    paper_text = 'Title:\t' + example_data[i]['title'] + '\n\nAbstract:\t' + example_data[i]['abstract']
    example_text.append(paper_text)
|
272 |
+
# fn=extract_research_elements,
|
273 |
+
# inputs=paper_text_input,
|
274 |
+
# outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
|
275 |
+
# )
|
276 |
+
|
277 |
+
generate_button.click(
|
278 |
+
fn=generate_and_store,
|
279 |
+
inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
|
280 |
+
outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
|
281 |
+
)
|
282 |
+
|
283 |
+
gr.Examples(
|
284 |
+
examples=example_text,
|
285 |
+
inputs=[paper_text_input],
|
286 |
+
outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
|
287 |
+
fn=load_example_and_set_index,
|
288 |
+
run_on_click = True,
|
289 |
+
# label="⬇️ Click an example to load"
|
290 |
+
)
|
291 |
+
|
292 |
+
|
293 |
+
|
294 |
+
|
295 |
+
########## Phase 2 & 3: Experiment implementation and execution ##############
|
296 |
+
with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
|
297 |
+
gr.Markdown("### Interact with the ExperimentAgent")
|
298 |
+
|
299 |
+
with gr.Row():
|
300 |
+
with gr.Column():
|
301 |
+
with gr.Group():
|
302 |
+
gr.Markdown("### 🌟 Generated Research Idea")
|
303 |
+
with gr.Row():
|
304 |
+
idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
|
305 |
+
plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
|
306 |
+
|
307 |
+
with gr.Column():
|
308 |
+
start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
|
309 |
+
with gr.Group():
|
310 |
+
gr.Markdown("### Implementation + Execution Log")
|
311 |
+
log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
|
312 |
+
code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)
|
313 |
+
|
314 |
+
with gr.Column():
|
315 |
+
response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
|
316 |
+
feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
|
317 |
+
submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
|
318 |
+
|
319 |
+
hypothesis_state.change(
|
320 |
+
fn=load_phase_2_inputs,
|
321 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
322 |
+
outputs=[idea_input, plan_input, code_display]
|
323 |
+
)
|
324 |
+
|
325 |
+
# Start research agent
|
326 |
+
start_exp_agnet.click(
|
327 |
+
fn=start_experiment_agent,
|
328 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
329 |
+
outputs=[code_display, log, response, feedback]
|
330 |
+
)
|
331 |
+
|
332 |
+
submit_button.click(
|
333 |
+
fn=submit_feedback,
|
334 |
+
inputs=[feedback, log, response],
|
335 |
+
outputs=[log, response, code_display, feedback]
|
336 |
+
)
|
337 |
+
|
338 |
+
# Test
|
339 |
+
if __name__ == "__main__":
|
340 |
+
step_index = 0
|
341 |
+
app.launch(share=True)
|
.history/app_20250404174352.py
ADDED
@@ -0,0 +1,341 @@
.history/app_20250404174355.py
ADDED
@@ -0,0 +1,340 @@
.history/app_20250404174357.py
ADDED
@@ -0,0 +1,339 @@
1 |
+
import gradio as gr
|
2 |
+
from pathlib import Path
|
3 |
+
from reactagent.environment import Environment
|
4 |
+
from reactagent.agents.agent_research import ResearchAgent
|
5 |
+
from reactagent.runner import create_parser
|
6 |
+
from reactagent import llm
|
7 |
+
from reactagent.users.user import User
|
8 |
+
import os
|
9 |
+
import json
|
10 |
+
|
11 |
+
|
12 |
+
# Global variables to store session state
|
13 |
+
env = None
|
14 |
+
agent = None
|
15 |
+
state_example = False
|
16 |
+
state_extract = False
|
17 |
+
state_generate = False
|
18 |
+
state_agent = False
|
19 |
+
state_complete = False
|
20 |
+
index_ex = "1"
|
21 |
+
|
22 |
+
example_text = [
|
23 |
+
"Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
|
24 |
+
"Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
|
25 |
+
]
|
26 |
+
|
27 |
+
|
28 |
+
# Load example JSON file
|
29 |
+
def load_example_data():
|
30 |
+
with open("example/example_data.json", "r") as json_file:
|
31 |
+
example_data = json.load(json_file)
|
32 |
+
|
33 |
+
for idx in example_data.keys():
|
34 |
+
try:
|
35 |
+
file = example_data[idx]["code_init"]
|
36 |
+
with open(os.path.join("example", file), "r") as f:
|
37 |
+
example_data[idx]["code_init"] = f.read()
|
38 |
+
except FileNotFoundError:
|
39 |
+
print(f"File not found: {file}. Skipping key: {idx}")
|
40 |
+
try:
|
41 |
+
file = example_data[idx]["code_final"]
|
42 |
+
with open(os.path.join("example", file), "r") as f:
|
43 |
+
example_data[idx]["code_final"] = f.read()
|
44 |
+
except FileNotFoundError:
|
45 |
+
print(f"File not found: {file}. Skipping key: {idx}")
|
46 |
+
return example_data
|
47 |
+
|
48 |
+
example_data = load_example_data()
|
49 |
+
|
50 |
+
# Function to handle the selection of an example and populate the respective fields
|
51 |
+
def load_example(example_id):
|
52 |
+
global index_ex
|
53 |
+
index_ex = str(example_id)
|
54 |
+
example = example_data[index_ex]
|
55 |
+
paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
|
56 |
+
tasks = example['research_tasks']
|
57 |
+
gaps = example['research_gaps']
|
58 |
+
keywords = example['keywords']
|
59 |
+
recent_works = "\n".join(example['recent_works'])
|
60 |
+
return [paper_text, tasks, gaps, keywords, recent_works]
|
61 |
+
|
62 |
+
# WRONG: example_text = [load_example(1), load_example(2)]
|
63 |
+
example_text = []
|
64 |
+
for i in ["1", "2"]:
|
65 |
+
paper_text = 'Title:\t' + example_data[i]['title'] + '\n\nAbstract:\t' + example_data[i]['abstract']
|
66 |
+
example_text.append(paper_text)
|
67 |
+
|
68 |
+
|
69 |
+
# Function to handle example clicks
|
70 |
+
def load_example_and_set_index(paper_text_input):
|
71 |
+
global index_ex, state_example
|
72 |
+
state_example = True
|
73 |
+
index_ex = str(example_text.index(paper_text_input) + 1)
|
74 |
+
example = load_example(index_ex)
|
75 |
+
|
76 |
+
return example
|
77 |
+
|
78 |
+
|
79 |
+
|
80 |
+
########## Phase 1 ##############
|
81 |
+
|
82 |
+
def extract_research_elements(paper_text):
|
83 |
+
global state_extract, index_ex, state_example
|
84 |
+
if not state_example or paper_text == "":
|
85 |
+
return "", "", "", ""
|
86 |
+
state_extract = True
|
87 |
+
if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
|
88 |
+
print("Mismatch detected.")
|
89 |
+
print(paper_text)
|
90 |
+
return "", "", "", ""
|
91 |
+
example = example_data[index_ex]
|
92 |
+
tasks = example['research_tasks']
|
93 |
+
gaps = example['research_gaps']
|
94 |
+
keywords = example['keywords']
|
95 |
+
recent_works = "\n".join(example['recent_works'])
|
96 |
+
return tasks, gaps, keywords, recent_works
|
97 |
+
|
98 |
+
|
99 |
+
# Step 2: Generate Research Hypothesis and Experiment Plan
|
100 |
+
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
|
101 |
+
if (not state_extract or not state_example or paper_text == ""):
|
102 |
+
return "", "", "", ""
|
103 |
+
global state_generate, index_ex
|
104 |
+
state_generate = True
|
105 |
+
hypothesis = example_data[index_ex]['hypothesis']
|
106 |
+
experiment_plan = example_data[index_ex]['experiment_plan']
|
107 |
+
return hypothesis, experiment_plan, hypothesis, experiment_plan
|
108 |
+
|
109 |
+
########## Phase 2 & 3 ##############
|
110 |
+
def start_experiment_agent(hypothesis, plan):
|
111 |
+
if (not state_extract or not state_generate or not state_example):
|
112 |
+
return "", "", ""
|
113 |
+
global state_agent, step_index, state_complete
|
114 |
+
state_agent = True
|
115 |
+
step_index = 0
|
116 |
+
state_complete = False
|
117 |
+
# predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
|
118 |
+
return example_data[index_ex]['code_init'], predefined_action_log, "", ""
|
119 |
+
|
120 |
+
def submit_feedback(user_feedback, history, previous_response):
|
121 |
+
if (not state_extract or not state_generate or not state_agent or not state_example):
|
122 |
+
return "", "", ""
|
123 |
+
global step_index, state_complete
|
124 |
+
step_index += 1
|
125 |
+
msg = history
|
126 |
+
if step_index < len(process_steps):
|
127 |
+
msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
|
128 |
+
response_info = process_steps[step_index]
|
129 |
+
response = info_to_message(response_info) # Convert dictionary to formatted string
|
130 |
+
response += "Please provide feedback based on the history, response entries, and observation, and questions: "
|
131 |
+
step_index += 1
|
132 |
+
msg += response
|
133 |
+
else:
|
134 |
+
state_complete = True
|
135 |
+
response = "Agent Finished."
|
136 |
+
|
137 |
+
return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""
|
138 |
+
|
139 |
+
def load_phase_2_inputs(hypothesis, plan):
|
140 |
+
return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
|
141 |
+
|
142 |
+
|
143 |
+
|
144 |
+
predefined_action_log = """
|
145 |
+
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
|
146 |
+
[Action]: Inspect Script (train.py)
|
147 |
+
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
|
148 |
+
Objective: Understand the training script, including data processing, [...]
|
149 |
+
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
|
150 |
+
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
|
151 |
+
"""
|
152 |
+
|
153 |
+
|
154 |
+
predefined_observation = """
|
155 |
+
Epoch [1/10],
|
156 |
+
Train MSE: 0.543,
|
157 |
+
Test MSE: 0.688
|
158 |
+
Epoch [2/10],
|
159 |
+
Train MSE: 0.242,
|
160 |
+
Test MSE: 0.493\n
|
161 |
+
"""
|
162 |
+
|
163 |
+
# Initialize the global step_index and history
|
164 |
+
process_steps = [
|
165 |
+
{
|
166 |
+
"Action": "Inspect Script Lines (train.py)",
|
167 |
+
"Observation": (
|
168 |
+
"The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
|
169 |
+
"Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
|
170 |
+
"to calculate RMSE for different dimensions. Placeholder functions train_model and "
|
171 |
+
"predict exist without implementations."
|
172 |
+
),
|
173 |
+
},
|
174 |
+
{
|
175 |
+
"Action": "Execute Script (train.py)",
|
176 |
+
"Observation": (
|
177 |
+
"The script executed successfully. Generated embeddings using the BERT model. Completed "
|
178 |
+
"the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
|
179 |
+
),
|
180 |
+
},
|
181 |
+
{
|
182 |
+
"Action": "Edit Script (train.py)",
|
183 |
+
"Observation": (
|
184 |
+
"Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
|
185 |
+
"The edited train.py now has clearly defined functions"
|
186 |
+
"for data loading (load_data), model definition (build_model), "
|
187 |
+
"training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
|
188 |
+
),
|
189 |
+
},
|
190 |
+
{
|
191 |
+
"Action": "Retrieve Model",
|
192 |
+
"Observation": "CNN and BiLSTM retrieved.",
|
193 |
+
},
|
194 |
+
{
|
195 |
+
"Action": "Execute Script (train.py)",
|
196 |
+
"Observation": (
|
197 |
+
"The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
|
198 |
+
"the decrease in loss indicates improved model performance."
|
199 |
+
)
|
200 |
+
},
|
201 |
+
{
|
202 |
+
"Action": "Evaluation",
|
203 |
+
"Observation": predefined_observation,
|
204 |
+
}
|
205 |
+
]
|
206 |
+
def info_to_message(info):
|
207 |
+
msg = ""
|
208 |
+
for k, v in info.items():
|
209 |
+
if isinstance(v, dict):
|
210 |
+
tempv = v
|
211 |
+
v = ""
|
212 |
+
for k2, v2 in tempv.items():
|
213 |
+
v += f"{k2}:\n {v2}\n"
|
214 |
+
v = User.indent_text(v, 2)
|
215 |
+
msg += '-' * 64
|
216 |
+
msg += '\n'
|
217 |
+
msg += f"{k}:\n{v}\n"
|
218 |
+
return msg
|
219 |
+
|
220 |
+
|
221 |
+
def handle_example_click(example_index):
|
222 |
+
global index_ex
|
223 |
+
index_ex = example_index
|
224 |
+
return load_example(index_ex) # Simply return the text to display it in the textbox
|
225 |
+
|
226 |
+
# Gradio Interface
|
227 |
+
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
|
228 |
+
gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
|
229 |
+
gr.Markdown("### ")
|
230 |
+
gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
|
231 |
+
gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")
|
232 |
+
|
233 |
+
|
234 |
+
|
235 |
+
gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
|
236 |
+
|
237 |
+
|
238 |
+
gr.Markdown("## ⬇️ Click an example at bottom to start!")
|
239 |
+
|
240 |
+
|
241 |
+
# Use state variables to store generated hypothesis and experiment plan
|
242 |
+
hypothesis_state = gr.State("")
|
243 |
+
experiment_plan_state = gr.State("")
|
244 |
+
|
245 |
+
########## Phase 1: Research Idea Generation Tab ##############
|
246 |
+
with gr.Tab("💡Stage 1: Research Idea Generation"):
|
247 |
+
gr.Markdown("### Extract Research Elements and Generate Research Ideas")
|
248 |
+
with gr.Row():
|
249 |
+
with gr.Column():
|
250 |
+
paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)
|
251 |
+
|
252 |
+
# extract_button = gr.Button("🔍 Extract Research Elements")
|
253 |
+
with gr.Row():
|
254 |
+
tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
|
255 |
+
gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
|
256 |
+
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row():  # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)

        # Step 1: Extract Research Elements
        # extract_button.click(
        #     fn=extract_research_elements,
        #     inputs=paper_text_input,
        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        # )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
            fn=load_example_and_set_index,
            run_on_click=True,
            # label="⬇️ Click an example to load"
        )

    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑‍💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑‍🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

    hypothesis_state.change(
        fn=load_phase_2_inputs,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[idea_input, plan_input, code_display]
    )

    # Start research agent
    start_exp_agnet.click(
        fn=start_experiment_agent,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[code_display, log, response, feedback]
    )

    submit_button.click(
        fn=submit_feedback,
        inputs=[feedback, log, response],
        outputs=[log, response, code_display, feedback]
    )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
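The wiring in the snapshot above follows one Gradio pattern throughout: clicking a gr.Examples entry fills the paper textbox and runs a handler, the generate button writes its result into both the visible textboxes and hidden gr.State holders, and the State's .change event carries the idea into the Stage 2 tab. The sketch below isolates that pattern; it is a minimal illustration under stated assumptions, not code from this repository. It assumes a recent Gradio release in which gr.State supports a .change listener (which the snapshots rely on), and the names demo, pick_example, and forward_idea are made up for the example.

import gradio as gr

EXAMPLES = ["Paper A", "Paper B"]  # placeholder titles, not the repository's examples


def pick_example(paper_text):
    # Stand-in for load_example_and_set_index / generate_and_store: derive a value
    # and return it twice, once for the visible textbox and once for the State.
    hypothesis = f"Hypothesis drafted for: {paper_text}"
    return hypothesis, hypothesis


def forward_idea(hypothesis):
    # Stand-in for load_phase_2_inputs: copy the stored idea into the second tab.
    return hypothesis


with gr.Blocks() as demo:
    hypothesis_state = gr.State("")
    with gr.Tab("Stage 1"):
        paper_in = gr.Textbox(label="Paper")
        hypo_out = gr.Textbox(label="Hypothesis", interactive=False)
        gr.Examples(
            examples=EXAMPLES,
            inputs=[paper_in],
            outputs=[hypo_out, hypothesis_state],
            fn=pick_example,
            run_on_click=True,
        )
    with gr.Tab("Stage 2"):
        idea_in = gr.Textbox(label="Idea carried over", interactive=False)
    hypothesis_state.change(fn=forward_idea, inputs=[hypothesis_state], outputs=[idea_in])

if __name__ == "__main__":
    demo.launch()

Using gr.State for the carried-over value keeps it per user session, unlike the module-level flags the snapshots also use.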
.history/app_20250404174402.py
ADDED
@@ -0,0 +1,339 @@
.history/app_20250404174438.py
ADDED
@@ -0,0 +1,341 @@
.history/app_20250404174439.py
ADDED
@@ -0,0 +1,341 @@
.history/app_20250404174544.py
ADDED
@@ -0,0 +1,335 @@
},
|
186 |
+
{
|
187 |
+
"Action": "Retrieve Model",
|
188 |
+
"Observation": "CNN and BiLSTM retrieved.",
|
189 |
+
},
|
190 |
+
{
|
191 |
+
"Action": "Execute Script (train.py)",
|
192 |
+
"Observation": (
|
193 |
+
"The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
|
194 |
+
"the decrease in loss indicates improved model performance."
|
195 |
+
)
|
196 |
+
},
|
197 |
+
{
|
198 |
+
"Action": "Evaluation",
|
199 |
+
"Observation": predefined_observation,
|
200 |
+
}
|
201 |
+
]
|
202 |
+
def info_to_message(info):
|
203 |
+
msg = ""
|
204 |
+
for k, v in info.items():
|
205 |
+
if isinstance(v, dict):
|
206 |
+
tempv = v
|
207 |
+
v = ""
|
208 |
+
for k2, v2 in tempv.items():
|
209 |
+
v += f"{k2}:\n {v2}\n"
|
210 |
+
v = User.indent_text(v, 2)
|
211 |
+
msg += '-' * 64
|
212 |
+
msg += '\n'
|
213 |
+
msg += f"{k}:\n{v}\n"
|
214 |
+
return msg
|
215 |
+
|
216 |
+
|
217 |
+
def handle_example_click(example_index):
|
218 |
+
global index_ex
|
219 |
+
index_ex = example_index
|
220 |
+
return load_example(index_ex) # Simply return the text to display it in the textbox
|
221 |
+
|
222 |
+
# Gradio Interface
|
223 |
+
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
|
224 |
+
gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
|
225 |
+
gr.Markdown("### ")
|
226 |
+
gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
|
227 |
+
gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")
|
228 |
+
|
229 |
+
|
230 |
+
|
231 |
+
gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
|
232 |
+
|
233 |
+
|
234 |
+
gr.Markdown("## ⬇️ Click an example at bottom to start!")
|
235 |
+
|
236 |
+
|
237 |
+
# Use state variables to store generated hypothesis and experiment plan
|
238 |
+
hypothesis_state = gr.State("")
|
239 |
+
experiment_plan_state = gr.State("")
|
240 |
+
|
241 |
+
########## Phase 1: Research Idea Generation Tab ##############
|
242 |
+
with gr.Tab("💡Stage 1: Research Idea Generation"):
|
243 |
+
gr.Markdown("### Extract Research Elements and Generate Research Ideas")
|
244 |
+
with gr.Row():
|
245 |
+
with gr.Column():
|
246 |
+
paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)
|
247 |
+
|
248 |
+
# extract_button = gr.Button("🔍 Extract Research Elements")
|
249 |
+
with gr.Row():
|
250 |
+
tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
|
251 |
+
gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
|
252 |
+
keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
|
253 |
+
recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
|
254 |
+
with gr.Column():
|
255 |
+
with gr.Row(): # Move the button to the top
|
256 |
+
generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
|
257 |
+
with gr.Group():
|
258 |
+
gr.Markdown("### 🌟 Research Idea")
|
259 |
+
with gr.Row():
|
260 |
+
hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
|
261 |
+
experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
|
262 |
+
|
263 |
+
|
264 |
+
# Step 1: Extract Research Elements
|
265 |
+
# extract_button.click(
|
266 |
+
# fn=extract_research_elements,
|
267 |
+
# inputs=paper_text_input,
|
268 |
+
# outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
|
269 |
+
# )
|
270 |
+
|
271 |
+
generate_button.click(
|
272 |
+
fn=generate_and_store,
|
273 |
+
inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
|
274 |
+
outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
|
275 |
+
)
|
276 |
+
|
277 |
+
gr.Examples(
|
278 |
+
examples=example_text,
|
279 |
+
inputs=[paper_text_input],
|
280 |
+
outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
|
281 |
+
fn=load_example_and_set_index,
|
282 |
+
run_on_click = True,
|
283 |
+
# label="⬇️ Click an example to load"
|
284 |
+
)
|
285 |
+
|
286 |
+
|
287 |
+
|
288 |
+
|
289 |
+
########## Phase 2 & 3: Experiment implementation and execution ##############
|
290 |
+
with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
|
291 |
+
gr.Markdown("### Interact with the ExperimentAgent")
|
292 |
+
|
293 |
+
with gr.Row():
|
294 |
+
with gr.Column():
|
295 |
+
with gr.Group():
|
296 |
+
gr.Markdown("### 🌟 Generated Research Idea")
|
297 |
+
with gr.Row():
|
298 |
+
idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
|
299 |
+
plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
|
300 |
+
|
301 |
+
with gr.Column():
|
302 |
+
start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
|
303 |
+
with gr.Group():
|
304 |
+
gr.Markdown("### Implementation + Execution Log")
|
305 |
+
log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
|
306 |
+
code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)
|
307 |
+
|
308 |
+
with gr.Column():
|
309 |
+
response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
|
310 |
+
feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
|
311 |
+
submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
|
312 |
+
|
313 |
+
hypothesis_state.change(
|
314 |
+
fn=load_phase_2_inputs,
|
315 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
316 |
+
outputs=[idea_input, plan_input, code_display]
|
317 |
+
)
|
318 |
+
|
319 |
+
# Start research agent
|
320 |
+
start_exp_agnet.click(
|
321 |
+
fn=start_experiment_agent,
|
322 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
323 |
+
outputs=[code_display, log, response, feedback]
|
324 |
+
)
|
325 |
+
|
326 |
+
submit_button.click(
|
327 |
+
fn=submit_feedback,
|
328 |
+
inputs=[feedback, log, response],
|
329 |
+
outputs=[log, response, code_display, feedback]
|
330 |
+
)
|
331 |
+
|
332 |
+
# Test
|
333 |
+
if __name__ == "__main__":
|
334 |
+
step_index = 0
|
335 |
+
app.launch(share=True)
|
.history/app_20250404174545.py
ADDED
@@ -0,0 +1,335 @@
.history/app_20250404174823.py
ADDED
@@ -0,0 +1,335 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]


# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return [paper_text, tasks, gaps, keywords, recent_works]

example_text = [load_example(1)[0], load_example(2)[0]]


# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    example = load_example(index_ex)

    return example



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info) # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex) # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")


    gr.Markdown("## ⬇️ Click an example at bottom to start!")


    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row(): # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)


        # Step 1: Extract Research Elements
        # extract_button.click(
        #     fn=extract_research_elements,
        #     inputs=paper_text_input,
        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        # )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            # label="⬇️ Click an example to load"
        )




    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

        hypothesis_state.change(
            fn=load_phase_2_inputs,
            inputs=[hypothesis_state, experiment_plan_state],
            outputs=[idea_input, plan_input, code_display]
        )

        # Start research agent
        start_exp_agnet.click(
            fn=start_experiment_agent,
            inputs=[hypothesis_state, experiment_plan_state],
            outputs=[code_display, log, response, feedback]
        )

        submit_button.click(
            fn=submit_feedback,
            inputs=[feedback, log, response],
            outputs=[log, response, code_display, feedback]
        )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
.history/app_20250404174828.py
ADDED
@@ -0,0 +1,335 @@
1 |
+
import gradio as gr
|
2 |
+
from pathlib import Path
|
3 |
+
from reactagent.environment import Environment
|
4 |
+
from reactagent.agents.agent_research import ResearchAgent
|
5 |
+
from reactagent.runner import create_parser
|
6 |
+
from reactagent import llm
|
7 |
+
from reactagent.users.user import User
|
8 |
+
import os
|
9 |
+
import json
|
10 |
+
|
11 |
+
|
12 |
+
# Global variables to store session state
|
13 |
+
env = None
|
14 |
+
agent = None
|
15 |
+
state_example = False
|
16 |
+
state_extract = False
|
17 |
+
state_generate = False
|
18 |
+
state_agent = False
|
19 |
+
state_complete = False
|
20 |
+
index_ex = "1"
|
21 |
+
|
22 |
+
example_text = [
|
23 |
+
"Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
|
24 |
+
"Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
|
25 |
+
]
|
26 |
+
|
27 |
+
|
28 |
+
# Load example JSON file
|
29 |
+
def load_example_data():
|
30 |
+
with open("example/example_data.json", "r") as json_file:
|
31 |
+
example_data = json.load(json_file)
|
32 |
+
|
33 |
+
for idx in example_data.keys():
|
34 |
+
try:
|
35 |
+
file = example_data[idx]["code_init"]
|
36 |
+
with open(os.path.join("example", file), "r") as f:
|
37 |
+
example_data[idx]["code_init"] = f.read()
|
38 |
+
except FileNotFoundError:
|
39 |
+
print(f"File not found: {file}. Skipping key: {idx}")
|
40 |
+
try:
|
41 |
+
file = example_data[idx]["code_final"]
|
42 |
+
with open(os.path.join("example", file), "r") as f:
|
43 |
+
example_data[idx]["code_final"] = f.read()
|
44 |
+
except FileNotFoundError:
|
45 |
+
print(f"File not found: {file}. Skipping key: {idx}")
|
46 |
+
return example_data
|
47 |
+
|
48 |
+
example_data = load_example_data()
|
49 |
+
|
50 |
+
# Function to handle the selection of an example and populate the respective fields
|
51 |
+
def load_example(example_id):
|
52 |
+
global index_ex
|
53 |
+
index_ex = str(example_id)
|
54 |
+
example = example_data[index_ex]
|
55 |
+
paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
|
56 |
+
tasks = example['research_tasks']
|
57 |
+
gaps = example['research_gaps']
|
58 |
+
keywords = example['keywords']
|
59 |
+
recent_works = "\n".join(example['recent_works'])
|
60 |
+
return [paper_text, tasks, gaps, keywords, recent_works]
|
61 |
+
|
62 |
+
example_text = [load_example(1)[0], load_example(2)[0]]
|
63 |
+
|
64 |
+
|
65 |
+
# Function to handle example clicks
|
66 |
+
def load_example_and_set_index(paper_text_input):
|
67 |
+
global index_ex, state_example
|
68 |
+
state_example = True
|
69 |
+
index_ex = str(example_text.index(paper_text_input) + 1)
|
70 |
+
example = load_example(index_ex)
|
71 |
+
|
72 |
+
return example
|
73 |
+
|
74 |
+
|
75 |
+
|
76 |
+
########## Phase 1 ##############
|
77 |
+
|
78 |
+
def extract_research_elements(paper_text):
|
79 |
+
global state_extract, index_ex, state_example
|
80 |
+
if not state_example or paper_text == "":
|
81 |
+
return "", "", "", ""
|
82 |
+
state_extract = True
|
83 |
+
if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
|
84 |
+
print("Mismatch detected.")
|
85 |
+
print(paper_text)
|
86 |
+
return "", "", "", ""
|
87 |
+
example = example_data[index_ex]
|
88 |
+
tasks = example['research_tasks']
|
89 |
+
gaps = example['research_gaps']
|
90 |
+
keywords = example['keywords']
|
91 |
+
recent_works = "\n".join(example['recent_works'])
|
92 |
+
return tasks, gaps, keywords, recent_works
|
93 |
+
|
94 |
+
|
95 |
+
# Step 2: Generate Research Hypothesis and Experiment Plan
|
96 |
+
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
|
97 |
+
if (not state_extract or not state_example or paper_text == ""):
|
98 |
+
return "", "", "", ""
|
99 |
+
global state_generate, index_ex
|
100 |
+
state_generate = True
|
101 |
+
hypothesis = example_data[index_ex]['hypothesis']
|
102 |
+
experiment_plan = example_data[index_ex]['experiment_plan']
|
103 |
+
return hypothesis, experiment_plan, hypothesis, experiment_plan
|
104 |
+
|
105 |
+
########## Phase 2 & 3 ##############
|
106 |
+
def start_experiment_agent(hypothesis, plan):
|
107 |
+
if (not state_extract or not state_generate or not state_example):
|
108 |
+
return "", "", ""
|
109 |
+
global state_agent, step_index, state_complete
|
110 |
+
state_agent = True
|
111 |
+
step_index = 0
|
112 |
+
state_complete = False
|
113 |
+
# predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
|
114 |
+
return example_data[index_ex]['code_init'], predefined_action_log, "", ""
|
115 |
+
|
116 |
+
def submit_feedback(user_feedback, history, previous_response):
|
117 |
+
if (not state_extract or not state_generate or not state_agent or not state_example):
|
118 |
+
return "", "", ""
|
119 |
+
global step_index, state_complete
|
120 |
+
step_index += 1
|
121 |
+
msg = history
|
122 |
+
if step_index < len(process_steps):
|
123 |
+
msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
|
124 |
+
response_info = process_steps[step_index]
|
125 |
+
response = info_to_message(response_info) # Convert dictionary to formatted string
|
126 |
+
response += "Please provide feedback based on the history, response entries, and observation, and questions: "
|
127 |
+
step_index += 1
|
128 |
+
msg += response
|
129 |
+
else:
|
130 |
+
state_complete = True
|
131 |
+
response = "Agent Finished."
|
132 |
+
|
133 |
+
return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""
|
134 |
+
|
135 |
+
def load_phase_2_inputs(hypothesis, plan):
|
136 |
+
return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
|
137 |
+
|
138 |
+
|
139 |
+
|
140 |
+
predefined_action_log = """
|
141 |
+
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
|
142 |
+
[Action]: Inspect Script (train.py)
|
143 |
+
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
|
144 |
+
Objective: Understand the training script, including data processing, [...]
|
145 |
+
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
|
146 |
+
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
|
147 |
+
"""
|
148 |
+
|
149 |
+
|
150 |
+
predefined_observation = """
|
151 |
+
Epoch [1/10],
|
152 |
+
Train MSE: 0.543,
|
153 |
+
Test MSE: 0.688
|
154 |
+
Epoch [2/10],
|
155 |
+
Train MSE: 0.242,
|
156 |
+
Test MSE: 0.493\n
|
157 |
+
"""
|
158 |
+
|
159 |
+
# Initialize the global step_index and history
|
160 |
+
process_steps = [
|
161 |
+
{
|
162 |
+
"Action": "Inspect Script Lines (train.py)",
|
163 |
+
"Observation": (
|
164 |
+
"The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
|
165 |
+
"Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
|
166 |
+
"to calculate RMSE for different dimensions. Placeholder functions train_model and "
|
167 |
+
"predict exist without implementations."
|
168 |
+
),
|
169 |
+
},
|
170 |
+
{
|
171 |
+
"Action": "Execute Script (train.py)",
|
172 |
+
"Observation": (
|
173 |
+
"The script executed successfully. Generated embeddings using the BERT model. Completed "
|
174 |
+
"the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
|
175 |
+
),
|
176 |
+
},
|
177 |
+
{
|
178 |
+
"Action": "Edit Script (train.py)",
|
179 |
+
"Observation": (
|
180 |
+
"Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
|
181 |
+
"The edited train.py now has clearly defined functions"
|
182 |
+
"for data loading (load_data), model definition (build_model), "
|
183 |
+
"training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
|
184 |
+
),
|
185 |
+
},
|
186 |
+
{
|
187 |
+
"Action": "Retrieve Model",
|
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]

def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex)  # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")

    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")

    gr.Markdown("## ⬇️ Click an example at bottom to start!")

    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row():  # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)

        # Step 1: Extract Research Elements
        # extract_button.click(
        #     fn=extract_research_elements,
        #     inputs=paper_text_input,
        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        # )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            fn=load_example_and_set_index,
            run_on_click=True,
            # label="⬇️ Click an example to load"
        )

    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

        hypothesis_state.change(
            fn=load_phase_2_inputs,
            inputs=[hypothesis_state, experiment_plan_state],
            outputs=[idea_input, plan_input, code_display]
        )

        # Start research agent
        start_exp_agnet.click(
            fn=start_experiment_agent,
            inputs=[hypothesis_state, experiment_plan_state],
            outputs=[code_display, log, response, feedback]
        )

        submit_button.click(
            fn=submit_feedback,
            inputs=[feedback, log, response],
            outputs=[log, response, code_display, feedback]
        )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
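A note on the demo flow shown above: submit_feedback() walks through process_steps one entry at a time and renders each entry with info_to_message() before appending it to the execution log. The short sketch below is not part of the commit; it reproduces that formatting for one flat entry, and it skips the User.indent_text branch because none of the predefined steps carry nested dicts.

# Standalone sketch (not in the commit): how info_to_message() renders one
# flat process_steps entry. Nested-dict handling via User.indent_text is
# omitted since the predefined steps only hold strings.
step = {
    "Action": "Retrieve Model",
    "Observation": "CNN and BiLSTM retrieved.",
}
msg = ""
for key, value in step.items():
    msg += "-" * 64 + "\n"        # separator line, as in info_to_message
    msg += f"{key}:\n{value}\n"   # "Key:" header followed by its value
print(msg)
# Expected output:
# ----------------------------------------------------------------
# Action:
# Retrieve Model
# ----------------------------------------------------------------
# Observation:
# CNN and BiLSTM retrieved.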
.history/app_20250404174831.py
ADDED
@@ -0,0 +1,335 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]


# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return [paper_text, tasks, gaps, keywords, recent_works]

example_text = [load_example(1)[0], load_example(2)[0]]


# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    example = load_example(index_ex)

    return example


########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info)  # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."


predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]

def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex)  # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")

    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")

    gr.Markdown("## ⬇️ Click an example at bottom to start!")

    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row():  # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)

        # Step 1: Extract Research Elements
        # extract_button.click(
        #     fn=extract_research_elements,
        #     inputs=paper_text_input,
        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        # )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            fn=load_example_and_set_index,
            run_on_click=True,
            # label="⬇️ Click an example to load"
        )

    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

        hypothesis_state.change(
            fn=load_phase_2_inputs,
            inputs=[hypothesis_state, experiment_plan_state],
            outputs=[idea_input, plan_input, code_display]
        )

        # Start research agent
        start_exp_agnet.click(
            fn=start_experiment_agent,
            inputs=[hypothesis_state, experiment_plan_state],
            outputs=[code_display, log, response, feedback]
        )

        submit_button.click(
            fn=submit_feedback,
            inputs=[feedback, log, response],
            outputs=[log, response, code_display, feedback]
        )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
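The loaders in this snapshot assume a particular shape for example/example_data.json, which is not included in the commit. The sketch below is only an inference from load_example_data() and load_example(): string keys such as "1" map to records with title, abstract, research_tasks, research_gaps, keywords, recent_works, hypothesis, experiment_plan, plus code_init/code_final file names that are replaced by the files' contents at load time. All values and file names here are hypothetical placeholders.

# Hypothetical sketch of example/example_data.json as implied by the loaders
# above; every value and file name below is a placeholder, not the real data
# shipped with the Space.
import json

example_data_sketch = {
    "1": {
        "title": "<paper title>",
        "abstract": "<paper abstract>",
        "research_tasks": "<research task definition>",
        "research_gaps": "<research gaps of current works>",
        "keywords": "<paper keywords>",
        "recent_works": ["<recent work 1>", "<recent work 2>"],
        "hypothesis": "<predefined research hypothesis>",
        "experiment_plan": "<predefined experiment plan>",
        "code_init": "example_1_init.py",    # replaced with the file's contents by load_example_data()
        "code_final": "example_1_final.py",  # replaced with the file's contents by load_example_data()
    }
}

print(json.dumps(example_data_sketch, indent=2, ensure_ascii=False))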
.history/app_20250404174832.py
ADDED
@@ -0,0 +1,335 @@
(Autosave snapshot with contents identical to .history/app_20250404174831.py listed above.)
.history/app_20250404175024.py
ADDED
@@ -0,0 +1,335 @@
(Autosave snapshot of the same app.py. The visible change from .history/app_20250404174831.py above is in generate_and_store(), whose guard no longer rejects an empty paper_text:

    if (not state_extract or not state_example):
        return "", "", "", ""

The rest of the listing matches the snapshot above.)
.history/app_20250404175025.py
ADDED
@@ -0,0 +1,335 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]


# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return [paper_text, tasks, gaps, keywords, recent_works]

example_text = [load_example(1)[0], load_example(2)[0]]


# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    example = load_example(index_ex)

    return example



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info) # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex) # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")


    gr.Markdown("## ⬇️ Click an example at bottom to start!")


    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row(): # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)


        # Step 1: Extract Research Elements
        # extract_button.click(
        #     fn=extract_research_elements,
        #     inputs=paper_text_input,
        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        # )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            # label="⬇️ Click an example to load"
        )




    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

    hypothesis_state.change(
        fn=load_phase_2_inputs,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[idea_input, plan_input, code_display]
    )

    # Start research agent
    start_exp_agnet.click(
        fn=start_experiment_agent,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[code_display, log, response, feedback]
    )

    submit_button.click(
        fn=submit_feedback,
        inputs=[feedback, log, response],
        outputs=[log, response, code_display, feedback]
    )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
.history/app_20250404175029.py ADDED
@@ -0,0 +1,335 @@
.history/app_20250404175030.py ADDED
@@ -0,0 +1,335 @@
.history/app_20250404175127.py ADDED
@@ -0,0 +1,335 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]


# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return [paper_text, tasks, gaps, keywords, recent_works]

example_text = [load_example(1)[0], load_example(2)[0]]


# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    example = load_example(index_ex)

    return example



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_example):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info) # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex) # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")


    gr.Markdown("## ⬇️ Click an example at bottom to start!")


    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row(): # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)


        # Step 1: Extract Research Elements
        # extract_button.click(
        #     fn=extract_research_elements,
        #     inputs=paper_text_input,
        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        # )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            # label="⬇️ Click an example to load"
        )




    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
|
299 |
+
plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
|
300 |
+
|
301 |
+
with gr.Column():
|
302 |
+
start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
|
303 |
+
with gr.Group():
|
304 |
+
gr.Markdown("### Implementation + Execution Log")
|
305 |
+
log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
|
306 |
+
code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)
|
307 |
+
|
308 |
+
with gr.Column():
|
309 |
+
response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
|
310 |
+
feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
|
311 |
+
submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
|
312 |
+
|
313 |
+
hypothesis_state.change(
|
314 |
+
fn=load_phase_2_inputs,
|
315 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
316 |
+
outputs=[idea_input, plan_input, code_display]
|
317 |
+
)
|
318 |
+
|
319 |
+
# Start research agent
|
320 |
+
start_exp_agnet.click(
|
321 |
+
fn=start_experiment_agent,
|
322 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
323 |
+
outputs=[code_display, log, response, feedback]
|
324 |
+
)
|
325 |
+
|
326 |
+
submit_button.click(
|
327 |
+
fn=submit_feedback,
|
328 |
+
inputs=[feedback, log, response],
|
329 |
+
outputs=[log, response, code_display, feedback]
|
330 |
+
)
|
331 |
+
|
332 |
+
# Test
|
333 |
+
if __name__ == "__main__":
|
334 |
+
step_index = 0
|
335 |
+
app.launch(share=True)
|
.history/app_20250404175128.py
ADDED
@@ -0,0 +1,335 @@
.history/app_20250404175236.py
ADDED
@@ -0,0 +1,335 @@
.history/app_20250404175239.py
ADDED
@@ -0,0 +1,335 @@
.history/app_20250404175250.py
ADDED
@@ -0,0 +1,335 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]


# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return [paper_text, tasks, gaps, keywords, recent_works]

example_text = [load_example(1)[0], load_example(2)[0]]


# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    example = load_example(index_ex)

    return example



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if not paper_text.strip().startswith("Title:\t" + example_data[index_ex]["title"]):
        print("Mismatch detected.")
        print(paper_text)
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_example):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info)  # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex)  # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")


    gr.Markdown("## ✅ Click an example at bottom to start ⬇️")


    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")
        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)

                # extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=False)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=False)
            with gr.Column():
                with gr.Row():  # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)


        # Step 1: Extract Research Elements
        # extract_button.click(
        #     fn=extract_research_elements,
        #     inputs=paper_text_input,
        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        # )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            # label="⬇️ Click an example to load"
        )




    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

        hypothesis_state.change(
            fn=load_phase_2_inputs,
            inputs=[hypothesis_state, experiment_plan_state],
            outputs=[idea_input, plan_input, code_display]
        )

        # Start research agent
        start_exp_agnet.click(
            fn=start_experiment_agent,
            inputs=[hypothesis_state, experiment_plan_state],
            outputs=[code_display, log, response, feedback]
        )

        submit_button.click(
            fn=submit_feedback,
            inputs=[feedback, log, response],
            outputs=[log, response, code_display, feedback]
        )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch(share=True)
.history/app_20250404175251.py
ADDED
@@ -0,0 +1,335 @@
(File contents identical to .history/app_20250404175250.py above.)
app.py
CHANGED
@@ -24,6 +24,7 @@ example_text = [
     "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
 ]
 
+
 # Load example JSON file
 def load_example_data():
     with open("example/example_data.json", "r") as json_file:
@@ -52,18 +53,23 @@ def load_example(example_id):
     index_ex = str(example_id)
     example = example_data[index_ex]
     paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
-
+    tasks = example['research_tasks']
+    gaps = example['research_gaps']
+    keywords = example['keywords']
+    recent_works = "\n".join(example['recent_works'])
+    return [paper_text, tasks, gaps, keywords, recent_works]
+
+example_text = [load_example(1)[0], load_example(2)[0]]
 
-example_text = [load_example(1), load_example(2)]
 
 # Function to handle example clicks
 def load_example_and_set_index(paper_text_input):
     global index_ex, state_example
     state_example = True
     index_ex = str(example_text.index(paper_text_input) + 1)
-
+    example = load_example(index_ex)
 
-    return
+    return example
 
 
 
@@ -88,7 +94,7 @@ def extract_research_elements(paper_text):
 
 # Step 2: Generate Research Hypothesis and Experiment Plan
 def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
-    if (not
+    if (not state_example):
         return "", "", "", ""
     global state_generate, index_ex
     state_generate = True
@@ -225,7 +231,7 @@ with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Defaul
     gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
 
 
-    gr.Markdown("##
+    gr.Markdown("## ✅ Click an example at bottom to start ⬇️")
 
 
     # Use state variables to store generated hypothesis and experiment plan
@@ -239,7 +245,7 @@ with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Defaul
             with gr.Column():
                 paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text", interactive=False)
 
-                extract_button = gr.Button("🔍 Extract Research Elements")
+                # extract_button = gr.Button("🔍 Extract Research Elements")
                 with gr.Row():
                     tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=False)
                     gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=False)
@@ -256,11 +262,11 @@ with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Defaul
 
 
         # Step 1: Extract Research Elements
-        extract_button.click(
-
-
-
-        )
+        # extract_button.click(
+        #     fn=extract_research_elements,
+        #     inputs=paper_text_input,
+        #     outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
+        # )
 
         generate_button.click(
             fn=generate_and_store,
@@ -271,7 +277,7 @@ with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Defaul
         gr.Examples(
             examples=example_text,
             inputs=[paper_text_input],
-            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output
+            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
             fn=load_example_and_set_index,
             run_on_click = True,
             # label="⬇️ Click an example to load"