mismatch patch
- .history/app_20250403135829.py +324 -0
- .history/app_20250403154819.py +326 -0
- .history/app_20250403154820.py +326 -0
- .history/app_20250403154830.py +326 -0
- .history/app_20250403154831.py +326 -0
- .history/app_20250403154932.py +326 -0
- .history/app_20250403154948.py +326 -0
- .history/app_20250403154950.py +326 -0
- .history/app_20250403154951.py +326 -0
- .history/app_20250403155112.py +326 -0
- .history/app_20250403155113.py +326 -0
- .history/app_20250403155133.py +326 -0
- .history/app_20250403155141.py +326 -0
- .history/app_20250403155219.py +326 -0
- .history/app_20250403155224.py +326 -0
- .history/app_20250403155226.py +326 -0
- .history/app_20250403155927.py +326 -0
- .history/app_20250403160042.py +326 -0
- .history/app_20250403160050.py +326 -0
- .history/app_20250403160802.py +327 -0
- app.py +7 -4
.history/app_20250403135829.py
ADDED
@@ -0,0 +1,324 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]

# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    return paper_text

example_text = [load_example(1), load_example(2)]

# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    paper_text = load_example(index_ex)

    return paper_text, "", "", "", "", "", ""



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if paper_text != load_example(index_ex):
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info) # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex) # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/).</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")




    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")

        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text")
                extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
            with gr.Column():
                with gr.Row(): # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            label="⬇️ Click an example to load"
        )

        # Step 1: Extract Research Elements
        extract_button.click(
            fn=extract_research_elements,
            inputs=paper_text_input,
            outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )



    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

    hypothesis_state.change(
        fn=load_phase_2_inputs,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[idea_input, plan_input, code_display]
    )

    # Start research agent
    start_exp_agnet.click(
        fn=start_experiment_agent,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[code_display, log, response, feedback]
    )

    submit_button.click(
        fn=submit_feedback,
        inputs=[feedback, log, response],
        outputs=[log, response, code_display, feedback]
    )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch()
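The loaders above assume a particular shape for example/example_data.json, which is not included in this commit. The following is a minimal sketch of that assumed structure, written as a Python literal and inferred only from the fields that load_example_data(), load_example(), extract_research_elements(), and generate_and_store() read; all values and file names are placeholders, not taken from the repository.

# Hypothetical shape of example/example_data.json (keys are example indices as strings).
# Every field name below is read somewhere in app.py; the values are placeholders.
example_data_sketch = {
    "1": {
        "title": "…",                        # shown as 'Title:\t...' in the paper text box
        "abstract": "…",
        "research_tasks": "…",               # Phase 1 "Research Tasks" output
        "research_gaps": "…",
        "keywords": "…",
        "recent_works": ["…", "…"],          # a list, joined with newlines for display
        "hypothesis": "…",
        "experiment_plan": "…",
        "code_init": "example_1_init.py",    # placeholder file names, resolved under example/
        "code_final": "example_1_final.py",
    },
    "2": {
        # the same fields for the second example paper
    },
}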
.history/app_20250403154819.py
ADDED
@@ -0,0 +1,326 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]

# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    return paper_text

example_text = [load_example(1), load_example(2)]

# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    paper_text = load_example(index_ex)

    return paper_text, "", "", "", "", "", ""



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if paper_text != load_example(index_ex):
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info) # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex) # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")



    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")




    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")

        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text")
                extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
                    keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
                    recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
            with gr.Column():
                with gr.Row(): # Move the button to the top
                    generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
                with gr.Group():
                    gr.Markdown("### 🌟 Research Idea")
                    with gr.Row():
                        hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                        experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)

        gr.Markdown("### ⬇️ Click an example to load")

        gr.Examples(
            examples=example_text,
            inputs=[paper_text_input],
            outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
            fn=load_example_and_set_index,
            run_on_click = True,
            label="⬇️ Click an example to load"
        )

        # Step 1: Extract Research Elements
        extract_button.click(
            fn=extract_research_elements,
            inputs=paper_text_input,
            outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
        )

        generate_button.click(
            fn=generate_and_store,
            inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
            outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
        )



    ########## Phase 2 & 3: Experiment implementation and execution ##############
    with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
        gr.Markdown("### Interact with the ExperimentAgent")

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    gr.Markdown("### 🌟 Generated Research Idea")
                    with gr.Row():
                        idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
                        plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)

            with gr.Column():
                start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
                with gr.Group():
                    gr.Markdown("### Implementation + Execution Log")
                    log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
                    code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)

            with gr.Column():
                response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
                feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
                submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])

    hypothesis_state.change(
        fn=load_phase_2_inputs,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[idea_input, plan_input, code_display]
    )

    # Start research agent
    start_exp_agnet.click(
        fn=start_experiment_agent,
        inputs=[hypothesis_state, experiment_plan_state],
        outputs=[code_display, log, response, feedback]
    )

    submit_button.click(
        fn=submit_feedback,
        inputs=[feedback, log, response],
        outputs=[log, response, code_display, feedback]
    )

# Test
if __name__ == "__main__":
    step_index = 0
    app.launch()
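Note that in these snapshots the Stage 2 & 3 tab does not call the live ResearchAgent: start_experiment_agent() and submit_feedback() simply replay the hard-coded process_steps list and format each entry with info_to_message(). The snippet below is a standalone sketch of that replay for quick inspection outside the UI; it does not import app.py or reactagent, and preview_steps() plus the abbreviated entries are illustrative only, not part of the repository.

# Minimal sketch: print a process_steps-style walkthrough the way the UI renders it.
# The entries mirror the structure of process_steps in app.py but are abbreviated here.
process_steps_sketch = [
    {"Action": "Inspect Script Lines (train.py)",
     "Observation": "Placeholder functions train_model and predict exist without implementations."},
    {"Action": "Execute Script (train.py)",
     "Observation": "Training completed; metric placeholders still need implementation."},
]

def preview_steps(steps):
    # Mirrors the simple (non-dict) branch of info_to_message(): a separator line,
    # then "key:\nvalue" for every entry of each step.
    for step in steps:
        for key, value in step.items():
            print("-" * 64)
            print(f"{key}:\n{value}")

if __name__ == "__main__":
    preview_steps(process_steps_sketch)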
.history/app_20250403154820.py
ADDED
@@ -0,0 +1,326 @@
(Contents identical to .history/app_20250403154819.py above.)
|
224 |
+
|
225 |
+
|
226 |
+
|
227 |
+
|
228 |
+
# Use state variables to store generated hypothesis and experiment plan
|
229 |
+
hypothesis_state = gr.State("")
|
230 |
+
experiment_plan_state = gr.State("")
|
231 |
+
|
232 |
+
########## Phase 1: Research Idea Generation Tab ##############
|
233 |
+
with gr.Tab("💡Stage 1: Research Idea Generation"):
|
234 |
+
gr.Markdown("### Extract Research Elements and Generate Research Ideas")
|
235 |
+
|
236 |
+
with gr.Row():
|
237 |
+
with gr.Column():
|
238 |
+
paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text")
|
239 |
+
extract_button = gr.Button("🔍 Extract Research Elements")
|
240 |
+
with gr.Row():
|
241 |
+
tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
|
242 |
+
gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
|
243 |
+
keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
|
244 |
+
recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
|
245 |
+
with gr.Column():
|
246 |
+
with gr.Row(): # Move the button to the top
|
247 |
+
generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
|
248 |
+
with gr.Group():
|
249 |
+
gr.Markdown("### 🌟 Research Idea")
|
250 |
+
with gr.Row():
|
251 |
+
hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
|
252 |
+
experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
|
253 |
+
|
254 |
+
gr.Markdown("## ⬇️ Click an example to load")
|
255 |
+
|
256 |
+
gr.Examples(
|
257 |
+
examples=example_text,
|
258 |
+
inputs=[paper_text_input],
|
259 |
+
outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
|
260 |
+
fn=load_example_and_set_index,
|
261 |
+
run_on_click = True,
|
262 |
+
label="⬇️ Click an example to load"
|
263 |
+
)
|
264 |
+
|
265 |
+
# Step 1: Extract Research Elements
|
266 |
+
extract_button.click(
|
267 |
+
fn=extract_research_elements,
|
268 |
+
inputs=paper_text_input,
|
269 |
+
outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
|
270 |
+
)
|
271 |
+
|
272 |
+
generate_button.click(
|
273 |
+
fn=generate_and_store,
|
274 |
+
inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
|
275 |
+
outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
|
276 |
+
)
|
277 |
+
|
278 |
+
|
279 |
+
|
280 |
+
########## Phase 2 & 3: Experiment implementation and execution ##############
|
281 |
+
with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
|
282 |
+
gr.Markdown("### Interact with the ExperimentAgent")
|
283 |
+
|
284 |
+
with gr.Row():
|
285 |
+
with gr.Column():
|
286 |
+
with gr.Group():
|
287 |
+
gr.Markdown("### 🌟 Generated Research Idea")
|
288 |
+
with gr.Row():
|
289 |
+
idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
|
290 |
+
plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
|
291 |
+
|
292 |
+
with gr.Column():
|
293 |
+
start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
|
294 |
+
with gr.Group():
|
295 |
+
gr.Markdown("### Implementation + Execution Log")
|
296 |
+
log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
|
297 |
+
code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)
|
298 |
+
|
299 |
+
with gr.Column():
|
300 |
+
response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
|
301 |
+
feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
|
302 |
+
submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
|
303 |
+
|
304 |
+
hypothesis_state.change(
|
305 |
+
fn=load_phase_2_inputs,
|
306 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
307 |
+
outputs=[idea_input, plan_input, code_display]
|
308 |
+
)
|
309 |
+
|
310 |
+
# Start research agent
|
311 |
+
start_exp_agnet.click(
|
312 |
+
fn=start_experiment_agent,
|
313 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
314 |
+
outputs=[code_display, log, response, feedback]
|
315 |
+
)
|
316 |
+
|
317 |
+
submit_button.click(
|
318 |
+
fn=submit_feedback,
|
319 |
+
inputs=[feedback, log, response],
|
320 |
+
outputs=[log, response, code_display, feedback]
|
321 |
+
)
|
322 |
+
|
323 |
+
# Test
|
324 |
+
if __name__ == "__main__":
|
325 |
+
step_index = 0
|
326 |
+
app.launch()
|
.history/app_20250403154831.py
ADDED
@@ -0,0 +1,326 @@
.history/app_20250403154932.py
ADDED
@@ -0,0 +1,326 @@
.history/app_20250403154948.py
ADDED
@@ -0,0 +1,326 @@
.history/app_20250403154950.py
ADDED
@@ -0,0 +1,326 @@
.history/app_20250403154951.py
ADDED
@@ -0,0 +1,326 @@
.history/app_20250403155112.py
ADDED
@@ -0,0 +1,326 @@
.history/app_20250403155113.py
ADDED
@@ -0,0 +1,326 @@
import gradio as gr
from pathlib import Path
from reactagent.environment import Environment
from reactagent.agents.agent_research import ResearchAgent
from reactagent.runner import create_parser
from reactagent import llm
from reactagent.users.user import User
import os
import json


# Global variables to store session state
env = None
agent = None
state_example = False
state_extract = False
state_generate = False
state_agent = False
state_complete = False
index_ex = "1"

example_text = [
    "Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
    "Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
]

# Load example JSON file
def load_example_data():
    with open("example/example_data.json", "r") as json_file:
        example_data = json.load(json_file)

    for idx in example_data.keys():
        try:
            file = example_data[idx]["code_init"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_init"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
        try:
            file = example_data[idx]["code_final"]
            with open(os.path.join("example", file), "r") as f:
                example_data[idx]["code_final"] = f.read()
        except FileNotFoundError:
            print(f"File not found: {file}. Skipping key: {idx}")
    return example_data

example_data = load_example_data()

# Function to handle the selection of an example and populate the respective fields
def load_example(example_id):
    global index_ex
    index_ex = str(example_id)
    example = example_data[index_ex]
    paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
    return paper_text

example_text = [load_example(1), load_example(2)]

# Function to handle example clicks
def load_example_and_set_index(paper_text_input):
    global index_ex, state_example
    state_example = True
    index_ex = str(example_text.index(paper_text_input) + 1)
    paper_text = load_example(index_ex)

    return paper_text, "", "", "", "", "", ""



########## Phase 1 ##############

def extract_research_elements(paper_text):
    global state_extract, index_ex, state_example
    if not state_example or paper_text == "":
        return "", "", "", ""
    state_extract = True
    if paper_text != load_example(index_ex):
        return "", "", "", ""
    example = example_data[index_ex]
    tasks = example['research_tasks']
    gaps = example['research_gaps']
    keywords = example['keywords']
    recent_works = "\n".join(example['recent_works'])
    return tasks, gaps, keywords, recent_works


# Step 2: Generate Research Hypothesis and Experiment Plan
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
    if (not state_extract or not state_example or paper_text == ""):
        return "", "", "", ""
    global state_generate, index_ex
    state_generate = True
    hypothesis = example_data[index_ex]['hypothesis']
    experiment_plan = example_data[index_ex]['experiment_plan']
    return hypothesis, experiment_plan, hypothesis, experiment_plan

########## Phase 2 & 3 ##############
def start_experiment_agent(hypothesis, plan):
    if (not state_extract or not state_generate or not state_example):
        return "", "", ""
    global state_agent, step_index, state_complete
    state_agent = True
    step_index = 0
    state_complete = False
    # predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
    return example_data[index_ex]['code_init'], predefined_action_log, "", ""

def submit_feedback(user_feedback, history, previous_response):
    if (not state_extract or not state_generate or not state_agent or not state_example):
        return "", "", ""
    global step_index, state_complete
    step_index += 1
    msg = history
    if step_index < len(process_steps):
        msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
        response_info = process_steps[step_index]
        response = info_to_message(response_info)  # Convert dictionary to formatted string
        response += "Please provide feedback based on the history, response entries, and observation, and questions: "
        step_index += 1
        msg += response
    else:
        state_complete = True
        response = "Agent Finished."

    return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""

def load_phase_2_inputs(hypothesis, plan):
    return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."



predefined_action_log = """
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
[Action]: Inspect Script (train.py)
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
Objective: Understand the training script, including data processing, [...]
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
"""


predefined_observation = """
Epoch [1/10],
Train MSE: 0.543,
Test MSE: 0.688
Epoch [2/10],
Train MSE: 0.242,
Test MSE: 0.493\n
"""

# Initialize the global step_index and history
process_steps = [
    {
        "Action": "Inspect Script Lines (train.py)",
        "Observation": (
            "The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
            "Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
            "to calculate RMSE for different dimensions. Placeholder functions train_model and "
            "predict exist without implementations."
        ),
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The script executed successfully. Generated embeddings using the BERT model. Completed "
            "the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
        ),
    },
    {
        "Action": "Edit Script (train.py)",
        "Observation": (
            "Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
            "The edited train.py now has clearly defined functions"
            "for data loading (load_data), model definition (build_model), "
            "training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
        ),
    },
    {
        "Action": "Retrieve Model",
        "Observation": "CNN and BiLSTM retrieved.",
    },
    {
        "Action": "Execute Script (train.py)",
        "Observation": (
            "The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
            "the decrease in loss indicates improved model performance."
        )
    },
    {
        "Action": "Evaluation",
        "Observation": predefined_observation,
    }
]
def info_to_message(info):
    msg = ""
    for k, v in info.items():
        if isinstance(v, dict):
            tempv = v
            v = ""
            for k2, v2 in tempv.items():
                v += f"{k2}:\n {v2}\n"
            v = User.indent_text(v, 2)
        msg += '-' * 64
        msg += '\n'
        msg += f"{k}:\n{v}\n"
    return msg


def handle_example_click(example_index):
    global index_ex
    index_ex = example_index
    return load_example(index_ex)  # Simply return the text to display it in the textbox

# Gradio Interface
with gr.Blocks(theme=gr.themes.Default()) as app:
    gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
    gr.Markdown("### ")
    gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")


    gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")


    # Use state variables to store generated hypothesis and experiment plan
    hypothesis_state = gr.State("")
    experiment_plan_state = gr.State("")

    ########## Phase 1: Research Idea Generation Tab ##############
    with gr.Tab("💡Stage 1: Research Idea Generation"):
        gr.Markdown("### Extract Research Elements and Generate Research Ideas")

        with gr.Row():
            with gr.Column():
                paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text")
                extract_button = gr.Button("🔍 Extract Research Elements")
                with gr.Row():
                    tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
                    gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
|
243 |
+
keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
|
244 |
+
recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
|
245 |
+
with gr.Column():
|
246 |
+
with gr.Row(): # Move the button to the top
|
247 |
+
generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
|
248 |
+
with gr.Group():
|
249 |
+
gr.Markdown("### 🌟 Research Idea")
|
250 |
+
with gr.Row():
|
251 |
+
hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
|
252 |
+
experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
|
253 |
+
|
254 |
+
gr.Markdown("## ⬇️ <span style='color:Red;'>Click an example to load</span>")
|
255 |
+
|
256 |
+
gr.Examples(
|
257 |
+
examples=example_text,
|
258 |
+
inputs=[paper_text_input],
|
259 |
+
outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
|
260 |
+
fn=load_example_and_set_index,
|
261 |
+
run_on_click = True,
|
262 |
+
# label="⬇️ Click an example to load"
|
263 |
+
)
|
264 |
+
|
265 |
+
# Step 1: Extract Research Elements
|
266 |
+
extract_button.click(
|
267 |
+
fn=extract_research_elements,
|
268 |
+
inputs=paper_text_input,
|
269 |
+
outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
|
270 |
+
)
|
271 |
+
|
272 |
+
generate_button.click(
|
273 |
+
fn=generate_and_store,
|
274 |
+
inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
|
275 |
+
outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
|
276 |
+
)
|
277 |
+
|
278 |
+
|
279 |
+
|
280 |
+
########## Phase 2 & 3: Experiment implementation and execution ##############
|
281 |
+
with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
|
282 |
+
gr.Markdown("### Interact with the ExperimentAgent")
|
283 |
+
|
284 |
+
with gr.Row():
|
285 |
+
with gr.Column():
|
286 |
+
with gr.Group():
|
287 |
+
gr.Markdown("### 🌟 Generated Research Idea")
|
288 |
+
with gr.Row():
|
289 |
+
idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
|
290 |
+
plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
|
291 |
+
|
292 |
+
with gr.Column():
|
293 |
+
start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
|
294 |
+
with gr.Group():
|
295 |
+
gr.Markdown("### Implementation + Execution Log")
|
296 |
+
log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
|
297 |
+
code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)
|
298 |
+
|
299 |
+
with gr.Column():
|
300 |
+
response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
|
301 |
+
feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
|
302 |
+
submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
|
303 |
+
|
304 |
+
hypothesis_state.change(
|
305 |
+
fn=load_phase_2_inputs,
|
306 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
307 |
+
outputs=[idea_input, plan_input, code_display]
|
308 |
+
)
|
309 |
+
|
310 |
+
# Start research agent
|
311 |
+
start_exp_agnet.click(
|
312 |
+
fn=start_experiment_agent,
|
313 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
314 |
+
outputs=[code_display, log, response, feedback]
|
315 |
+
)
|
316 |
+
|
317 |
+
submit_button.click(
|
318 |
+
fn=submit_feedback,
|
319 |
+
inputs=[feedback, log, response],
|
320 |
+
outputs=[log, response, code_display, feedback]
|
321 |
+
)
|
322 |
+
|
323 |
+
# Test
|
324 |
+
if __name__ == "__main__":
|
325 |
+
step_index = 0
|
326 |
+
app.launch()
|
.history/app_20250403155133.py
ADDED
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from pathlib import Path
|
3 |
+
from reactagent.environment import Environment
|
4 |
+
from reactagent.agents.agent_research import ResearchAgent
|
5 |
+
from reactagent.runner import create_parser
|
6 |
+
from reactagent import llm
|
7 |
+
from reactagent.users.user import User
|
8 |
+
import os
|
9 |
+
import json
|
10 |
+
|
11 |
+
|
12 |
+
# Global variables to store session state
|
13 |
+
env = None
|
14 |
+
agent = None
|
15 |
+
state_example = False
|
16 |
+
state_extract = False
|
17 |
+
state_generate = False
|
18 |
+
state_agent = False
|
19 |
+
state_complete = False
|
20 |
+
index_ex = "1"
|
21 |
+
|
22 |
+
example_text = [
|
23 |
+
"Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
|
24 |
+
"Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
|
25 |
+
]
|
26 |
+
|
27 |
+
# Load example JSON file
|
28 |
+
def load_example_data():
|
29 |
+
with open("example/example_data.json", "r") as json_file:
|
30 |
+
example_data = json.load(json_file)
|
31 |
+
|
32 |
+
for idx in example_data.keys():
|
33 |
+
try:
|
34 |
+
file = example_data[idx]["code_init"]
|
35 |
+
with open(os.path.join("example", file), "r") as f:
|
36 |
+
example_data[idx]["code_init"] = f.read()
|
37 |
+
except FileNotFoundError:
|
38 |
+
print(f"File not found: {file}. Skipping key: {idx}")
|
39 |
+
try:
|
40 |
+
file = example_data[idx]["code_final"]
|
41 |
+
with open(os.path.join("example", file), "r") as f:
|
42 |
+
example_data[idx]["code_final"] = f.read()
|
43 |
+
except FileNotFoundError:
|
44 |
+
print(f"File not found: {file}. Skipping key: {idx}")
|
45 |
+
return example_data
|
46 |
+
|
47 |
+
example_data = load_example_data()
|
48 |
+
|
49 |
+
# Function to handle the selection of an example and populate the respective fields
|
50 |
+
def load_example(example_id):
|
51 |
+
global index_ex
|
52 |
+
index_ex = str(example_id)
|
53 |
+
example = example_data[index_ex]
|
54 |
+
paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
|
55 |
+
return paper_text
|
56 |
+
|
57 |
+
example_text = [load_example(1), load_example(2)]
|
58 |
+
|
59 |
+
# Function to handle example clicks
|
60 |
+
def load_example_and_set_index(paper_text_input):
|
61 |
+
global index_ex, state_example
|
62 |
+
state_example = True
|
63 |
+
index_ex = str(example_text.index(paper_text_input) + 1)
|
64 |
+
paper_text = load_example(index_ex)
|
65 |
+
|
66 |
+
return paper_text, "", "", "", "", "", ""
|
67 |
+
|
68 |
+
|
69 |
+
|
70 |
+
########## Phase 1 ##############
|
71 |
+
|
72 |
+
def extract_research_elements(paper_text):
|
73 |
+
global state_extract, index_ex, state_example
|
74 |
+
if not state_example or paper_text == "":
|
75 |
+
return "", "", "", ""
|
76 |
+
state_extract = True
|
77 |
+
if paper_text != load_example(index_ex):
|
78 |
+
return "", "", "", ""
|
79 |
+
example = example_data[index_ex]
|
80 |
+
tasks = example['research_tasks']
|
81 |
+
gaps = example['research_gaps']
|
82 |
+
keywords = example['keywords']
|
83 |
+
recent_works = "\n".join(example['recent_works'])
|
84 |
+
return tasks, gaps, keywords, recent_works
|
85 |
+
|
86 |
+
|
87 |
+
# Step 2: Generate Research Hypothesis and Experiment Plan
|
88 |
+
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
|
89 |
+
if (not state_extract or not state_example or paper_text == ""):
|
90 |
+
return "", "", "", ""
|
91 |
+
global state_generate, index_ex
|
92 |
+
state_generate = True
|
93 |
+
hypothesis = example_data[index_ex]['hypothesis']
|
94 |
+
experiment_plan = example_data[index_ex]['experiment_plan']
|
95 |
+
return hypothesis, experiment_plan, hypothesis, experiment_plan
|
96 |
+
|
97 |
+
########## Phase 2 & 3 ##############
|
98 |
+
def start_experiment_agent(hypothesis, plan):
|
99 |
+
if (not state_extract or not state_generate or not state_example):
|
100 |
+
return "", "", ""
|
101 |
+
global state_agent, step_index, state_complete
|
102 |
+
state_agent = True
|
103 |
+
step_index = 0
|
104 |
+
state_complete = False
|
105 |
+
# predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
|
106 |
+
return example_data[index_ex]['code_init'], predefined_action_log, "", ""
|
107 |
+
|
108 |
+
def submit_feedback(user_feedback, history, previous_response):
|
109 |
+
if (not state_extract or not state_generate or not state_agent or not state_example):
|
110 |
+
return "", "", ""
|
111 |
+
global step_index, state_complete
|
112 |
+
step_index += 1
|
113 |
+
msg = history
|
114 |
+
if step_index < len(process_steps):
|
115 |
+
msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
|
116 |
+
response_info = process_steps[step_index]
|
117 |
+
response = info_to_message(response_info) # Convert dictionary to formatted string
|
118 |
+
response += "Please provide feedback based on the history, response entries, and observation, and questions: "
|
119 |
+
step_index += 1
|
120 |
+
msg += response
|
121 |
+
else:
|
122 |
+
state_complete = True
|
123 |
+
response = "Agent Finished."
|
124 |
+
|
125 |
+
return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""
|
126 |
+
|
127 |
+
def load_phase_2_inputs(hypothesis, plan):
|
128 |
+
return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
|
129 |
+
|
130 |
+
|
131 |
+
|
132 |
+
predefined_action_log = """
|
133 |
+
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
|
134 |
+
[Action]: Inspect Script (train.py)
|
135 |
+
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
|
136 |
+
Objective: Understand the training script, including data processing, [...]
|
137 |
+
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
|
138 |
+
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
|
139 |
+
"""
|
140 |
+
|
141 |
+
|
142 |
+
predefined_observation = """
|
143 |
+
Epoch [1/10],
|
144 |
+
Train MSE: 0.543,
|
145 |
+
Test MSE: 0.688
|
146 |
+
Epoch [2/10],
|
147 |
+
Train MSE: 0.242,
|
148 |
+
Test MSE: 0.493\n
|
149 |
+
"""
|
150 |
+
|
151 |
+
# Initialize the global step_index and history
|
152 |
+
process_steps = [
|
153 |
+
{
|
154 |
+
"Action": "Inspect Script Lines (train.py)",
|
155 |
+
"Observation": (
|
156 |
+
"The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
|
157 |
+
"Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
|
158 |
+
"to calculate RMSE for different dimensions. Placeholder functions train_model and "
|
159 |
+
"predict exist without implementations."
|
160 |
+
),
|
161 |
+
},
|
162 |
+
{
|
163 |
+
"Action": "Execute Script (train.py)",
|
164 |
+
"Observation": (
|
165 |
+
"The script executed successfully. Generated embeddings using the BERT model. Completed "
|
166 |
+
"the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
|
167 |
+
),
|
168 |
+
},
|
169 |
+
{
|
170 |
+
"Action": "Edit Script (train.py)",
|
171 |
+
"Observation": (
|
172 |
+
"Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
|
173 |
+
"The edited train.py now has clearly defined functions"
|
174 |
+
"for data loading (load_data), model definition (build_model), "
|
175 |
+
"training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
|
176 |
+
),
|
177 |
+
},
|
178 |
+
{
|
179 |
+
"Action": "Retrieve Model",
|
180 |
+
"Observation": "CNN and BiLSTM retrieved.",
|
181 |
+
},
|
182 |
+
{
|
183 |
+
"Action": "Execute Script (train.py)",
|
184 |
+
"Observation": (
|
185 |
+
"The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
|
186 |
+
"the decrease in loss indicates improved model performance."
|
187 |
+
)
|
188 |
+
},
|
189 |
+
{
|
190 |
+
"Action": "Evaluation",
|
191 |
+
"Observation": predefined_observation,
|
192 |
+
}
|
193 |
+
]
|
194 |
+
def info_to_message(info):
|
195 |
+
msg = ""
|
196 |
+
for k, v in info.items():
|
197 |
+
if isinstance(v, dict):
|
198 |
+
tempv = v
|
199 |
+
v = ""
|
200 |
+
for k2, v2 in tempv.items():
|
201 |
+
v += f"{k2}:\n {v2}\n"
|
202 |
+
v = User.indent_text(v, 2)
|
203 |
+
msg += '-' * 64
|
204 |
+
msg += '\n'
|
205 |
+
msg += f"{k}:\n{v}\n"
|
206 |
+
return msg
|
207 |
+
|
208 |
+
|
209 |
+
def handle_example_click(example_index):
|
210 |
+
global index_ex
|
211 |
+
index_ex = example_index
|
212 |
+
return load_example(index_ex) # Simply return the text to display it in the textbox
|
213 |
+
|
214 |
+
# Gradio Interface
|
215 |
+
with gr.Blocks(theme=gr.themes.Default()) as app:
|
216 |
+
gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
|
217 |
+
gr.Markdown("### ")
|
218 |
+
gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
|
219 |
+
gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")
|
220 |
+
|
221 |
+
|
222 |
+
|
223 |
+
gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
|
224 |
+
|
225 |
+
|
226 |
+
|
227 |
+
|
228 |
+
# Use state variables to store generated hypothesis and experiment plan
|
229 |
+
hypothesis_state = gr.State("")
|
230 |
+
experiment_plan_state = gr.State("")
|
231 |
+
|
232 |
+
########## Phase 1: Research Idea Generation Tab ##############
|
233 |
+
with gr.Tab("💡Stage 1: Research Idea Generation"):
|
234 |
+
gr.Markdown("### Extract Research Elements and Generate Research Ideas")
|
235 |
+
|
236 |
+
with gr.Row():
|
237 |
+
with gr.Column():
|
238 |
+
paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text")
|
239 |
+
extract_button = gr.Button("🔍 Extract Research Elements")
|
240 |
+
with gr.Row():
|
241 |
+
tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
|
242 |
+
gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
|
243 |
+
keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
|
244 |
+
recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
|
245 |
+
with gr.Column():
|
246 |
+
with gr.Row(): # Move the button to the top
|
247 |
+
generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
|
248 |
+
with gr.Group():
|
249 |
+
gr.Markdown("### 🌟 Research Idea")
|
250 |
+
with gr.Row():
|
251 |
+
hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
|
252 |
+
experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
|
253 |
+
|
254 |
+
gr.Markdown("⬇️ <span style='color:Red;'>Click an example to load</span>")
|
255 |
+
|
256 |
+
gr.Examples(
|
257 |
+
examples=example_text,
|
258 |
+
inputs=[paper_text_input],
|
259 |
+
outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
|
260 |
+
fn=load_example_and_set_index,
|
261 |
+
run_on_click = True,
|
262 |
+
# label="⬇️ Click an example to load"
|
263 |
+
)
|
264 |
+
|
265 |
+
# Step 1: Extract Research Elements
|
266 |
+
extract_button.click(
|
267 |
+
fn=extract_research_elements,
|
268 |
+
inputs=paper_text_input,
|
269 |
+
outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
|
270 |
+
)
|
271 |
+
|
272 |
+
generate_button.click(
|
273 |
+
fn=generate_and_store,
|
274 |
+
inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
|
275 |
+
outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
|
276 |
+
)
|
277 |
+
|
278 |
+
|
279 |
+
|
280 |
+
########## Phase 2 & 3: Experiment implementation and execution ##############
|
281 |
+
with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
|
282 |
+
gr.Markdown("### Interact with the ExperimentAgent")
|
283 |
+
|
284 |
+
with gr.Row():
|
285 |
+
with gr.Column():
|
286 |
+
with gr.Group():
|
287 |
+
gr.Markdown("### 🌟 Generated Research Idea")
|
288 |
+
with gr.Row():
|
289 |
+
idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
|
290 |
+
plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
|
291 |
+
|
292 |
+
with gr.Column():
|
293 |
+
start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
|
294 |
+
with gr.Group():
|
295 |
+
gr.Markdown("### Implementation + Execution Log")
|
296 |
+
log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
|
297 |
+
code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)
|
298 |
+
|
299 |
+
with gr.Column():
|
300 |
+
response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
|
301 |
+
feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
|
302 |
+
submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
|
303 |
+
|
304 |
+
hypothesis_state.change(
|
305 |
+
fn=load_phase_2_inputs,
|
306 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
307 |
+
outputs=[idea_input, plan_input, code_display]
|
308 |
+
)
|
309 |
+
|
310 |
+
# Start research agent
|
311 |
+
start_exp_agnet.click(
|
312 |
+
fn=start_experiment_agent,
|
313 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
314 |
+
outputs=[code_display, log, response, feedback]
|
315 |
+
)
|
316 |
+
|
317 |
+
submit_button.click(
|
318 |
+
fn=submit_feedback,
|
319 |
+
inputs=[feedback, log, response],
|
320 |
+
outputs=[log, response, code_display, feedback]
|
321 |
+
)
|
322 |
+
|
323 |
+
# Test
|
324 |
+
if __name__ == "__main__":
|
325 |
+
step_index = 0
|
326 |
+
app.launch()
|
.history/app_20250403155141.py
ADDED
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from pathlib import Path
|
3 |
+
from reactagent.environment import Environment
|
4 |
+
from reactagent.agents.agent_research import ResearchAgent
|
5 |
+
from reactagent.runner import create_parser
|
6 |
+
from reactagent import llm
|
7 |
+
from reactagent.users.user import User
|
8 |
+
import os
|
9 |
+
import json
|
10 |
+
|
11 |
+
|
12 |
+
# Global variables to store session state
|
13 |
+
env = None
|
14 |
+
agent = None
|
15 |
+
state_example = False
|
16 |
+
state_extract = False
|
17 |
+
state_generate = False
|
18 |
+
state_agent = False
|
19 |
+
state_complete = False
|
20 |
+
index_ex = "1"
|
21 |
+
|
22 |
+
example_text = [
|
23 |
+
"Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
|
24 |
+
"Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
|
25 |
+
]
|
26 |
+
|
27 |
+
# Load example JSON file
|
28 |
+
def load_example_data():
|
29 |
+
with open("example/example_data.json", "r") as json_file:
|
30 |
+
example_data = json.load(json_file)
|
31 |
+
|
32 |
+
for idx in example_data.keys():
|
33 |
+
try:
|
34 |
+
file = example_data[idx]["code_init"]
|
35 |
+
with open(os.path.join("example", file), "r") as f:
|
36 |
+
example_data[idx]["code_init"] = f.read()
|
37 |
+
except FileNotFoundError:
|
38 |
+
print(f"File not found: {file}. Skipping key: {idx}")
|
39 |
+
try:
|
40 |
+
file = example_data[idx]["code_final"]
|
41 |
+
with open(os.path.join("example", file), "r") as f:
|
42 |
+
example_data[idx]["code_final"] = f.read()
|
43 |
+
except FileNotFoundError:
|
44 |
+
print(f"File not found: {file}. Skipping key: {idx}")
|
45 |
+
return example_data
|
46 |
+
|
47 |
+
example_data = load_example_data()
|
48 |
+
|
49 |
+
# Function to handle the selection of an example and populate the respective fields
|
50 |
+
def load_example(example_id):
|
51 |
+
global index_ex
|
52 |
+
index_ex = str(example_id)
|
53 |
+
example = example_data[index_ex]
|
54 |
+
paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
|
55 |
+
return paper_text
|
56 |
+
|
57 |
+
example_text = [load_example(1), load_example(2)]
|
58 |
+
|
59 |
+
# Function to handle example clicks
|
60 |
+
def load_example_and_set_index(paper_text_input):
|
61 |
+
global index_ex, state_example
|
62 |
+
state_example = True
|
63 |
+
index_ex = str(example_text.index(paper_text_input) + 1)
|
64 |
+
paper_text = load_example(index_ex)
|
65 |
+
|
66 |
+
return paper_text, "", "", "", "", "", ""
|
67 |
+
|
68 |
+
|
69 |
+
|
70 |
+
########## Phase 1 ##############
|
71 |
+
|
72 |
+
def extract_research_elements(paper_text):
|
73 |
+
global state_extract, index_ex, state_example
|
74 |
+
if not state_example or paper_text == "":
|
75 |
+
return "", "", "", ""
|
76 |
+
state_extract = True
|
77 |
+
if paper_text != load_example(index_ex):
|
78 |
+
return "", "", "", ""
|
79 |
+
example = example_data[index_ex]
|
80 |
+
tasks = example['research_tasks']
|
81 |
+
gaps = example['research_gaps']
|
82 |
+
keywords = example['keywords']
|
83 |
+
recent_works = "\n".join(example['recent_works'])
|
84 |
+
return tasks, gaps, keywords, recent_works
|
85 |
+
|
86 |
+
|
87 |
+
# Step 2: Generate Research Hypothesis and Experiment Plan
|
88 |
+
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
|
89 |
+
if (not state_extract or not state_example or paper_text == ""):
|
90 |
+
return "", "", "", ""
|
91 |
+
global state_generate, index_ex
|
92 |
+
state_generate = True
|
93 |
+
hypothesis = example_data[index_ex]['hypothesis']
|
94 |
+
experiment_plan = example_data[index_ex]['experiment_plan']
|
95 |
+
return hypothesis, experiment_plan, hypothesis, experiment_plan
|
96 |
+
|
97 |
+
########## Phase 2 & 3 ##############
|
98 |
+
def start_experiment_agent(hypothesis, plan):
|
99 |
+
if (not state_extract or not state_generate or not state_example):
|
100 |
+
return "", "", ""
|
101 |
+
global state_agent, step_index, state_complete
|
102 |
+
state_agent = True
|
103 |
+
step_index = 0
|
104 |
+
state_complete = False
|
105 |
+
# predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
|
106 |
+
return example_data[index_ex]['code_init'], predefined_action_log, "", ""
|
107 |
+
|
108 |
+
def submit_feedback(user_feedback, history, previous_response):
|
109 |
+
if (not state_extract or not state_generate or not state_agent or not state_example):
|
110 |
+
return "", "", ""
|
111 |
+
global step_index, state_complete
|
112 |
+
step_index += 1
|
113 |
+
msg = history
|
114 |
+
if step_index < len(process_steps):
|
115 |
+
msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
|
116 |
+
response_info = process_steps[step_index]
|
117 |
+
response = info_to_message(response_info) # Convert dictionary to formatted string
|
118 |
+
response += "Please provide feedback based on the history, response entries, and observation, and questions: "
|
119 |
+
step_index += 1
|
120 |
+
msg += response
|
121 |
+
else:
|
122 |
+
state_complete = True
|
123 |
+
response = "Agent Finished."
|
124 |
+
|
125 |
+
return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""
|
126 |
+
|
127 |
+
def load_phase_2_inputs(hypothesis, plan):
|
128 |
+
return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
|
129 |
+
|
130 |
+
|
131 |
+
|
132 |
+
predefined_action_log = """
|
133 |
+
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
|
134 |
+
[Action]: Inspect Script (train.py)
|
135 |
+
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
|
136 |
+
Objective: Understand the training script, including data processing, [...]
|
137 |
+
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
|
138 |
+
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
|
139 |
+
"""
|
140 |
+
|
141 |
+
|
142 |
+
predefined_observation = """
|
143 |
+
Epoch [1/10],
|
144 |
+
Train MSE: 0.543,
|
145 |
+
Test MSE: 0.688
|
146 |
+
Epoch [2/10],
|
147 |
+
Train MSE: 0.242,
|
148 |
+
Test MSE: 0.493\n
|
149 |
+
"""
|
150 |
+
|
151 |
+
# Initialize the global step_index and history
|
152 |
+
process_steps = [
|
153 |
+
{
|
154 |
+
"Action": "Inspect Script Lines (train.py)",
|
155 |
+
"Observation": (
|
156 |
+
"The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
|
157 |
+
"Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
|
158 |
+
"to calculate RMSE for different dimensions. Placeholder functions train_model and "
|
159 |
+
"predict exist without implementations."
|
160 |
+
),
|
161 |
+
},
|
162 |
+
{
|
163 |
+
"Action": "Execute Script (train.py)",
|
164 |
+
"Observation": (
|
165 |
+
"The script executed successfully. Generated embeddings using the BERT model. Completed "
|
166 |
+
"the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
|
167 |
+
),
|
168 |
+
},
|
169 |
+
{
|
170 |
+
"Action": "Edit Script (train.py)",
|
171 |
+
"Observation": (
|
172 |
+
"Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
|
173 |
+
"The edited train.py now has clearly defined functions"
|
174 |
+
"for data loading (load_data), model definition (build_model), "
|
175 |
+
"training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
|
176 |
+
),
|
177 |
+
},
|
178 |
+
{
|
179 |
+
"Action": "Retrieve Model",
|
180 |
+
"Observation": "CNN and BiLSTM retrieved.",
|
181 |
+
},
|
182 |
+
{
|
183 |
+
"Action": "Execute Script (train.py)",
|
184 |
+
"Observation": (
|
185 |
+
"The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
|
186 |
+
"the decrease in loss indicates improved model performance."
|
187 |
+
)
|
188 |
+
},
|
189 |
+
{
|
190 |
+
"Action": "Evaluation",
|
191 |
+
"Observation": predefined_observation,
|
192 |
+
}
|
193 |
+
]
|
194 |
+
def info_to_message(info):
|
195 |
+
msg = ""
|
196 |
+
for k, v in info.items():
|
197 |
+
if isinstance(v, dict):
|
198 |
+
tempv = v
|
199 |
+
v = ""
|
200 |
+
for k2, v2 in tempv.items():
|
201 |
+
v += f"{k2}:\n {v2}\n"
|
202 |
+
v = User.indent_text(v, 2)
|
203 |
+
msg += '-' * 64
|
204 |
+
msg += '\n'
|
205 |
+
msg += f"{k}:\n{v}\n"
|
206 |
+
return msg
|
207 |
+
|
208 |
+
|
209 |
+
def handle_example_click(example_index):
|
210 |
+
global index_ex
|
211 |
+
index_ex = example_index
|
212 |
+
return load_example(index_ex) # Simply return the text to display it in the textbox
|
213 |
+
|
214 |
+
# Gradio Interface
|
215 |
+
with gr.Blocks(theme=gr.themes.Default()) as app:
|
216 |
+
gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
|
217 |
+
gr.Markdown("### ")
|
218 |
+
gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
|
219 |
+
gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")
|
220 |
+
|
221 |
+
|
222 |
+
|
223 |
+
gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
|
224 |
+
|
225 |
+
|
226 |
+
|
227 |
+
|
228 |
+
# Use state variables to store generated hypothesis and experiment plan
|
229 |
+
hypothesis_state = gr.State("")
|
230 |
+
experiment_plan_state = gr.State("")
|
231 |
+
|
232 |
+
########## Phase 1: Research Idea Generation Tab ##############
|
233 |
+
with gr.Tab("💡Stage 1: Research Idea Generation"):
|
234 |
+
gr.Markdown("### Extract Research Elements and Generate Research Ideas")
|
235 |
+
|
236 |
+
with gr.Row():
|
237 |
+
with gr.Column():
|
238 |
+
paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text")
|
239 |
+
extract_button = gr.Button("🔍 Extract Research Elements")
|
240 |
+
with gr.Row():
|
241 |
+
tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
|
242 |
+
gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
|
243 |
+
keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
|
244 |
+
recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
|
245 |
+
with gr.Column():
|
246 |
+
with gr.Row(): # Move the button to the top
|
247 |
+
generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
|
248 |
+
with gr.Group():
|
249 |
+
gr.Markdown("### 🌟 Research Idea")
|
250 |
+
with gr.Row():
|
251 |
+
hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
|
252 |
+
experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
|
253 |
+
|
254 |
+
gr.Markdown("⬇️ <span style='color:Red;'>Click an example to load</span>")
|
255 |
+
|
256 |
+
gr.Examples(
|
257 |
+
examples=example_text,
|
258 |
+
inputs=[paper_text_input],
|
259 |
+
outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
|
260 |
+
fn=load_example_and_set_index,
|
261 |
+
run_on_click = True,
|
262 |
+
# label="⬇️ Click an example to load"
|
263 |
+
)
|
264 |
+
|
265 |
+
# Step 1: Extract Research Elements
|
266 |
+
extract_button.click(
|
267 |
+
fn=extract_research_elements,
|
268 |
+
inputs=paper_text_input,
|
269 |
+
outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
|
270 |
+
)
|
271 |
+
|
272 |
+
generate_button.click(
|
273 |
+
fn=generate_and_store,
|
274 |
+
inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
|
275 |
+
outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
|
276 |
+
)
|
277 |
+
|
278 |
+
|
279 |
+
|
280 |
+
########## Phase 2 & 3: Experiment implementation and execution ##############
|
281 |
+
with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
|
282 |
+
gr.Markdown("### Interact with the ExperimentAgent")
|
283 |
+
|
284 |
+
with gr.Row():
|
285 |
+
with gr.Column():
|
286 |
+
with gr.Group():
|
287 |
+
gr.Markdown("### 🌟 Generated Research Idea")
|
288 |
+
with gr.Row():
|
289 |
+
idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
|
290 |
+
plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
|
291 |
+
|
292 |
+
with gr.Column():
|
293 |
+
start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
|
294 |
+
with gr.Group():
|
295 |
+
gr.Markdown("### Implementation + Execution Log")
|
296 |
+
log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
|
297 |
+
code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)
|
298 |
+
|
299 |
+
with gr.Column():
|
300 |
+
response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
|
301 |
+
feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
|
302 |
+
submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
|
303 |
+
|
304 |
+
hypothesis_state.change(
|
305 |
+
fn=load_phase_2_inputs,
|
306 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
307 |
+
outputs=[idea_input, plan_input, code_display]
|
308 |
+
)
|
309 |
+
|
310 |
+
# Start research agent
|
311 |
+
start_exp_agnet.click(
|
312 |
+
fn=start_experiment_agent,
|
313 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
314 |
+
outputs=[code_display, log, response, feedback]
|
315 |
+
)
|
316 |
+
|
317 |
+
submit_button.click(
|
318 |
+
fn=submit_feedback,
|
319 |
+
inputs=[feedback, log, response],
|
320 |
+
outputs=[log, response, code_display, feedback]
|
321 |
+
)
|
322 |
+
|
323 |
+
# Test
|
324 |
+
if __name__ == "__main__":
|
325 |
+
step_index = 0
|
326 |
+
app.launch()
|
.history/app_20250403155219.py
ADDED
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from pathlib import Path
|
3 |
+
from reactagent.environment import Environment
|
4 |
+
from reactagent.agents.agent_research import ResearchAgent
|
5 |
+
from reactagent.runner import create_parser
|
6 |
+
from reactagent import llm
|
7 |
+
from reactagent.users.user import User
|
8 |
+
import os
|
9 |
+
import json
|
10 |
+
|
11 |
+
|
12 |
+
# Global variables to store session state
|
13 |
+
env = None
|
14 |
+
agent = None
|
15 |
+
state_example = False
|
16 |
+
state_extract = False
|
17 |
+
state_generate = False
|
18 |
+
state_agent = False
|
19 |
+
state_complete = False
|
20 |
+
index_ex = "1"
|
21 |
+
|
22 |
+
example_text = [
|
23 |
+
"Research Paper 1: Dataset and Baseline for Automatic Student Feedback Analysis",
|
24 |
+
"Research Paper 2: An Empirical Study on the Impact of Code Review on Software Quality"
|
25 |
+
]
|
26 |
+
|
27 |
+
# Load example JSON file
|
28 |
+
def load_example_data():
|
29 |
+
with open("example/example_data.json", "r") as json_file:
|
30 |
+
example_data = json.load(json_file)
|
31 |
+
|
32 |
+
for idx in example_data.keys():
|
33 |
+
try:
|
34 |
+
file = example_data[idx]["code_init"]
|
35 |
+
with open(os.path.join("example", file), "r") as f:
|
36 |
+
example_data[idx]["code_init"] = f.read()
|
37 |
+
except FileNotFoundError:
|
38 |
+
print(f"File not found: {file}. Skipping key: {idx}")
|
39 |
+
try:
|
40 |
+
file = example_data[idx]["code_final"]
|
41 |
+
with open(os.path.join("example", file), "r") as f:
|
42 |
+
example_data[idx]["code_final"] = f.read()
|
43 |
+
except FileNotFoundError:
|
44 |
+
print(f"File not found: {file}. Skipping key: {idx}")
|
45 |
+
return example_data
|
46 |
+
|
47 |
+
example_data = load_example_data()
|
48 |
+
|
49 |
+
# Function to handle the selection of an example and populate the respective fields
|
50 |
+
def load_example(example_id):
|
51 |
+
global index_ex
|
52 |
+
index_ex = str(example_id)
|
53 |
+
example = example_data[index_ex]
|
54 |
+
paper_text = 'Title:\t' + example['title'] + '\n\nAbstract:\t' + example['abstract']
|
55 |
+
return paper_text
|
56 |
+
|
57 |
+
example_text = [load_example(1), load_example(2)]
|
58 |
+
|
59 |
+
# Function to handle example clicks
|
60 |
+
def load_example_and_set_index(paper_text_input):
|
61 |
+
global index_ex, state_example
|
62 |
+
state_example = True
|
63 |
+
index_ex = str(example_text.index(paper_text_input) + 1)
|
64 |
+
paper_text = load_example(index_ex)
|
65 |
+
|
66 |
+
return paper_text, "", "", "", "", "", ""
|
67 |
+
|
68 |
+
|
69 |
+
|
70 |
+
########## Phase 1 ##############
|
71 |
+
|
72 |
+
def extract_research_elements(paper_text):
|
73 |
+
global state_extract, index_ex, state_example
|
74 |
+
if not state_example or paper_text == "":
|
75 |
+
return "", "", "", ""
|
76 |
+
state_extract = True
|
77 |
+
if paper_text != load_example(index_ex):
|
78 |
+
return "", "", "", ""
|
79 |
+
example = example_data[index_ex]
|
80 |
+
tasks = example['research_tasks']
|
81 |
+
gaps = example['research_gaps']
|
82 |
+
keywords = example['keywords']
|
83 |
+
recent_works = "\n".join(example['recent_works'])
|
84 |
+
return tasks, gaps, keywords, recent_works
|
85 |
+
|
86 |
+
|
87 |
+
# Step 2: Generate Research Hypothesis and Experiment Plan
|
88 |
+
def generate_and_store(paper_text, tasks, gaps, keywords, recent_works):
|
89 |
+
if (not state_extract or not state_example or paper_text == ""):
|
90 |
+
return "", "", "", ""
|
91 |
+
global state_generate, index_ex
|
92 |
+
state_generate = True
|
93 |
+
hypothesis = example_data[index_ex]['hypothesis']
|
94 |
+
experiment_plan = example_data[index_ex]['experiment_plan']
|
95 |
+
return hypothesis, experiment_plan, hypothesis, experiment_plan
|
96 |
+
|
97 |
+
########## Phase 2 & 3 ##############
|
98 |
+
def start_experiment_agent(hypothesis, plan):
|
99 |
+
if (not state_extract or not state_generate or not state_example):
|
100 |
+
return "", "", ""
|
101 |
+
global state_agent, step_index, state_complete
|
102 |
+
state_agent = True
|
103 |
+
step_index = 0
|
104 |
+
state_complete = False
|
105 |
+
# predefined_message = f"Implement the following hypothesis and experiment plan:\n\nHypothesis:\n{hypothesis}\n\nExperiment Plan:\n{plan}"
|
106 |
+
return example_data[index_ex]['code_init'], predefined_action_log, "", ""
|
107 |
+
|
108 |
+
def submit_feedback(user_feedback, history, previous_response):
|
109 |
+
if (not state_extract or not state_generate or not state_agent or not state_example):
|
110 |
+
return "", "", ""
|
111 |
+
global step_index, state_complete
|
112 |
+
step_index += 1
|
113 |
+
msg = history
|
114 |
+
if step_index < len(process_steps):
|
115 |
+
msg += previous_response + "\nUser feedback:" + user_feedback + "\n\n"
|
116 |
+
response_info = process_steps[step_index]
|
117 |
+
response = info_to_message(response_info) # Convert dictionary to formatted string
|
118 |
+
response += "Please provide feedback based on the history, response entries, and observation, and questions: "
|
119 |
+
step_index += 1
|
120 |
+
msg += response
|
121 |
+
else:
|
122 |
+
state_complete = True
|
123 |
+
response = "Agent Finished."
|
124 |
+
|
125 |
+
return msg, response, example_data[index_ex]['code_init'] if state_complete else example_data[index_ex]['code_final'], ""
|
126 |
+
|
127 |
+
def load_phase_2_inputs(hypothesis, plan):
|
128 |
+
return hypothesis, plan, "# Code implementation will be displayed here after Start ExperimentAgent."
|
129 |
+
|
130 |
+
|
131 |
+
|
132 |
+
predefined_action_log = """
|
133 |
+
[Reasoning]: To understand the initial structure and functionality of train.py for effective improvements.
|
134 |
+
[Action]: Inspect Script (train.py)
|
135 |
+
Input: {"script_name": "train.py", "start_line_number": "1", "end_line_number": "74"}
|
136 |
+
Objective: Understand the training script, including data processing, [...]
|
137 |
+
[Observation]: The train.py script imports [...]. Sets random seeds [...]. Defines [...] Placeholder functions [...] exist without implementation. [...]
|
138 |
+
[Feedback]: The script structure is clear, but key functions (train_model, predict) need proper implementation for proposed model training and prediction.\n
|
139 |
+
"""
|
140 |
+
|
141 |
+
|
142 |
+
predefined_observation = """
|
143 |
+
Epoch [1/10],
|
144 |
+
Train MSE: 0.543,
|
145 |
+
Test MSE: 0.688
|
146 |
+
Epoch [2/10],
|
147 |
+
Train MSE: 0.242,
|
148 |
+
Test MSE: 0.493\n
|
149 |
+
"""
|
150 |
+
|
151 |
+
# Initialize the global step_index and history
|
152 |
+
process_steps = [
|
153 |
+
{
|
154 |
+
"Action": "Inspect Script Lines (train.py)",
|
155 |
+
"Observation": (
|
156 |
+
"The train.py script imports necessary libraries (e.g., pandas, sklearn, torch). "
|
157 |
+
"Sets random seeds for reproducibility. Defines compute_metrics_for_regression function "
|
158 |
+
"to calculate RMSE for different dimensions. Placeholder functions train_model and "
|
159 |
+
"predict exist without implementations."
|
160 |
+
),
|
161 |
+
},
|
162 |
+
{
|
163 |
+
"Action": "Execute Script (train.py)",
|
164 |
+
"Observation": (
|
165 |
+
"The script executed successfully. Generated embeddings using the BERT model. Completed "
|
166 |
+
"the training process without errors. Metrics calculation placeholders indicated areas needing implementation."
|
167 |
+
),
|
168 |
+
},
|
169 |
+
{
|
170 |
+
"Action": "Edit Script (train.py)",
|
171 |
+
"Observation": (
|
172 |
+
"Edited train.py to separate data loading, model definition, training loop, and evaluation into distinct functions. "
|
173 |
+
"The edited train.py now has clearly defined functions"
|
174 |
+
"for data loading (load_data), model definition (build_model), "
|
175 |
+
"training (train_model), and evaluation (evaluate_model). Similarly, eval.py is reorganized to load the model and perform predictions efficiently."
|
176 |
+
),
|
177 |
+
},
|
178 |
+
{
|
179 |
+
"Action": "Retrieve Model",
|
180 |
+
"Observation": "CNN and BiLSTM retrieved.",
|
181 |
+
},
|
182 |
+
{
|
183 |
+
"Action": "Execute Script (train.py)",
|
184 |
+
"Observation": (
|
185 |
+
"The model trained over the specified number of epochs. Training and validation loss values are recorded for each epoch, "
|
186 |
+
"the decrease in loss indicates improved model performance."
|
187 |
+
)
|
188 |
+
},
|
189 |
+
{
|
190 |
+
"Action": "Evaluation",
|
191 |
+
"Observation": predefined_observation,
|
192 |
+
}
|
193 |
+
]
|
194 |
+
def info_to_message(info):
|
195 |
+
msg = ""
|
196 |
+
for k, v in info.items():
|
197 |
+
if isinstance(v, dict):
|
198 |
+
tempv = v
|
199 |
+
v = ""
|
200 |
+
for k2, v2 in tempv.items():
|
201 |
+
v += f"{k2}:\n {v2}\n"
|
202 |
+
v = User.indent_text(v, 2)
|
203 |
+
msg += '-' * 64
|
204 |
+
msg += '\n'
|
205 |
+
msg += f"{k}:\n{v}\n"
|
206 |
+
return msg
|
207 |
+
|
208 |
+
|
209 |
+
def handle_example_click(example_index):
|
210 |
+
global index_ex
|
211 |
+
index_ex = example_index
|
212 |
+
return load_example(index_ex) # Simply return the text to display it in the textbox
|
213 |
+
|
214 |
+
# Gradio Interface
|
215 |
+
with gr.Blocks(theme=gr.themes.Default()) as app:
|
216 |
+
gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
|
217 |
+
gr.Markdown("### ")
|
218 |
+
gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
|
219 |
+
gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")
|
220 |
+
|
221 |
+
|
222 |
+
|
223 |
+
gr.Markdown("MLR-Copilot is a framework where LLMs mimic researchers’ thought processes, designed to enhance the productivity of machine learning research by automating the generation and implementation of research ideas. It begins with a research paper, autonomously generating and validating these ideas, while incorporating human feedback to help reach executable research outcomes.")
|
224 |
+
|
225 |
+
|
226 |
+
|
227 |
+
|
228 |
+
# Use state variables to store generated hypothesis and experiment plan
|
229 |
+
hypothesis_state = gr.State("")
|
230 |
+
experiment_plan_state = gr.State("")
|
231 |
+
|
232 |
+
########## Phase 1: Research Idea Generation Tab ##############
|
233 |
+
with gr.Tab("💡Stage 1: Research Idea Generation"):
|
234 |
+
gr.Markdown("### Extract Research Elements and Generate Research Ideas")
|
235 |
+
|
236 |
+
with gr.Row():
|
237 |
+
with gr.Column():
|
238 |
+
paper_text_input = gr.Textbox(value="", lines=10, label="📑 Research Paper Text")
|
239 |
+
extract_button = gr.Button("🔍 Extract Research Elements")
|
240 |
+
with gr.Row():
|
241 |
+
tasks_output = gr.Textbox(placeholder="Research task definition", label="Research Tasks", lines=2, interactive=True)
|
242 |
+
gaps_output = gr.Textbox(placeholder="Research gaps of current works", label="Research Gaps", lines=2, interactive=True)
|
243 |
+
keywords_output = gr.Textbox(placeholder="Paper keywords", label="Keywords", lines=2, interactive=True)
|
244 |
+
recent_works_output = gr.Textbox(placeholder="Recent works extracted from Semantic Scholar", label="Recent Works", lines=2, interactive=True)
|
245 |
+
with gr.Column():
|
246 |
+
with gr.Row(): # Move the button to the top
|
247 |
+
generate_button = gr.Button("✍️ Generate Research Hypothesis & Experiment Plan")
|
248 |
+
with gr.Group():
|
249 |
+
gr.Markdown("### 🌟 Research Idea")
|
250 |
+
with gr.Row():
|
251 |
+
hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
|
252 |
+
experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
|
253 |
+
|
254 |
+
gr.Markdown("## ⬇️Click an example to load:")
|
255 |
+
|
256 |
+
gr.Examples(
|
257 |
+
examples=example_text,
|
258 |
+
inputs=[paper_text_input],
|
259 |
+
outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
|
260 |
+
fn=load_example_and_set_index,
|
261 |
+
run_on_click = True,
|
262 |
+
# label="⬇️ Click an example to load"
|
263 |
+
)
|
264 |
+
|
265 |
+
# Step 1: Extract Research Elements
|
266 |
+
extract_button.click(
|
267 |
+
fn=extract_research_elements,
|
268 |
+
inputs=paper_text_input,
|
269 |
+
outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
|
270 |
+
)
|
271 |
+
|
272 |
+
generate_button.click(
|
273 |
+
fn=generate_and_store,
|
274 |
+
inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
|
275 |
+
outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
|
276 |
+
)
|
277 |
+
|
278 |
+
|
279 |
+
|
280 |
+
########## Phase 2 & 3: Experiment implementation and execution ##############
|
281 |
+
with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
|
282 |
+
gr.Markdown("### Interact with the ExperimentAgent")
|
283 |
+
|
284 |
+
with gr.Row():
|
285 |
+
with gr.Column():
|
286 |
+
with gr.Group():
|
287 |
+
gr.Markdown("### 🌟 Generated Research Idea")
|
288 |
+
with gr.Row():
|
289 |
+
idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
|
290 |
+
plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
|
291 |
+
|
292 |
+
with gr.Column():
|
293 |
+
start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
|
294 |
+
with gr.Group():
|
295 |
+
gr.Markdown("### Implementation + Execution Log")
|
296 |
+
log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
|
297 |
+
code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)
|
298 |
+
|
299 |
+
with gr.Column():
|
300 |
+
response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
|
301 |
+
feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
|
302 |
+
submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
|
303 |
+
|
304 |
+
hypothesis_state.change(
|
305 |
+
fn=load_phase_2_inputs,
|
306 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
307 |
+
outputs=[idea_input, plan_input, code_display]
|
308 |
+
)
|
309 |
+
|
310 |
+
# Start research agent
|
311 |
+
start_exp_agnet.click(
|
312 |
+
fn=start_experiment_agent,
|
313 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
314 |
+
outputs=[code_display, log, response, feedback]
|
315 |
+
)
|
316 |
+
|
317 |
+
submit_button.click(
|
318 |
+
fn=submit_feedback,
|
319 |
+
inputs=[feedback, log, response],
|
320 |
+
outputs=[log, response, code_display, feedback]
|
321 |
+
)
|
322 |
+
|
323 |
+
# Test
|
324 |
+
if __name__ == "__main__":
|
325 |
+
step_index = 0
|
326 |
+
app.launch()
|
.history/app_20250403155224.py
ADDED
@@ -0,0 +1,326 @@
[326 added lines — a verbatim duplicate of the preceding .history app.py snapshot shown above]
.history/app_20250403155226.py
ADDED
@@ -0,0 +1,326 @@
[326 added lines — a verbatim duplicate of the preceding .history app.py snapshot shown above]
.history/app_20250403155927.py
ADDED
@@ -0,0 +1,326 @@
[326 added lines — the same app.py snapshot as above, apart from line 215, where gr.Blocks gains a css argument:
    with gr.Blocks(theme=gr.themes.Default(), css=".gr-examples-label {display: none;}") as app:]
.history/app_20250403160042.py
ADDED
@@ -0,0 +1,326 @@
[Listing truncated at line 258 of 326 — the visible lines match the earlier app.py snapshots; the css argument introduced in app_20250403155927.py is absent (line 215 is again gr.Blocks(theme=gr.themes.Default()) as app:)]
|
259 |
+
outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
|
260 |
+
fn=load_example_and_set_index,
|
261 |
+
run_on_click = True,
|
262 |
+
# label="⬇️ Click an example to load"
|
263 |
+
)
|
264 |
+
|
265 |
+
# Step 1: Extract Research Elements
|
266 |
+
extract_button.click(
|
267 |
+
fn=extract_research_elements,
|
268 |
+
inputs=paper_text_input,
|
269 |
+
outputs=[tasks_output, gaps_output, keywords_output, recent_works_output]
|
270 |
+
)
|
271 |
+
|
272 |
+
generate_button.click(
|
273 |
+
fn=generate_and_store,
|
274 |
+
inputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output],
|
275 |
+
outputs=[hypothesis_output, experiment_plan_output, hypothesis_state, experiment_plan_state]
|
276 |
+
)
|
277 |
+
|
278 |
+
|
279 |
+
|
280 |
+
########## Phase 2 & 3: Experiment implementation and execution ##############
|
281 |
+
with gr.Tab("🧪 Stage 2 & Stage 3: Experiment implementation and execution"):
|
282 |
+
gr.Markdown("### Interact with the ExperimentAgent")
|
283 |
+
|
284 |
+
with gr.Row():
|
285 |
+
with gr.Column():
|
286 |
+
with gr.Group():
|
287 |
+
gr.Markdown("### 🌟 Generated Research Idea")
|
288 |
+
with gr.Row():
|
289 |
+
idea_input = gr.Textbox(label="Generated Research Hypothesis", lines=30, interactive=False)
|
290 |
+
plan_input = gr.Textbox(label="Generated Experiment Plan", lines=30, interactive=False)
|
291 |
+
|
292 |
+
with gr.Column():
|
293 |
+
start_exp_agnet = gr.Button("⚙️ Start / Reset ExperimentAgent", elem_classes=["agent-btn"])
|
294 |
+
with gr.Group():
|
295 |
+
gr.Markdown("### Implementation + Execution Log")
|
296 |
+
log = gr.Textbox(label="📖 Execution Log", lines=20, interactive=False)
|
297 |
+
code_display = gr.Code(label="🧑💻 Implementation", language="python", interactive=False)
|
298 |
+
|
299 |
+
with gr.Column():
|
300 |
+
response = gr.Textbox(label="🤖 ExperimentAgent Response", lines=30, interactive=False)
|
301 |
+
feedback = gr.Textbox(placeholder="N/A", label="🧑🔬 User Feedback", lines=3, interactive=True)
|
302 |
+
submit_button = gr.Button("Submit", elem_classes=["Submit-btn"])
|
303 |
+
|
304 |
+
hypothesis_state.change(
|
305 |
+
fn=load_phase_2_inputs,
|
306 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
307 |
+
outputs=[idea_input, plan_input, code_display]
|
308 |
+
)
|
309 |
+
|
310 |
+
# Start research agent
|
311 |
+
start_exp_agnet.click(
|
312 |
+
fn=start_experiment_agent,
|
313 |
+
inputs=[hypothesis_state, experiment_plan_state],
|
314 |
+
outputs=[code_display, log, response, feedback]
|
315 |
+
)
|
316 |
+
|
317 |
+
submit_button.click(
|
318 |
+
fn=submit_feedback,
|
319 |
+
inputs=[feedback, log, response],
|
320 |
+
outputs=[log, response, code_display, feedback]
|
321 |
+
)
|
322 |
+
|
323 |
+
# Test
|
324 |
+
if __name__ == "__main__":
|
325 |
+
step_index = 0
|
326 |
+
app.launch()
|
.history/app_20250403160050.py
ADDED
@@ -0,0 +1,326 @@
	(326 lines: a full auto-saved snapshot of app.py; identical to .history/app_20250403160802.py below, except that extract_research_elements still uses `if paper_text != load_example(index_ex):` instead of the title-prefix mismatch check, so this file is one line shorter.)
.history/app_20250403160802.py
ADDED
@@ -0,0 +1,327 @@
	(327 lines: the last auto-saved snapshot in this commit; it already contains every change applied to app.py in the diff below — the title-prefix mismatch check with its "Mismatch detected." message, the css=".gr-examples-label {display: none;}" argument to gr.Blocks, the closing </span> on the GitHub notice, the "## ⬇️Click an example to load" heading, and the commented-out label on gr.Examples.)
app.py
CHANGED
@@ -74,7 +74,8 @@ def extract_research_elements(paper_text):
     if not state_example or paper_text == "":
         return "", "", "", ""
     state_extract = True
-    if paper_text != load_example(index_ex):
+    if not paper_text.startswith("Title:\t" + example_data[index_ex]["title"]):
+        print("Mismatch detected.")
         return "", "", "", ""
     example = example_data[index_ex]
     tasks = example['research_tasks']
@@ -212,11 +213,11 @@ def handle_example_click(example_index):
     return load_example(index_ex) # Simply return the text to display it in the textbox
 
 # Gradio Interface
-with gr.Blocks(theme=gr.themes.Default()) as app:
+with gr.Blocks(css=".gr-examples-label {display: none;}", theme=gr.themes.Default()) as app:
     gr.Markdown("# MLR- Copilot: Machine Learning Research based on LLM Agents")
     gr.Markdown("### ")
     gr.Markdown("## <span style='color:Orange;'> This UI is for predefined example demo only.</span>")
-    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)")
+    gr.Markdown("## <span style='color:Orange;'> To reproduce the results please use [Github](https://github.com/du-nlp-lab/MLR-Copilot/)</span>")
 
 
 
@@ -250,6 +251,8 @@ with gr.Blocks(theme=gr.themes.Default()) as app:
                     with gr.Row():
                         hypothesis_output = gr.Textbox(label="Generated Hypothesis", lines=20, interactive=False)
                         experiment_plan_output = gr.Textbox(label="Generated Experiment Plan", lines=20, interactive=False)
+
+        gr.Markdown("## ⬇️Click an example to load")
 
         gr.Examples(
             examples=example_text,
@@ -257,7 +260,7 @@ with gr.Blocks(theme=gr.themes.Default()) as app:
             outputs=[paper_text_input, tasks_output, gaps_output, keywords_output, recent_works_output, hypothesis_output, experiment_plan_output],
             fn=load_example_and_set_index,
             run_on_click = True,
-            label="⬇️ Click an example to load"
+            # label="⬇️ Click an example to load"
         )
 
     # Step 1: Extract Research Elements
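
The core of this "mismatch patch" is the switch from an exact-equality comparison to a title-prefix check in extract_research_elements. A minimal standalone sketch of that check follows; the example_data dictionary and sample strings here are hypothetical stand-ins, not the Space's actual example JSON.

# Minimal sketch of the title-prefix mismatch check (hypothetical data, not the Space's JSON).
example_data = {"1": {"title": "Dataset and Baseline for Automatic Student Feedback Analysis"}}
index_ex = "1"

def is_matching_example(paper_text: str) -> bool:
    # The patch accepts any text that still begins with the loaded example's title,
    # instead of requiring the textbox content to equal load_example(index_ex) exactly.
    return paper_text.startswith("Title:\t" + example_data[index_ex]["title"])

print(is_matching_example("Title:\tDataset and Baseline for Automatic Student Feedback Analysis\n\nAbstract:\t..."))  # True
print(is_matching_example("Some unrelated paper text"))  # False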