Commit 9b08ac4: v3
Author: Jeremy Live
Parent(s): b78f40d
app.py
CHANGED
@@ -128,17 +128,21 @@ def run_crewai_process(user_query, model, temperature):
     final_answer_chat = [{"role": "user", "content": user_query}]
 
     try:
-        #
-
-
-
-        yield "Starting CrewAI process...", final_answer_chat, agent_thoughts, generated_code, execution_output, generated_plot_path
+        # Initial status update with proper message format
+        initial_message = {"role": "assistant", "content": "Starting CrewAI process..."}
+        final_answer_chat = [{"role": "user", "content": str(user_query)}, initial_message]
+        yield final_answer_chat, agent_thoughts, generated_code, execution_output, None, None
 
+        # Run the crew process
         final_result = crew.kickoff(inputs={"query": user_query})
 
         # Get the captured CrewAI output (agent thoughts)
         agent_thoughts = output_buffer.getvalue()
-
+
+        # Update with processing message
+        processing_message = {"role": "assistant", "content": "Processing complete. Generating code..."}
+        final_answer_chat = [{"role": "user", "content": str(user_query)}, processing_message]
+        yield final_answer_chat, agent_thoughts, generated_code, execution_output, None, None
 
         # The final result is the generated code from the code_output_agent
         generated_code_raw = str(final_result).strip()
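The change above switches the handler from yielding a bare status string to yielding a messages-style chat history at each step, which is the pattern Gradio uses to stream incremental results from a generator function. A minimal sketch of that wiring, assuming a gr.Blocks layout whose outputs list matches each yielded tuple position for position; the component and function names here are illustrative, not the app's exact ones:

    import gradio as gr

    def run_process(user_query):
        # Each yield must supply one value per output component, in order.
        chat = [{"role": "user", "content": user_query}]
        chat.append({"role": "assistant", "content": "Starting..."})
        yield chat, "", ""  # chatbot, thoughts, code

        generated = f"print({user_query!r})"  # stand-in for crew.kickoff(...)
        chat.append({"role": "assistant", "content": "Done."})
        yield chat, "captured agent output", generated

    with gr.Blocks() as demo:
        query = gr.Textbox(label="Query")
        chatbot = gr.Chatbot(type="messages")
        thoughts = gr.Textbox(label="Agent Thoughts")
        code_box = gr.Code(language="python")
        query.submit(run_process, inputs=query, outputs=[chatbot, thoughts, code_box])

    demo.launch()

Gradio expects every yield from the same handler to carry one value per declared output, so the five-value yield (agent_thoughts first) that this commit leaves next to the new six-value yields (final_answer_chat first) is worth double-checking.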
@@ -157,11 +161,9 @@ def run_crewai_process(user_query, model, temperature):
             return # Exit the generator
 
         # Format for Gradio Chatbot (list of dictionaries with 'role' and 'content' keys only)
-        final_answer_chat = [
-
-
-        ]
-        yield agent_thoughts, final_answer_chat, generated_code, execution_output, generated_plot_path
+        code_gen_message = {"role": "assistant", "content": "Code generation complete. See the 'Generated Code' box. Attempting to execute code..."}
+        final_answer_chat = [{"role": "user", "content": str(user_query)}, code_gen_message]
+        yield final_answer_chat, agent_thoughts, generated_code, execution_output, None, None
 
         # --- Execute the generated code ---
         plot_file_path = 'plot.png' # Expected plot file name
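The comment about 'role' and 'content' keys refers to Gradio's messages-format chat history: a flat, chronological list of dicts that gr.Chatbot accepts when constructed with type="messages". A minimal illustration (the history contents are invented for the example):

    import gradio as gr

    # Messages-style history: only "role" and "content" keys, no tuples.
    history = [
        {"role": "user", "content": "Plot monthly sales"},
        {"role": "assistant", "content": "Code generation complete. Attempting to execute code..."},
    ]

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(value=history, type="messages")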
@@ -212,14 +214,21 @@ def run_crewai_process(user_query, model, temperature):
             execution_output = "No code was generated to execute."
 
         # Update final answer chat to reflect execution attempt
-        final_answer_chat = [
-            {"role": "user", "content": str(user_query)},
-            {"role": "assistant", "content": "Code execution finished. See 'Execution Output'."}
-        ]
+        execution_complete_msg = "Code execution finished. See 'Execution Output'."
         if generated_plot_path:
-
+            plot_msg = "Plot generated successfully. See 'Generated Plot'."
+            final_answer_chat = [
+                {"role": "user", "content": str(user_query)},
+                {"role": "assistant", "content": execution_complete_msg},
+                {"role": "assistant", "content": plot_msg}
+            ]
         else:
-
+            no_plot_msg = "No plot was generated. Check the execution output for details."
+            final_answer_chat = [
+                {"role": "user", "content": str(user_query)},
+                {"role": "assistant", "content": execution_complete_msg},
+                {"role": "assistant", "content": no_plot_msg}
+            ]
 
         yield agent_thoughts, final_answer_chat, generated_code, execution_output, generated_plot_path
 
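The branch above reports success or failure depending on whether the hard-coded plot.png exists after the generated code runs. One way to implement that execute-then-detect step is sketched below; the subprocess approach, file names, and timeout are assumptions rather than what app.py necessarily does, and LLM-generated code should be sandboxed in any real deployment:

    import os
    import subprocess
    import sys

    def execute_generated_code(code: str, plot_file_path: str = "plot.png"):
        # Clear any stale plot so an old file is not mistaken for a new one.
        if os.path.exists(plot_file_path):
            os.remove(plot_file_path)
        # Run the code in a subprocess so a crash cannot take down the app.
        with open("generated_script.py", "w") as f:
            f.write(code)
        proc = subprocess.run(
            [sys.executable, "generated_script.py"],
            capture_output=True, text=True, timeout=60,
        )
        execution_output = proc.stdout + proc.stderr
        # Report a plot path only if the script actually produced the file.
        generated_plot_path = plot_file_path if os.path.exists(plot_file_path) else None
        return execution_output, generated_plot_path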
@@ -232,7 +241,7 @@ def run_crewai_process(user_query, model, temperature):
             {"role": "user", "content": str(user_query)},
             {"role": "assistant", "content": error_message}
         ]
-        yield
+        yield final_answer_chat, agent_thoughts, generated_code, execution_output, None, None
 
     finally:
         # Restore original stdout
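The finally: block restores the original stdout, which implies the handler redirected sys.stdout into output_buffer up front so CrewAI's console output could be read back as agent thoughts. A minimal sketch of that capture-and-restore pattern, with a print standing in for the crew run:

    import io
    import sys

    output_buffer = io.StringIO()
    original_stdout = sys.stdout
    sys.stdout = output_buffer  # everything printed now lands in the buffer
    try:
        print("Agent: thinking step by step...")  # stand-in for crew.kickoff(...)
        agent_thoughts = output_buffer.getvalue()
    finally:
        sys.stdout = original_stdout  # always restore, even if the crew raises

contextlib.redirect_stdout wraps the same swap in a context manager and is harder to leave in a broken state if an exception escapes.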