Master-warrier committed
Commit 368f482 · Parent: 50c423b

Upgraded gradio interface

Files changed (2):
  1. app.py +47 -39
  2. prompts/devstral_coding_prompt.py +2 -0
app.py CHANGED

@@ -224,11 +224,11 @@ def upload_file_handler(files):
         return files
     return []
 
-async def generate_plan(history, file_cache, progress=gr.Progress()):
+async def generate_plan(history, file_cache):
     """Generate a plan using the planning prompt and Gemini API"""
 
     # Build conversation history
-    progress(0, desc="Starting")
+    yield "**⏳ Generating plan...** (Starting)"
 
     conversation_history = ""
     if history:
@@ -236,7 +236,7 @@ async def generate_plan(history, file_cache, progress=gr.Progress()):
             conversation_history += f"User: {user_msg}\n"
             if ai_msg:
                 conversation_history += f"Assistant: {ai_msg}\n"
-    progress(0.05, desc="Getting HF MCP tools")
+    yield "**⏳ Generating plan...** (Getting HF MCP tools)"
     try:
         mcp_tool_func = modal.Function.from_name("HuggingFace-MCP","connect_and_get_tools")
         hf_query_gen_tool_details = mcp_tool_func.remote()
@@ -249,14 +249,14 @@ async def generate_plan(history, file_cache, progress=gr.Progress()):
         Tool_Details=hf_query_gen_tool_details
     ) + "\n\n" + conversation_history
     # Get plan from Gemini
-    progress(0.15, desc="Strategizing which tools to call")
+    yield "**⏳ Generating plan...** (Strategizing which tools to call)"
 
     plan = generate_with_gemini(formatted_prompt, "Planning with gemini")
     # Parse the plan
     parsed_plan = parse_json_codefences(plan)
     print(parsed_plan)
     # Call tool to get tool calls
-    progress(0.50, desc="calling HF platform tools and getting data")
+    yield "**⏳ Generating plan...** (calling HF platform tools and getting data)"
 
     try:
         mcp_call_tool_func = modal.Function.from_name(app_name="HuggingFace-MCP",name="call_tool")
@@ -267,7 +267,7 @@ async def generate_plan(history, file_cache, progress=gr.Progress()):
         print(str(e))
         tool_calls = []
     print(tool_calls)
-    progress(0.75, desc="Generating Plan context from tool call info")
+    yield "**⏳ Generating plan...** (Generating Plan context from tool call info)"
 
     if tool_calls!=[]:
         formatted_context_prompt = hf_context_gen_prompt.format(
@@ -284,19 +284,19 @@ async def generate_plan(history, file_cache, progress=gr.Progress()):
             Results="Couldn't generate the tool calls results but use your knowledge about huggingface platform(models, datasets, spaces, training libraries, transfomers library etc.) as backup to generate the plan"
         )
     context = generate_with_gemini(formatted_context_prompt, "Generating context for plan")
-    progress(1, desc="Complete Plan generated")
+    yield context
 
-    return context
-
-def generate_code_with_devstral(plan_text, history, file_cache, progress=gr.Progress()):
+def generate_code_with_devstral(plan_text, history, file_cache):
     """Generate code using the deployed Devstral model via Modal"""
-    progress(0, desc="Starting Codegen")
+    yield "**⏳ Generating code...** (Starting Codegen)"
 
     if not MODAL_AVAILABLE:
-        return "❌ Modal not available. Please install Modal to use code generation."
+        yield "❌ Modal not available. Please install Modal to use code generation."
+        return
 
-    if not plan_text or not plan_text.strip():
-        return "❌ Please generate a plan first before generating code."
+    if not plan_text or not plan_text.strip() or "**Plan will be generated here...**" in plan_text:
+        yield "❌ Please generate a plan first before generating code."
+        return
 
     # try:
     # Extract user query from conversation history
@@ -341,7 +341,7 @@ def generate_code_with_devstral(plan_text, history, file_cache, progress=gr.Progress()):
     api_key = os.getenv("DEVSTRAL_API_KEY")
     print(f"🚀 Generating code using Devstral...")
     print(f"📡 Connecting to: {base_url}")
-    progress(0.1, desc="Calling Devstral VLLM API server deployed on Modal")
+    yield "**⏳ Generating code...** (Calling Devstral VLLM API server deployed on Modal)"
 
     try:
         devstral_inference_func = modal.Function.from_name("devstral-inference-client", "run_devstral_inference")
@@ -353,28 +353,30 @@ def generate_code_with_devstral(plan_text, history, file_cache, progress=gr.Progress()):
             mode="single"
         )
         if result and "response" in result:
-            progress(1, desc="Code has been generated")
-
             code_output = result["response"]
-            return f"🚀 **Generated Code:**\n\n{code_output}"
+            yield f"🚀 **Generated Code:**\n\n{code_output}"
         else:
-            progress(1, desc="Error")
-
-            return "❌ **Error:** No response received from Devstral model."
+            yield "❌ **Error:** No response received from Devstral model."
     except Exception as e:
-        progress(1, desc="Error")
-
-        return f"❌ **Error:** {str(e)}"
-def execute_code(code_output, progress=gr.Progress()):
-    progress(0, desc="Starting Code Execution")
+        yield f"❌ **Error:** {str(e)}"
+def execute_code(code_output):
+    """Executes Python code from a string and returns the output."""
+    yield "**⏳ Executing code...** (Starting)"
 
     try:
-        progress(0.05, desc="Parsing Python codefence")
+        if "**Code will be generated here...**" in code_output or "Generated Code" not in code_output:
+            yield "❌ Please generate code first before executing."
+            return
 
+        yield "**⏳ Executing code...** (Parsing code)"
         code = parse_python_codefences(code_output)
         print(code)
-        progress(0.1, desc="Running code in sandbox")
-
+
+        if not code or not code.strip():
+            yield "❌ No Python code found to execute."
+            return
+
+        yield "**⏳ Executing code...** (Running in sandbox)"
        result = code_eval(code)
        if isinstance(result, dict):
            result_str = json.dumps(result, indent=4)
@@ -383,13 +385,9 @@ def execute_code(code_output, progress=gr.Progress()):
        else:
            result_str = str(result)
 
-        progress(1, desc="Code Execution Complete")
-
-        return result_str
+        yield f"**✅ Execution Complete:**\n\n```\n{result_str}\n```"
    except Exception as e:
-        progress(1, desc="Error")
-
-        return f"❌ **Error:** {str(e)}"
+        yield f"❌ **Error executing code:** {str(e)}"
 
 # Custom CSS for a sleek design
 custom_css = """
@@ -465,6 +463,14 @@ custom_css = """
     font-size: 1.2em !important;
     margin-bottom: 30px !important;
 }
+
+.output-markdown {
+    height: 250px;
+    overflow-y: auto !important;
+    border: 1px solid #e0e0e0;
+    padding: 10px;
+    border-radius: 5px;
+}
 """
 
 # Create the Gradio interface
@@ -490,18 +496,20 @@ with gr.Blocks(css=custom_css, title="Data Science Requirements Gathering Agent"
             )
 
             plan_output = gr.Markdown(
+                "**Plan will be generated here...**",
                 label="Generated Plan",
-                visible=True,
-                max_height=150,
+                elem_classes=["output-markdown"],
             )
 
             code_output = gr.Markdown(
+                "**Code will be generated here...**",
                 label="Generated Code",
-                visible=True,max_height=150,
+                elem_classes=["output-markdown"],
            )
            execution_output = gr.Markdown(
+                "**Execution output will be shown here...**",
                label="Execution Output",
-                visible=True,max_height=150,
+                elem_classes=["output-markdown"],
            )
        with gr.Row():
            with gr.Column(scale=4):
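The substance of the app.py change is a switch from `gr.Progress()` callbacks to generator handlers: in Gradio, an event handler that `yield`s re-renders its bound output component on every yield, so intermediate yields double as streaming status messages and the final yield carries the real result. A minimal sketch of the pattern, with hypothetical names, assuming a Gradio version with generator support:

```python
import time
import gradio as gr

custom_css = """
.output-markdown { height: 250px; overflow-y: auto; }
"""

def long_task(prompt):
    # Each yield replaces the contents of the bound gr.Markdown,
    # so intermediate yields act as live status updates.
    yield "**⏳ Working...** (starting)"
    time.sleep(1)
    yield "**⏳ Working...** (almost done)"
    time.sleep(1)
    yield f"✅ Done: {prompt}"  # the last yield is the final result

with gr.Blocks(css=custom_css) as demo:
    box = gr.Textbox(label="Prompt")
    out = gr.Markdown("**Output will appear here...**", elem_classes=["output-markdown"])
    gr.Button("Run").click(long_task, inputs=box, outputs=out)

demo.launch()
```

Because the yielded strings land directly in the `gr.Markdown` panels, the placeholder values ("**Plan will be generated here...**" and friends) can double as "not generated yet" sentinels, which is exactly what the new guard clauses in `generate_code_with_devstral` and `execute_code` check for.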
 
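Both handlers reach their backends through the same Modal lookup-and-call idiom that recurs throughout the diff. A minimal sketch, assuming the named Modal apps ("HuggingFace-MCP", "devstral-inference-client") are already deployed:

```python
import modal

# Resolve a function on an already-deployed Modal app by (app name, function name).
mcp_tool_func = modal.Function.from_name("HuggingFace-MCP", "connect_and_get_tools")

# .remote() runs the function in Modal's cloud and blocks until the result returns;
# exceptions raised remotely propagate here, which is why the app wraps these
# calls in try/except and falls back to an empty tool_calls list.
hf_query_gen_tool_details = mcp_tool_func.remote()
print(hf_query_gen_tool_details)
```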
prompts/devstral_coding_prompt.py CHANGED

@@ -14,4 +14,6 @@ devstral_code_gen_user_prompt ="""
     {context}
 
     Just return the full execution code block in a python codefence as shown below without any explanation or suffix or prefix text.
+
+    Ensure that the code is EXECUTABLE and does not contain any errors.
     """