bpHigh committed
Commit 634f701 · verified · 1 Parent(s): 63f5a9d

Add progress bars

Files changed (1)
  1. app.py +33 -6
app.py CHANGED
@@ -224,17 +224,19 @@ def upload_file_handler(files):
         return files
     return []
 
-async def generate_plan(history, file_cache):
+async def generate_plan(history, file_cache, progress=gr.Progress()):
     """Generate a plan using the planning prompt and Gemini API"""
 
     # Build conversation history
+    progress(0, desc="Starting")
+
     conversation_history = ""
     if history:
         for user_msg, ai_msg in history:
             conversation_history += f"User: {user_msg}\n"
             if ai_msg:
                 conversation_history += f"Assistant: {ai_msg}\n"
-
+    progress(0.05, desc="Getting HF MCP tools")
     try:
         mcp_tool_func = modal.Function.from_name("HuggingFace-MCP","connect_and_get_tools")
         hf_query_gen_tool_details = mcp_tool_func.remote()
@@ -247,12 +249,15 @@ async def generate_plan(history, file_cache):
             Tool_Details=hf_query_gen_tool_details
         ) + "\n\n" + conversation_history
         # Get plan from Gemini
+        progress(0.15, desc="Strategizing which tools to call")
+
         plan = generate_with_gemini(formatted_prompt, "Planning with gemini")
-
         # Parse the plan
         parsed_plan = parse_json_codefences(plan)
         print(parsed_plan)
         # Call tool to get tool calls
+        progress(0.50, desc="calling HF platform tools and getting data")
+
         try:
             mcp_call_tool_func = modal.Function.from_name(app_name="HuggingFace-MCP",name="call_tool")
             tool_calls = []
@@ -262,6 +267,8 @@ async def generate_plan(history, file_cache):
             print(str(e))
             tool_calls = []
         print(tool_calls)
+        progress(0.75, desc="Generating Plan context from tool call info")
+
         if tool_calls!=[]:
             formatted_context_prompt = hf_context_gen_prompt.format(
                 Conversation=conversation_history,
@@ -277,12 +284,14 @@ async def generate_plan(history, file_cache):
                 Results="Couldn't generate the tool calls results but use your knowledge about huggingface platform(models, datasets, spaces, training libraries, transfomers library etc.) as backup to generate the plan"
             )
             context = generate_with_gemini(formatted_context_prompt, "Generating context for plan")
+        progress(1, desc="Complete Plan generated")
 
         return context
 
-def generate_code_with_devstral(plan_text, history, file_cache):
+def generate_code_with_devstral(plan_text, history, file_cache, progress=gr.Progress()):
     """Generate code using the deployed Devstral model via Modal"""
-
+    progress(0, desc="Starting Codegen")
+
     if not MODAL_AVAILABLE:
         return "❌ Modal not available. Please install Modal to use code generation."
 
@@ -332,6 +341,7 @@ def generate_code_with_devstral(plan_text, history, file_cache):
     api_key = os.getenv("DEVSTRAL_API_KEY")
     print(f"🚀 Generating code using Devstral...")
     print(f"📡 Connecting to: {base_url}")
+    progress(0.1, desc="Calling Devstral VLLM API server deployed on Modal")
 
     try:
         devstral_inference_func = modal.Function.from_name("devstral-inference-client", "run_devstral_inference")
@@ -343,16 +353,28 @@ def generate_code_with_devstral(plan_text, history, file_cache):
             mode="single"
         )
         if result and "response" in result:
+            progress(1, desc="Code has been generated")
+
            code_output = result["response"]
            return f"🚀 **Generated Code:**\n\n{code_output}"
         else:
+            progress(1, desc="Error")
+
            return "❌ **Error:** No response received from Devstral model."
     except Exception as e:
+        progress(1, desc="Error")
+
        return f"❌ **Error:** {str(e)}"
-def execute_code(code_output):
+def execute_code(code_output, progress=gr.Progress()):
+    progress(0, desc="Starting Code Execution")
+
     try:
+        progress(0.05, desc="Parsing Python codefence")
+
        code = parse_python_codefences(code_output)
        print(code)
+        progress(0.1, desc="Running code in sandbox")
+
        result = code_eval(code)
        if isinstance(result, dict):
            result_str = json.dumps(result, indent=4)
@@ -360,8 +382,13 @@ def execute_code(code_output):
            result_str = '\n'.join(str(x) for x in result)
        else:
            result_str = str(result)
+
+        progress(1, desc="Code Execution Complete")
+
        return result_str
     except Exception as e:
+        progress(1, desc="Error")
+
        return f"❌ **Error:** {str(e)}"
 
 # Custom CSS for a sleek design
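
For context on the pattern this commit introduces: Gradio injects a live progress tracker into any event handler that declares a gr.Progress() default parameter, and each progress(fraction, desc=...) call (fraction in [0, 1]) updates the bar shown over the output component while the handler runs. A minimal, self-contained sketch of that mechanism — the slow_task function and its step descriptions are illustrative placeholders, not taken from app.py:

import time
import gradio as gr

def slow_task(prompt, progress=gr.Progress()):
    # Gradio detects the gr.Progress() default and passes in a live tracker;
    # calling it with a fraction plus a desc updates the on-screen bar.
    progress(0, desc="Starting")
    time.sleep(1)
    progress(0.5, desc="Working")
    time.sleep(1)
    progress(1, desc="Done")
    return f"Processed: {prompt}"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Result")
    # The bar renders on the output component for the duration of the call.
    gr.Button("Run").click(slow_task, inputs=inp, outputs=out)

demo.launch()

Note that gr.Progress(track_tqdm=True) can also mirror any tqdm loop inside the handler automatically, which avoids hand-placed milestones like the 0.05/0.15/0.50 calls in the diff above.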