ans123 committed
Commit 9eeef04 · verified · 1 Parent(s): e1c0698

Update app.py

Files changed (1):
  app.py +22 -6
app.py CHANGED
@@ -54,7 +54,7 @@ except Exception as e:
     client = None
 
 # --- Model configuration ---
-MODEL_ID = "llama-3.1-70b-versatile" # Using Groq's Llama 3.1 70B model
+MODEL_ID = "llama3-70b-8192" # ** Updated model ID **
 
 # --- Constants ---
 # Using the same enhanced constants from previous versions
@@ -444,7 +444,7 @@ def search_web_serper(search_query: str, search_type: str = 'general', location:
     except Exception as e: logger.error(f"Error processing Serper response: {e}"); return json.dumps({"error": "Failed to process web search results."})
 
 
-# --- AI Interaction Logic (Using Groq Llama 3.1 with Streaming) ---
+# --- AI Interaction Logic (Using Groq Llama 3 with Streaming) ---
 def get_ai_response_stream(user_id: str, user_input: str) -> Generator[str, None, None]:
     """
     Gets response from Groq, handling context, system prompt, tool calls,
@@ -467,7 +467,8 @@ def get_ai_response_stream(user_id: str, user_input: str) -> Generator[str, None
 
     # System prompt (identical persona to v4)
     current_emotion_display = user_profile.get('current_emotion', 'how you feel'); user_name = user_profile.get('name', 'there'); career_goal = user_profile.get('career_goal', 'your goals'); location = user_profile.get('location', 'your area'); industry = user_profile.get('industry', 'your field'); exp_level = user_profile.get('experience_level', 'your experience level')
-    system_prompt = f"""You are Aishura, an advanced AI career assistant powered by Groq's Llama 3.1 model. Your core mission is to provide **empathetic, supportive, and highly personalized career guidance**. You are talking to {user_name}. **Persona & Style:** Empathetic, validating (acknowledge {current_emotion_display}), collaborative ("we", "us"), positive, action-oriented, personalized (use {user_name}, {career_goal}, {location}, {industry}, {exp_level}), concise, clear (markdown). **Functionality:** 1. Acknowledge & Empathize. 2. Address Query Directly. 3. Leverage Tools Strategically: Suggest `generate_document_template`, `create_personalized_routine`, `analyze_resume`, `analyze_portfolio`, `extract_and_rate_skills_from_resume` proactively. Use `search_jobs_courses_skills` ONLY when explicitly asked for jobs/courses/skills/company info (use profile details for query, specify type, present results clearly). Do NOT use tools for general chat. 4. Synthesize Tool Results: Explain relevance. 5. Maintain Context. 6. Handle Errors Gracefully: Apologize simply, suggest alternatives."""
+    # ** Updated System Prompt Model Name **
+    system_prompt = f"""You are Aishura, an advanced AI career assistant powered by Groq's {MODEL_ID} model. Your core mission is to provide **empathetic, supportive, and highly personalized career guidance**. You are talking to {user_name}. **Persona & Style:** Empathetic, validating (acknowledge {current_emotion_display}), collaborative ("we", "us"), positive, action-oriented, personalized (use {user_name}, {career_goal}, {location}, {industry}, {exp_level}), concise, clear (markdown). **Functionality:** 1. Acknowledge & Empathize. 2. Address Query Directly. 3. Leverage Tools Strategically: Suggest `generate_document_template`, `create_personalized_routine`, `analyze_resume`, `analyze_portfolio`, `extract_and_rate_skills_from_resume` proactively. Use `search_jobs_courses_skills` ONLY when explicitly asked for jobs/courses/skills/company info (use profile details for query, specify type, present results clearly). Do NOT use tools for general chat. 4. Synthesize Tool Results: Explain relevance. 5. Maintain Context. 6. Handle Errors Gracefully: Apologize simply, suggest alternatives."""
 
     # Prepare message history (OpenAI/Groq compatible format)
     messages = [{"role": "system", "content": system_prompt}]
@@ -486,7 +487,9 @@ def get_ai_response_stream(user_id: str, user_input: str) -> Generator[str, None
             if valid_tool_calls: api_msg['tool_calls'] = msg['tool_calls']
             else:
                 if api_msg['content'] is None: continue
-            if api_msg.get('content') is not None or api_msg.get('tool_calls'): messages.append(api_msg)
+            # Ensure content is not None OR tool_calls are present before appending
+            if api_msg.get('content') is not None or api_msg.get('tool_calls'):
+                messages.append(api_msg)
         elif msg["role"] == "tool":
             if 'tool_call_id' in msg and 'content' in msg and isinstance(msg.get('content'), str):
                 api_msg['tool_call_id'] = msg['tool_call_id']; api_msg['content'] = msg['content']; messages.append(api_msg)
@@ -514,7 +517,14 @@ def get_ai_response_stream(user_id: str, user_input: str) -> Generator[str, None
         finish_reason = response.choices[0].finish_reason
 
     # --- Handle Groq API Errors ---
-    except APIError as e: logger.error(f"Groq API Error: {e.status_code} - {e.response}"); yield f"AI service error ({e.message}). Try again." ; return
+    except APIError as e:
+        # ** Specific check for model decommission error **
+        if e.status_code == 400 and 'model_decommissioned' in str(e.body):
+            logger.error(f"Groq API Error: Model '{MODEL_ID}' is decommissioned. {e.body}")
+            yield f"AI service error: The model '{MODEL_ID}' is no longer available. Please update the application."
+            return
+        else:
+            logger.error(f"Groq API Error: {e.status_code} - {e.response}"); yield f"AI service error ({e.message}). Try again." ; return
     except APITimeoutError: logger.error("Groq timed out."); yield "AI service request timed out. Try again."; return
     except APIConnectionError as e: logger.error(f"Groq Connection Error: {e}"); yield "Cannot connect to AI service."; return
     except RateLimitError: logger.error("Groq Rate Limit Exceeded."); yield "AI service busy (Rate Limit). Try again shortly."; return
@@ -603,6 +613,10 @@ def get_ai_response_stream(user_id: str, user_input: str) -> Generator[str, None
             if delta_content:
                 yield delta_content # Yield the text chunk to the caller (Gradio)
                 full_response_content += delta_content
+            # Handle potential finish reason if needed
+            if chunk.choices[0].finish_reason:
+                logger.info(f"Streaming (after tool call) finished with reason: {chunk.choices[0].finish_reason}")
+
 
         logger.info("Finished streaming final response after tool calls.")
         # Store the complete final assistant response in DB
@@ -882,7 +896,8 @@ def create_interface():
     # --- Build Gradio Interface (Structure identical to v4) ---
     with gr.Blocks(theme=gr.themes.Soft(primary_hue="sky", secondary_hue="blue", font=[gr.themes.GoogleFont("Poppins"), "Arial", "sans-serif"]), title="Aishura v5 (Groq)") as app:
         gr.Markdown("# Aishura - Your Empathetic AI Career Copilot 🚀")
-        gr.Markdown("_Powered by Groq Llama 3.1 & Real-Time Data_") # Updated subtitle
+        # ** Updated Subtitle **
+        gr.Markdown("_Powered by Groq Llama3 70B & Real-Time Data_")
 
         # Welcome Screen
         with gr.Group(visible=True) as welcome_group:
@@ -969,6 +984,7 @@ if __name__ == "__main__":
     if not SERPER_API_KEY: print("⚠️ WARNING: SERPER_API_KEY not found. Live web search DISABLED.")
     else: print("✅ SERPER_API_KEY found.")
     if not client: print("❌ ERROR: Groq client failed to initialize. AI features DISABLED.")
+    # ** Updated Model Name in Check **
     else: print(f"✅ Groq client initialized for model '{MODEL_ID}'.")
     print("---------------------------------------------------\n")
 
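
For reference, a minimal standalone sketch of the pattern this commit adopts: calling Groq's chat completions endpoint with the updated MODEL_ID and special-casing the model_decommissioned error that motivated the change. This is an illustration only, not the app's actual code path; the GROQ_API_KEY environment variable, the exact shape of e.body, and the single-message prompt are assumptions here, and the real get_ai_response_stream additionally handles history, tool calls, and database storage.

# Hedged sketch: assumes the `groq` SDK, a GROQ_API_KEY env var, and that a
# retired model surfaces as a 400 APIError whose body mentions
# 'model_decommissioned' (the condition the commit's check looks for).
import os
from groq import Groq, APIError

MODEL_ID = "llama3-70b-8192"  # the model ID this commit switches to

client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

def stream_reply(prompt: str):
    """Yield text chunks from a streaming chat completion, with the commit's error handling."""
    try:
        stream = client.chat.completions.create(
            model=MODEL_ID,
            messages=[{"role": "user", "content": prompt}],
            stream=True,
        )
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta:
                yield delta
    except APIError as e:
        # Mirror the commit's special-casing of decommissioned models.
        if getattr(e, "status_code", None) == 400 and "model_decommissioned" in str(getattr(e, "body", "")):
            yield f"The model '{MODEL_ID}' is no longer available. Please update the application."
        else:
            yield f"AI service error ({e.message}). Try again."

if __name__ == "__main__":
    for piece in stream_reply("Say hello in five words."):
        print(piece, end="", flush=True)
    print()

Keeping the model name in a single MODEL_ID constant, as the commit does, keeps the system prompt, the startup check, and the decommission error message in sync the next time the model is swapped.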