# filename: app_openai_updated.py
import gradio as gr
import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt # Not directly used for plotting
import plotly.graph_objects as go
import plotly.express as px
from datetime import datetime, timedelta
import random
import json
import os
import time
import requests
from typing import List, Dict, Any, Optional
import logging
from dotenv import load_dotenv
# import pytz # Not used
import uuid
import re
# import base64 # Not used
# from io import BytesIO # Not used
# from PIL import Image # Not used
# --- Use OpenAI library ---
import openai
# --- Load environment variables ---
load_dotenv()
# --- Set up logging ---
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# --- Configure API keys ---
# Make sure you have OPENAI_API_KEY and SERPER_API_KEY in your .env file or environment
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
SERPER_API_KEY = os.getenv("SERPER_API_KEY")
if not OPENAI_API_KEY:
logger.warning("OPENAI_API_KEY not found. AI features will not work.")
# You might want to raise an error or handle this case gracefully
if not SERPER_API_KEY:
logger.warning("SERPER_API_KEY not found. Web search features will not work.")
# --- Initialize the OpenAI client ---
try:
client = openai.OpenAI(api_key=OPENAI_API_KEY)
# Test connection (optional, uncomment to test during startup)
# client.models.list()
logger.info("OpenAI client initialized successfully.")
except Exception as e:
logger.error(f"Failed to initialize OpenAI client: {e}")
# Handle error appropriately, maybe exit or set client to None
client = None
# --- Model configuration ---
MODEL_ID = "gpt-4o" # Use OpenAI GPT-4o model
# --- Constants ---
EMOTIONS = ["Unmotivated", "Anxious", "Confused", "Excited", "Overwhelmed", "Discouraged"]
GOAL_TYPES = ["Get a job at a big company", "Find an internship", "Change careers", "Improve skills", "Network better"]
USER_DB_PATH = "user_database.json"
RESUME_FOLDER = "user_resumes"
PORTFOLIO_FOLDER = "user_portfolios"
# Ensure folders exist
os.makedirs(RESUME_FOLDER, exist_ok=True)
os.makedirs(PORTFOLIO_FOLDER, exist_ok=True)
# --- Tool Definitions for OpenAI ---
# Define functions that the AI can call.
# These will be implemented as Python functions below.
tools_list = [
{
"type": "function",
"function": {
"name": "get_job_opportunities",
"description": "Search for relevant job opportunities based on query, location, and career goals using web search.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The specific job title, keyword, or role the user is searching for.",
},
"location": {
"type": "string",
"description": "The city, region, or country where the user wants to search for jobs.",
},
"max_results": {
"type": "integer",
"description": "Maximum number of job opportunities to return (default 5).",
},
},
"required": ["query", "location"],
},
}
},
{
"type": "function",
"function": {
"name": "generate_document_template",
"description": "Generate a document template (like a resume or cover letter) based on type, career field, and experience level.",
"parameters": {
"type": "object",
"properties": {
"document_type": {
"type": "string",
"description": "Type of document (e.g., Resume, Cover Letter, Self-introduction).",
},
"career_field": {
"type": "string",
"description": "The career field or industry.",
},
"experience_level": {
"type": "string",
"description": "User's experience level (e.g., Entry, Mid, Senior).",
},
},
"required": ["document_type"],
},
}
},
{
"type": "function",
"function": {
"name": "create_personalized_routine",
"description": "Create a personalized daily or weekly career development routine based on the user's current emotion, goals, and available time.",
"parameters": {
"type": "object",
"properties": {
"emotion": {
"type": "string",
"description": "User's current primary emotional state (e.g., Unmotivated, Anxious).",
},
"goal": {
"type": "string",
"description": "User's specific career goal for this routine.",
},
"available_time_minutes": {
"type": "integer",
"description": "Available time in minutes per day (default 60).",
},
"routine_length_days": {
"type": "integer",
"description": "Length of the routine in days (default 7).",
},
},
"required": ["emotion", "goal"],
},
}
},
{
"type": "function",
"function": {
"name": "analyze_resume",
"description": "Analyze the provided resume text and provide feedback, comparing it against the user's stated career goal.",
"parameters": {
"type": "object",
"properties": {
"resume_text": {
"type": "string",
"description": "The full text of the user's resume.",
},
"career_goal": {
"type": "string",
"description": "The user's career goal or target job/industry to analyze against.",
},
},
"required": ["resume_text", "career_goal"],
},
}
},
{
"type": "function",
"function": {
"name": "analyze_portfolio",
"description": "Analyze a user's portfolio based on a URL (if provided) and a description, offering feedback relative to their career goal.",
"parameters": {
"type": "object",
"properties": {
"portfolio_url": {
"type": "string",
"description": "URL to the user's online portfolio (optional).",
},
"portfolio_description": {
"type": "string",
"description": "Detailed description of the portfolio's content, purpose, and structure.",
},
"career_goal": {
"type": "string",
"description": "The user's career goal or target job/industry to analyze against.",
},
},
"required": ["portfolio_description", "career_goal"],
},
}
},
{
"type": "function",
"function": {
"name": "extract_and_rate_skills_from_resume",
"description": "Extracts key skills from resume text and rates them on a scale of 1-10 based on apparent proficiency shown in the resume.",
"parameters": {
"type": "object",
"properties": {
"resume_text": {
"type": "string",
"description": "The full text of the user's resume.",
},
"max_skills": {
"type": "integer",
"description": "Maximum number of skills to extract (default 8).",
},
},
"required": ["resume_text"],
},
}
}
]
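# Note: every "name" declared above must match a key in the `available_functions` dispatch
# table inside get_ai_response(); a schema without a matching Python implementation is
# reported back to the model as an unavailable tool.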
# --- User Database Functions (Unchanged, adapted for history format if needed) ---
# [Previous database functions load_user_database, save_user_database, get_user_profile, update_user_profile, etc. remain largely the same]
# Ensure chat history format matches OpenAI's expected {role: 'user'/'assistant', content: 'message'}
def load_user_database():
"""Load user database from JSON file or create if it doesn't exist"""
try:
with open(USER_DB_PATH, 'r') as file:
db = json.load(file)
# Ensure chat history uses 'content' key for OpenAI compatibility
for user_id in db.get('users', {}):
if 'chat_history' not in db['users'][user_id]:
db['users'][user_id]['chat_history'] = []
else:
# Convert old format if necessary
for msg in db['users'][user_id]['chat_history']:
if 'message' in msg and 'content' not in msg:
msg['content'] = msg.pop('message')
return db
except (FileNotFoundError, json.JSONDecodeError):
db = {'users': {}}
save_user_database(db)
return db
def save_user_database(db):
"""Save user database to JSON file"""
with open(USER_DB_PATH, 'w') as file:
json.dump(db, file, indent=4)
def get_user_profile(user_id):
"""Get user profile from database or create new one"""
db = load_user_database()
if user_id not in db['users']:
db['users'][user_id] = {
"user_id": user_id,
"name": "",
"location": "",
"current_emotion": "",
"career_goal": "",
"progress_points": 0,
"completed_tasks": [],
"upcoming_events": [],
"routine_history": [],
"daily_emotions": [],
"resume_path": "",
"portfolio_path": "",
"recommendations": [],
"chat_history": [], # Initialize chat history
"joined_date": datetime.now().strftime("%Y-%m-%d")
}
save_user_database(db)
# Ensure chat history uses 'content' key
elif 'chat_history' not in db['users'][user_id] or \
(db['users'][user_id]['chat_history'] and 'content' not in db['users'][user_id]['chat_history'][0]):
if 'chat_history' not in db['users'][user_id]:
db['users'][user_id]['chat_history'] = []
else:
for msg in db['users'][user_id]['chat_history']:
if 'message' in msg and 'content' not in msg:
msg['content'] = msg.pop('message')
save_user_database(db)
return db['users'][user_id]
def update_user_profile(user_id, updates):
"""Update user profile with new information"""
db = load_user_database()
if user_id in db['users']:
for key, value in updates.items():
db['users'][user_id][key] = value
save_user_database(db)
return db['users'][user_id]
def add_task_to_user(user_id, task):
"""Add a new task to user's completed tasks"""
db = load_user_database()
if user_id in db['users']:
if 'completed_tasks' not in db['users'][user_id]:
db['users'][user_id]['completed_tasks'] = []
task_with_date = {
"task": task,
"date": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
db['users'][user_id]['completed_tasks'].append(task_with_date)
db['users'][user_id]['progress_points'] += random.randint(10, 25) # Keep random points for now
save_user_database(db)
return db['users'][user_id]
def add_emotion_record(user_id, emotion):
"""Add a new emotion record to user's daily emotions"""
db = load_user_database()
if user_id in db['users']:
if 'daily_emotions' not in db['users'][user_id]:
db['users'][user_id]['daily_emotions'] = []
emotion_record = {
"emotion": emotion,
"date": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
db['users'][user_id]['daily_emotions'].append(emotion_record)
db['users'][user_id]['current_emotion'] = emotion # Update current emotion
save_user_database(db)
return db['users'][user_id]
def add_routine_to_user(user_id, routine):
"""Add a new routine to user's routine history"""
db = load_user_database()
if user_id in db['users']:
if 'routine_history' not in db['users'][user_id]:
db['users'][user_id]['routine_history'] = []
routine_with_date = {
"routine": routine, # The AI generated routine JSON
"start_date": datetime.now().strftime("%Y-%m-%d"),
"end_date": (datetime.now() + timedelta(days=routine.get('days', 7))).strftime("%Y-%m-%d"),
"completion": 0 # Start completion at 0
}
# Prepend to make the latest routine first (optional)
db['users'][user_id]['routine_history'].insert(0, routine_with_date)
save_user_database(db)
return db['users'][user_id]
def save_user_resume(user_id, resume_text):
"""Save user's resume text to file and update profile path."""
if not resume_text: return None
filename = f"{user_id}_resume.txt"
filepath = os.path.join(RESUME_FOLDER, filename)
try:
with open(filepath, 'w', encoding='utf-8') as file:
file.write(resume_text)
update_user_profile(user_id, {"resume_path": filepath})
logger.info(f"Resume saved for user {user_id} at {filepath}")
return filepath
except Exception as e:
logger.error(f"Error saving resume for user {user_id}: {e}")
return None
def save_user_portfolio(user_id, portfolio_url, portfolio_description):
"""Save user's portfolio info (URL and description) to file."""
if not portfolio_description: return None
filename = f"{user_id}_portfolio.json"
filepath = os.path.join(PORTFOLIO_FOLDER, filename)
portfolio_content = {
"url": portfolio_url,
"description": portfolio_description,
"saved_date": datetime.now().isoformat()
}
try:
with open(filepath, 'w', encoding='utf-8') as file:
json.dump(portfolio_content, file, indent=4)
update_user_profile(user_id, {"portfolio_path": filepath})
logger.info(f"Portfolio info saved for user {user_id} at {filepath}")
return filepath
except Exception as e:
logger.error(f"Error saving portfolio info for user {user_id}: {e}")
return None
def add_recommendation_to_user(user_id, recommendation):
"""Add a new recommendation object to user's list"""
db = load_user_database()
if user_id in db['users']:
if 'recommendations' not in db['users'][user_id]:
db['users'][user_id]['recommendations'] = []
recommendation_with_date = {
"recommendation": recommendation, # The AI generated recommendation object
"date": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"status": "pending" # pending, completed, dismissed
}
# Add to the beginning of the list
db['users'][user_id]['recommendations'].insert(0, recommendation_with_date)
# Optional: Limit the number of stored recommendations
max_recs = 20
if len(db['users'][user_id]['recommendations']) > max_recs:
db['users'][user_id]['recommendations'] = db['users'][user_id]['recommendations'][:max_recs]
save_user_database(db)
return db['users'][user_id]
def add_chat_message(user_id, role, content):
"""Add a message to the user's chat history using OpenAI format."""
db = load_user_database()
if user_id in db['users']:
if 'chat_history' not in db['users'][user_id]:
db['users'][user_id]['chat_history'] = []
# Basic validation
if role not in ['user', 'assistant', 'system', 'tool']:
logger.warning(f"Invalid role '{role}' provided for chat message.")
return db['users'][user_id]
if not content and role != 'tool': # Tool messages can have null content initially
logger.warning(f"Empty content provided for chat role '{role}'.")
# return db['users'][user_id] # Allow empty content for now?
chat_message = {
"role": role,
"content": content, # Use 'content' key
"timestamp": datetime.now().isoformat() # Use ISO format
}
db['users'][user_id]['chat_history'].append(chat_message)
# Optional: Limit chat history length
max_history = 50 # Keep last 50 messages (user + assistant)
if len(db['users'][user_id]['chat_history']) > max_history:
# Keep system prompt + last N messages
system_msgs = [m for m in db['users'][user_id]['chat_history'] if m['role'] == 'system']
other_msgs = [m for m in db['users'][user_id]['chat_history'] if m['role'] != 'system']
db['users'][user_id]['chat_history'] = system_msgs + other_msgs[-max_history:]
save_user_database(db)
return db['users'][user_id]
# --- Tool Implementation Functions ---
# These functions are called when the AI decides to use a tool.
def get_job_opportunities(query: str, location: str, max_results: int = 5) -> str:
"""
Searches for job opportunities using the Serper API based on a query and location.
Returns a JSON string of the search results or an error message.
"""
logger.info(f"Executing tool: get_job_opportunities(query='{query}', location='{location}', max_results={max_results})")
if not SERPER_API_KEY:
return json.dumps({"error": "Serper API key is not configured."})
try:
headers = {
'X-API-KEY': SERPER_API_KEY,
'Content-Type': 'application/json'
}
        payload = {
            'q': f"{query} jobs in {location}",
            'num': max_results,
            'location': location  # Add location parameter explicitly if API supports it
        }
        logger.info(f"Calling Serper API with payload: {payload}")
        response = requests.post(
            'https://google.serper.dev/search',  # Serper's search endpoint expects a POST with a JSON body
            headers=headers,
            json=payload,
            timeout=10  # Add a timeout
        )
response.raise_for_status() # Raise an exception for bad status codes (4xx or 5xx)
data = response.json()
logger.info(f"Serper API response received (keys: {data.keys()})")
# Extract relevant job listings (adapt based on Serper's actual output structure)
job_results = []
# Check 'jobs' key first, as it's common in job search results
if 'jobs' in data and isinstance(data['jobs'], list):
for item in data['jobs']:
job_results.append({
'title': item.get('title', 'N/A'),
'company': item.get('company_name', item.get('source', 'Unknown Company')), # Try different fields
'description': item.get('description', item.get('snippet', 'No description provided.')),
'link': item.get('link', '#'),
'location': item.get('location', location), # Use provided location if not in result
'date_posted': item.get('detected_extensions', {}).get('posted_at', 'N/A') # Example nested field
})
# Fallback to organic results if 'jobs' key is not present or empty
elif 'organic' in data and not job_results:
logger.info("Parsing 'organic' results for jobs.")
for item in data['organic']:
# Heuristic check if it looks like a job listing
title = item.get('title', '')
snippet = item.get('snippet', '')
if any(keyword in title.lower() for keyword in ['job', 'career', 'hiring', 'position', 'vacancy']) or \
any(keyword in snippet.lower() for keyword in ['apply', 'responsibilities', 'qualifications']):
job_results.append({
'title': title,
'company': item.get('source', extract_company_from_title(title)), # Use source or extract
'description': snippet,
'link': item.get('link', '#'),
'location': location, # Serper organic results might not specify location clearly
'date_posted': 'Recent' # Often not available in organic results
})
if not job_results:
logger.warning(f"No job results extracted from Serper response for query '{query}' in '{location}'.")
return json.dumps({"message": "No job opportunities found for your query.", "results": []})
logger.info(f"Extracted {len(job_results)} job results.")
# Return results as a JSON string for the AI
return json.dumps({"message": f"Found {len(job_results)} potential job opportunities.", "results": job_results})
except requests.exceptions.RequestException as e:
logger.error(f"Error calling Serper API: {e}")
return json.dumps({"error": f"Could not connect to job search service: {e}"})
except Exception as e:
logger.error(f"Exception in get_job_opportunities tool: {e}")
return json.dumps({"error": f"An unexpected error occurred during job search: {e}"})
def extract_company_from_title(title):
"""Simple helper to guess company name from job title string."""
# Improved heuristic
delimiters = [' at ', ' - ', ' | ', ' hiring ', ' for ']
for delim in delimiters:
if delim in title:
parts = title.split(delim)
# Take the part after the delimiter, unless it looks like a job title itself
potential_company = parts[-1].strip()
if len(potential_company) > 1 and not any(kw in potential_company.lower() for kw in ['developer', 'manager', 'engineer', 'analyst']):
return potential_company
# If no delimiter found or extraction failed, return default
return "Unknown Company"
# --- Implement other tool functions ---
def generate_document_template(document_type: str, career_field: str = "", experience_level: str = "") -> str:
"""Generates a basic markdown template for the specified document type."""
logger.info(f"Executing tool: generate_document_template(document_type='{document_type}', career_field='{career_field}', experience_level='{experience_level}')")
# This function *could* call the AI again for a more detailed template,
# but for simplicity, we'll return a predefined basic structure here.
# A real implementation would likely use the AI.
template = f"## Basic Template: {document_type}\n\n"
template += f"**Target Field:** {career_field or 'Not specified'}\n"
template += f"**Experience Level:** {experience_level or 'Not specified'}\n\n"
if "resume" in document_type.lower():
template += (
"### Contact Information\n"
"- Name:\n- Phone:\n- Email:\n- LinkedIn:\n- Portfolio (Optional):\n\n"
"### Summary/Objective\n"
"- [Write 2-3 sentences summarizing your key skills and career goals relevant to the target field/job]\n\n"
"### Experience\n"
"- **Company Name** | Location | Job Title | Start Date - End Date\n"
" - [Quantifiable achievement 1 using action verbs]\n"
" - [Quantifiable achievement 2 using action verbs]\n\n"
"### Education\n"
"- University Name | Degree | Graduation Date\n\n"
"### Skills\n"
"- Technical Skills: [List relevant software, tools, languages]\n"
"- Soft Skills: [List relevant interpersonal skills]\n"
)
elif "cover letter" in document_type.lower():
template += (
"[Your Name]\n[Your Address]\n[Your Phone]\n[Your Email]\n\n"
"[Date]\n\n"
"[Hiring Manager Name (if known), or Title]\n[Company Name]\n[Company Address]\n\n"
"Dear [Mr./Ms./Mx. Hiring Manager Last Name or Hiring Team],\n\n"
"**Introduction:** [State the position you're applying for and where you saw it. Briefly mention your key qualification or enthusiasm.]\n\n"
"**Body Paragraph(s):** [Connect your skills and experience directly to the job requirements. Provide specific examples. Explain why you are interested in this company and role.]\n\n"
"**Conclusion:** [Reiterate your interest and key qualification. State your call to action (e.g., looking forward to discussing). Thank the reader.]\n\n"
"Sincerely,\n[Your Name]"
)
else:
template += "[Structure for this document type needs to be defined.]"
return json.dumps({"template_markdown": template}) # Return as JSON string
def create_personalized_routine(emotion: str, goal: str, available_time_minutes: int = 60, routine_length_days: int = 7) -> str:
"""Creates a basic personalized routine structure."""
logger.info(f"Executing tool: create_personalized_routine(emotion='{emotion}', goal='{goal}', time={available_time_minutes}, days={routine_length_days})")
# Similar to template generation, this could call the AI for a detailed plan.
# Here, we generate a basic fallback structure.
# A real implementation should use the AI for better personalization.
routine = generate_basic_routine(emotion, goal, available_time_minutes, routine_length_days) # Use the existing fallback
logger.info(f"Generated basic routine: {routine['name']}")
# Add routine to user profile
# user_profile = add_routine_to_user(session_user_id, routine) # Need user_id here! Pass it if possible.
# For now, just return the routine structure. The main chat logic should handle saving it.
return json.dumps(routine) # Return JSON string
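# For reference, the shape this tool is expected to return (implied by its consumers in this
# file: add_routine_to_user, create_routine_completion_gauge, and the routine UI handler);
# the example values are illustrative only:
#
#   {
#     "name": "7-Day Focus Routine",
#     "description": "Short summary of the plan",
#     "days": 7,
#     "daily_tasks": [
#       {"day": 1,
#        "tasks": [{"name": "Update LinkedIn headline", "duration": 20, "points": 10,
#                   "description": "Why this task helps"}]}
#     ]
#   }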
def analyze_resume(resume_text: str, career_goal: str) -> str:
"""Provides a basic analysis structure for the resume."""
logger.info(f"Executing tool: analyze_resume(career_goal='{career_goal}', resume_length={len(resume_text)})")
# This should ideally call the AI for actual analysis.
# Returning a placeholder structure for now.
analysis = {
"strengths": ["Identified strength 1 based on AI analysis (placeholder).", "Identified strength 2 (placeholder)."],
"areas_for_improvement": ["Suggestion 1 for improvement (placeholder).", "Suggestion 2 based on goal alignment (placeholder)."],
"format_feedback": "General feedback on format (placeholder).",
"content_feedback": f"Feedback on content relevance to '{career_goal}' (placeholder).",
"next_steps": ["Recommended action 1 (placeholder).", "Recommended action 2 (placeholder)."]
}
# Save the resume text (need user_id)
# save_user_resume(session_user_id, resume_text) # Pass user_id if available
return json.dumps({"analysis": analysis}) # Return JSON string
def analyze_portfolio(portfolio_description: str, career_goal: str, portfolio_url: str = "") -> str:
"""Provides a basic analysis structure for the portfolio."""
logger.info(f"Executing tool: analyze_portfolio(career_goal='{career_goal}', url='{portfolio_url}', desc_length={len(portfolio_description)})")
# Placeholder analysis
analysis = {
"alignment_with_goal": f"Assessment of alignment with '{career_goal}' (placeholder).",
"strengths": ["Portfolio strength 1 (placeholder).", "Portfolio strength 2 (placeholder)."],
"areas_for_improvement": ["Suggestion 1 for portfolio enhancement (placeholder)."],
"presentation_feedback": "Feedback on presentation/UX (placeholder).",
"next_steps": ["Recommended action for portfolio (placeholder)."]
}
# Save portfolio info (need user_id)
# save_user_portfolio(session_user_id, portfolio_url, portfolio_description) # Pass user_id if available
return json.dumps({"analysis": analysis}) # Return JSON string
def extract_and_rate_skills_from_resume(resume_text: str, max_skills: int = 8) -> str:
"""
Placeholder function to simulate skill extraction and rating.
In a real scenario, this would involve more sophisticated NLP or another AI call.
"""
logger.info(f"Executing tool: extract_and_rate_skills_from_resume(resume_length={len(resume_text)}, max_skills={max_skills})")
# Simple keyword spotting for demonstration
possible_skills = ["Python", "Java", "Project Management", "Communication", "Data Analysis", "Teamwork", "Leadership", "SQL", "React", "Customer Service", "Problem Solving", "Microsoft Office"]
found_skills = []
resume_lower = resume_text.lower()
for skill in possible_skills:
if skill.lower() in resume_lower:
# Assign a random score for demonstration
found_skills.append({"name": skill, "score": random.randint(4, 9)})
if len(found_skills) >= max_skills:
break
# Ensure we return *some* skills if none automatically found
if not found_skills:
found_skills = [
{"name": "Communication", "score": random.randint(5,8)},
{"name": "Teamwork", "score": random.randint(5,8)},
{"name": "Problem Solving", "score": random.randint(5,8)},
]
logger.info(f"Extracted skills (placeholder): {[s['name'] for s in found_skills]}")
return json.dumps({"skills": found_skills[:max_skills]}) # Return JSON string
# --- AI Interaction Logic (Using OpenAI) ---
def get_ai_response(user_id: str, user_input: str, generate_recommendations: bool = True) -> str:
"""
Gets a response from the OpenAI API, handling context, system prompt, and tool calls.
"""
logger.info(f"Getting AI response for user {user_id}. Input: '{user_input[:100]}...'")
if not client:
return "I apologize, the AI service is currently unavailable. Please check the configuration."
try:
user_profile = get_user_profile(user_id)
# --- System Prompt ---
system_prompt = f"""
You are Aishura, an emotionally intelligent AI career assistant. Your primary goal is to provide empathetic,
realistic, and actionable career guidance. Always follow these steps:
1. Acknowledge the user's message and, if applicable, their expressed emotion (from their profile: '{user_profile.get('current_emotion', 'Not specified')}' or message). Use empathetic language.
2. Directly address the user's query or statement.
3. Proactively offer relevant support using your tools: suggest searching for jobs (`get_job_opportunities`), generating document templates (`generate_document_template`), creating a personalized routine (`create_personalized_routine`), analyzing their resume (`analyze_resume`) or portfolio (`analyze_portfolio`) if they've provided them or mention doing so.
4. Tailor your response based on the user's profile:
- Name: {user_profile.get('name', 'User')}
- Location: {user_profile.get('location', 'Not specified')}
- Stated Career Goal: {user_profile.get('career_goal', 'Not specified')}
- Recent Emotion: {user_profile.get('current_emotion', 'Not specified')}
5. If the user has uploaded a resume or portfolio (check profile paths: resume='{user_profile.get('resume_path', '')}', portfolio='{user_profile.get('portfolio_path', '')}'), mention you can analyze them or use insights from previous analysis if available.
6. Keep responses concise, friendly, and focused on next steps. Avoid overly long paragraphs.
7. Use markdown for formatting (bolding, lists) where appropriate.
"""
# --- Build Message History ---
messages = [{"role": "system", "content": system_prompt}]
# Add recent chat history (ensure it's in OpenAI format)
chat_history = user_profile.get('chat_history', [])
        # Replay only user/assistant turns with a 'content' key. Stored tool results stay
        # in the DB for reference but are not replayed here: the API rejects a 'tool'
        # message that is not immediately preceded by the assistant message carrying its
        # tool_calls, and that pairing is not persisted.
        for msg in chat_history:
            if msg.get('role') in ['user', 'assistant'] and 'content' in msg:
                messages.append({"role": msg['role'], "content": msg['content']})
# Add current user input
messages.append({"role": "user", "content": user_input})
# --- Initial API Call ---
logger.info(f"Sending {len(messages)} messages to OpenAI model {MODEL_ID}.")
response = client.chat.completions.create(
model=MODEL_ID,
messages=messages,
tools=tools_list,
tool_choice="auto", # Let the model decide whether to use tools
temperature=0.7,
max_tokens=1024 # Adjust as needed
)
response_message = response.choices[0].message
logger.info("Received initial response from OpenAI.")
# --- Tool Call Handling ---
tool_calls = response_message.tool_calls
if tool_calls:
logger.info(f"AI requested {len(tool_calls)} tool call(s): {[tc.function.name for tc in tool_calls]}")
# Append the assistant's response message that contains the tool calls
messages.append(response_message)
# --- Execute Tools and Get Results ---
available_functions = {
"get_job_opportunities": get_job_opportunities,
"generate_document_template": generate_document_template,
"create_personalized_routine": create_personalized_routine,
"analyze_resume": analyze_resume,
"analyze_portfolio": analyze_portfolio,
"extract_and_rate_skills_from_resume": extract_and_rate_skills_from_resume,
}
for tool_call in tool_calls:
function_name = tool_call.function.name
function_to_call = available_functions.get(function_name)
function_args = json.loads(tool_call.function.arguments) # Arguments are provided as a JSON string
if function_to_call:
try:
# Special handling for functions needing user_id or profile info
if function_name in ["analyze_resume", "analyze_portfolio", "create_personalized_routine"]:
# Add user_id or necessary profile elements to args if needed by the function
# e.g., function_args['user_id'] = user_id
# Pass career goal from profile if not in direct args for analysis functions
if function_name == "analyze_resume" and 'career_goal' not in function_args:
function_args['career_goal'] = user_profile.get('career_goal', 'Not specified')
if function_name == "analyze_portfolio" and 'career_goal' not in function_args:
function_args['career_goal'] = user_profile.get('career_goal', 'Not specified')
# Save files when analysis tools are called
if function_name == "analyze_resume":
save_user_resume(user_id, function_args.get('resume_text', ''))
if function_name == "analyze_portfolio":
save_user_portfolio(user_id, function_args.get('portfolio_url', ''), function_args.get('portfolio_description', ''))
# Call the function with unpacked arguments
logger.info(f"Calling function '{function_name}' with args: {function_args}")
function_response = function_to_call(**function_args)
logger.info(f"Function '{function_name}' returned (type: {type(function_response)}): {str(function_response)[:200]}...")
# Append tool response to messages
messages.append(
{
"tool_call_id": tool_call.id,
"role": "tool",
"name": function_name,
"content": function_response, # Must be a string (JSON string in our case)
}
)
# Also add tool call result to chat history DB
add_chat_message(user_id, "tool", {
"tool_call_id": tool_call.id,
"name": function_name,
"content": function_response # Save the JSON string result
})
except Exception as e:
logger.error(f"Error executing function {function_name}: {e}")
messages.append(
{
"tool_call_id": tool_call.id,
"role": "tool",
"name": function_name,
"content": json.dumps({"error": f"Failed to execute tool {function_name}: {e}"}),
}
)
# Also add error to chat history DB
add_chat_message(user_id, "tool", {
"tool_call_id": tool_call.id,
"name": function_name,
"content": json.dumps({"error": f"Failed to execute tool {function_name}: {e}"})
})
else:
logger.warning(f"Function {function_name} requested by AI but not found.")
# Append a message indicating the function wasn't found
messages.append(
{
"tool_call_id": tool_call.id,
"role": "tool",
"name": function_name,
"content": json.dumps({"error": f"Tool '{function_name}' is not available."})
}
)
add_chat_message(user_id, "tool", {
"tool_call_id": tool_call.id,
"name": function_name,
"content": json.dumps({"error": f"Tool '{function_name}' is not available."})
})
# --- Second API Call (after tool execution) ---
logger.info(f"Sending {len(messages)} messages to OpenAI (including tool results).")
second_response = client.chat.completions.create(
model=MODEL_ID,
messages=messages,
temperature=0.7,
max_tokens=1024
# No tool_choice here, we expect a natural language response
)
final_response_content = second_response.choices[0].message.content
logger.info("Received final response from OpenAI after tool calls.")
else:
# No tool calls were made, use the first response
final_response_content = response_message.content
logger.info("No tool calls requested by AI.")
# --- Post-processing and Saving ---
if not final_response_content:
final_response_content = "I received that, but I don't have a specific response right now. Could you try rephrasing?"
logger.warning("AI returned empty content.")
# Save user message and final AI response to DB
add_chat_message(user_id, "user", user_input)
# Check if the last message added was the assistant's message with tool calls
if messages[-1]['role'] == 'assistant' and messages[-1].tool_calls:
# Don't add the tool call message itself to the history again,
# just add the final text response
pass
elif messages[-1]['role'] == 'tool':
# If the last message was a tool response, the final content comes from the second call
pass
else:
# If no tools were called, the first response message needs saving
add_chat_message(user_id, "assistant", final_response_content)
# Generate recommendations (consider doing this asynchronously)
if generate_recommendations:
# This could be a separate AI call based on the final interaction
# For simplicity, we'll skip detailed recommendation generation here
# but you would call a function like `gen_recommendations_openai`
# gen_recommendations_openai(user_id, user_input, final_response_content)
pass # Placeholder for recommendation generation logic
return final_response_content
    except openai.RateLimitError as e:
        logger.error(f"OpenAI API request exceeded rate limit: {e}")
        return "I'm currently experiencing high demand. Please try again in a few moments."
    except openai.APIConnectionError as e:
        logger.error(f"Failed to connect to OpenAI API: {e}")
        return "I'm sorry, I couldn't connect to the AI service. Please check your connection and try again."
    except openai.APIError as e:
        # Catch-all for other API errors; listed after the more specific subclasses above,
        # which would otherwise never be reached.
        logger.error(f"OpenAI API returned an API Error: {e}")
        status = getattr(e, "status_code", "unknown")
        return f"I'm sorry, there was an error communicating with the AI service (API Error: {status}). Please try again later."
except Exception as e:
# Log the full traceback for debugging
logger.exception(f"Unexpected error in get_ai_response for user {user_id}: {e}")
return "I apologize, but an unexpected error occurred while processing your request. Please try again."
# --- Recommendation Generation (Placeholder - Adapt for OpenAI) ---
def gen_recommendations_openai(user_id, user_input, ai_response):
"""Generate recommendations using OpenAI (Adapt prompt and parsing)."""
logger.info(f"Generating recommendations for user {user_id}")
if not client:
logger.warning("OpenAI client not available for generating recommendations.")
return []
try:
user_profile = get_user_profile(user_id)
prompt = f"""
Based on the following user profile and recent conversation, generate 1-3 specific, actionable recommendations
for the user's next steps in their career journey. Focus on practical actions they can take soon.
User Profile:
- Current emotion: {user_profile.get('current_emotion', 'Not specified')}
- Career goal: {user_profile.get('career_goal', 'Not specified')}
- Location: {user_profile.get('location', 'Not specified')}
- Recent chat history is available to the main assistant.
Most Recent Interaction:
User: {user_input}
Aishura (AI Assistant): {ai_response}
        Generate recommendations as a single JSON object in exactly this shape (no introductory text, no markdown fences):
        {{
          "recommendations": [
            {{
              "title": "Concise recommendation title (e.g., 'Refine Resume Keywords')",
              "description": "Detailed explanation of the recommendation and why it's relevant (2-3 sentences).",
              "action_type": "job_search | skill_building | networking | resume_update | portfolio_review | interview_prep | mindset_shift | other",
              "priority": "high | medium | low"
            }}
          ]
        }}
"""
response = client.chat.completions.create(
model=MODEL_ID, # Or a faster/cheaper model if preferred for this task
messages=[
{"role": "system", "content": "You are an expert career advisor generating concise, actionable recommendations in JSON format."},
{"role": "user", "content": prompt}
],
temperature=0.5,
max_tokens=512,
response_format={"type": "json_object"} # Request JSON output if model supports it
)
recommendation_json_str = response.choices[0].message.content
logger.info(f"Raw recommendations JSON string: {recommendation_json_str}")
# Attempt to parse the JSON
try:
# The response_format parameter should ensure it's valid JSON, but double-check
# Clean potential markdown fences if response_format didn't work
if recommendation_json_str.startswith("```json"):
recommendation_json_str = recommendation_json_str.split("```json")[1].split("```")[0].strip()
# The prompt asks for a list, but response_format might enforce an object. Adjust parsing.
recommendations_data = json.loads(recommendation_json_str)
# If the root is an object with a key like "recommendations", extract the list
if isinstance(recommendations_data, dict) and "recommendations" in recommendations_data and isinstance(recommendations_data["recommendations"], list):
recommendations = recommendations_data["recommendations"]
elif isinstance(recommendations_data, list):
recommendations = recommendations_data # It's already a list
else:
logger.error(f"Unexpected JSON structure for recommendations: {type(recommendations_data)}")
return []
# Add valid recommendations to user profile
valid_recs_added = 0
for rec in recommendations:
# Basic validation of recommendation structure
if isinstance(rec, dict) and all(k in rec for k in ['title', 'description', 'action_type', 'priority']):
add_recommendation_to_user(user_id, rec)
valid_recs_added += 1
else:
logger.warning(f"Skipping invalid recommendation format: {rec}")
logger.info(f"Added {valid_recs_added} recommendations for user {user_id}")
return recommendations # Return the raw list parsed
except json.JSONDecodeError as e:
logger.error(f"Failed to parse JSON recommendations from AI response: {e}\nResponse: {recommendation_json_str}")
return []
except Exception as e:
logger.exception(f"Error processing recommendations: {e}")
return []
except Exception as e:
logger.exception(f"Error in gen_recommendations_openai: {e}")
return []
# --- Chart and Visualization Functions (Unchanged, but depend on data format) ---
# [Keep create_emotion_chart, create_progress_chart, create_routine_completion_gauge]
# Ensure they handle the data structures saved by the updated functions correctly.
def create_emotion_chart(user_id):
"""Create a chart of user's emotions over time"""
user_profile = get_user_profile(user_id)
emotion_records = user_profile.get('daily_emotions', [])
if not emotion_records:
fig = go.Figure()
fig.add_annotation(text="No emotion data tracked yet.", align='center', showarrow=False)
fig.update_layout(title="Emotion Tracking")
return fig
emotion_values = {
"Unmotivated": 1, "Anxious": 2, "Confused": 3,
"Discouraged": 4, "Overwhelmed": 5, "Excited": 6
}
dates = [datetime.fromisoformat(record['date']) if isinstance(record['date'], str) else datetime.strptime(record['date'], "%Y-%m-%d %H:%M:%S") for record in emotion_records] # Handle ISO or older format
emotion_scores = [emotion_values.get(record['emotion'], 3) for record in emotion_records]
emotion_names = [record['emotion'] for record in emotion_records]
df = pd.DataFrame({'Date': dates, 'Emotion Score': emotion_scores, 'Emotion': emotion_names})
df = df.sort_values('Date') # Ensure chronological order
fig = px.line(df, x='Date', y='Emotion Score', markers=True,
labels={"Emotion Score": "Emotional State"},
title="Your Emotional Journey")
fig.update_traces(hovertemplate='%{x|%Y-%m-%d %H:%M}<br>Feeling: %{text}', text=df['Emotion'])
fig.update_yaxes(tickvals=list(emotion_values.values()), ticktext=list(emotion_values.keys()))
return fig
def create_progress_chart(user_id):
"""Create a chart showing user's progress points over time"""
user_profile = get_user_profile(user_id)
tasks = user_profile.get('completed_tasks', [])
if not tasks:
fig = go.Figure()
fig.add_annotation(text="No tasks completed yet.", align='center', showarrow=False)
fig.update_layout(title="Progress Tracking")
return fig
# Ensure tasks have points (might need adjustment based on how points are awarded)
points_per_task = 20 # Example: Assign fixed points if not stored with task
dates = []
cumulative_points = 0
points_timeline = []
task_labels = []
# Sort tasks by date
tasks.sort(key=lambda x: datetime.fromisoformat(x['date']) if isinstance(x['date'], str) else datetime.strptime(x['date'], "%Y-%m-%d %H:%M:%S"))
for task in tasks:
task_date = datetime.fromisoformat(task['date']) if isinstance(task['date'], str) else datetime.strptime(task['date'], "%Y-%m-%d %H:%M:%S")
dates.append(task_date)
# Use points from profile if calculated there, otherwise estimate
# We are using the cumulative points stored in the profile directly now
# For simplicity, let's recalculate cumulative points for the chart
cumulative_points += task.get('points', points_per_task) # Use stored points if available
points_timeline.append(cumulative_points)
task_labels.append(task['task'])
df = pd.DataFrame({'Date': dates, 'Points': points_timeline, 'Task': task_labels})
fig = px.line(df, x='Date', y='Points', markers=True, title="Your Career Journey Progress")
fig.update_traces(hovertemplate='%{x|%Y-%m-%d %H:%M}<br>Points: %{y}<br>Completed: %{text}', text=df['Task'])
return fig
def create_routine_completion_gauge(user_id):
"""Create a gauge chart showing routine completion percentage"""
user_profile = get_user_profile(user_id)
routines = user_profile.get('routine_history', [])
if not routines:
fig = go.Figure(go.Indicator(mode="gauge", value=0, title={'text': "Routine Completion"}))
fig.add_annotation(text="No active routine.", showarrow=False)
return fig
# Get the most recent routine (assuming prepend logic)
latest_routine = routines[0]
completion = latest_routine.get('completion', 0)
routine_name = latest_routine.get('routine', {}).get('name', 'Current Routine')
fig = go.Figure(go.Indicator(
mode = "gauge+number",
value = completion,
domain = {'x': [0, 1], 'y': [0, 1]},
title = {'text': f"{routine_name} Completion"},
gauge = {
'axis': {'range': [0, 100], 'tickwidth': 1, 'tickcolor': "darkblue"},
'bar': {'color': "cornflowerblue"},
'bgcolor': "white",
'borderwidth': 2,
'bordercolor': "gray",
'steps': [
{'range': [0, 50], 'color': 'whitesmoke'},
{'range': [50, 80], 'color': 'lightgray'}],
'threshold': {
'line': {'color': "green", 'width': 4},
'thickness': 0.75, 'value': 90}})) # Threshold at 90%
return fig
def create_skill_radar_chart(user_id):
"""
Creates a radar chart of user's skills.
Requires skills data, potentially extracted by `extract_and_rate_skills_from_resume` tool.
"""
logger.info(f"Creating skill radar chart for user {user_id}")
user_profile = get_user_profile(user_id)
resume_path = user_profile.get('resume_path')
if not resume_path or not os.path.exists(resume_path):
logger.warning("No resume path found or file missing for skill chart.")
fig = go.Figure()
fig.add_annotation(text="Upload & Analyze Resume for Skill Chart", showarrow=False)
fig.update_layout(title="Skill Assessment")
return fig
try:
with open(resume_path, 'r', encoding='utf-8') as f:
resume_text = f.read()
# Use the tool function to extract skills (simulated call here)
# In a real app, this might be triggered explicitly or data stored after analysis
skills_json_str = extract_and_rate_skills_from_resume(resume_text=resume_text)
skill_data = json.loads(skills_json_str)
if 'skills' in skill_data and skill_data['skills']:
skills = skill_data['skills']
# Limit to max 8 skills for readability
skills = skills[:8]
categories = [skill['name'] for skill in skills]
values = [skill['score'] for skill in skills]
# Ensure the loop closes
if len(categories) > 2:
categories.append(categories[0])
values.append(values[0])
fig = go.Figure()
fig.add_trace(go.Scatterpolar(
r=values,
theta=categories,
fill='toself',
name='Skills'
))
fig.update_layout(
polar=dict(radialaxis=dict(visible=True, range=[0, 10])),
showlegend=False,
title="Skill Assessment (Based on Resume)"
)
logger.info(f"Successfully created radar chart with {len(skills)} skills.")
return fig
else:
logger.warning("Could not extract skills from resume for chart.")
fig = go.Figure()
fig.add_annotation(text="Could not extract skills from resume", showarrow=False)
fig.update_layout(title="Skill Assessment")
return fig
except Exception as e:
logger.exception(f"Error creating skill radar chart: {e}")
fig = go.Figure()
fig.add_annotation(text="Error analyzing skills", showarrow=False)
fig.update_layout(title="Skill Assessment")
return fig
# --- Gradio Interface Components ---
def create_interface():
"""Create the Gradio interface for Aishura"""
# Generate a unique user ID for this session (can be replaced with login later)
# This state needs careful handling in Gradio for multi-user scenarios.
# Using a simple global or closure for demo purposes.
# A better approach involves Gradio's State management or user handling.
session_user_id = str(uuid.uuid4())
logger.info(f"Initializing Gradio interface for session user ID: {session_user_id}")
# Initialize profile for session user
get_user_profile(session_user_id)
# --- Event Handlers for Gradio Components ---
def welcome(name, location, emotion, goal):
"""Handles the initial welcome screen submission."""
logger.info(f"Welcome action for user {session_user_id}: name='{name}', loc='{location}', emo='{emotion}', goal='{goal}'")
if not all([name, location, emotion, goal]):
return ("Please fill out all fields to get started.",
gr.update(visible=True), # Keep welcome visible
gr.update(visible=False)) # Keep main hidden
# Update profile
update_user_profile(session_user_id, {
"name": name, "location": location, "career_goal": goal
})
add_emotion_record(session_user_id, emotion) # Record initial emotion
# Generate initial AI message based on input
initial_input = f"Hi Aishura! I'm {name} from {location}. I'm currently feeling {emotion}, and my main goal is to {goal}. Can you help me get started?"
ai_response = get_ai_response(session_user_id, initial_input, generate_recommendations=True)
# Initial chat history
initial_chat = [(initial_input, ai_response)]
# Initial charts
emotion_fig = create_emotion_chart(session_user_id)
progress_fig = create_progress_chart(session_user_id)
routine_fig = create_routine_completion_gauge(session_user_id)
skill_fig = create_skill_radar_chart(session_user_id) # Will be empty initially
# Output: Hide welcome, show main, populate initial chat and charts
return (gr.update(value=initial_chat), # Update chatbot
gr.update(visible=False), # Hide welcome group
gr.update(visible=True), # Show main interface
                gr.update(value=emotion_fig),
                gr.update(value=progress_fig),
                gr.update(value=routine_fig),
                gr.update(value=skill_fig)
)
def chat_submit(message, history):
"""Handles sending a message in the chatbot."""
logger.info(f"Chat submit for user {session_user_id}: '{message[:50]}...'")
        if not message:
            return history, "", gr.update()  # Empty input: leave history and recommendations unchanged
ai_response = get_ai_response(session_user_id, message, generate_recommendations=True)
history.append((message, ai_response))
# Update recommendations display after chat
recommendations_md = display_recommendations(session_user_id)
return history, "", gr.update(value=recommendations_md) # Return updated history, clear input, update recs
# --- Simulation for Emotion Messages ---
pause_message = "Take your time, we’re here when you're ready."
retype_message = "It doesn’t have to be perfect. Let’s just begin."
# JS for basic simulation (might need refinement based on Gradio version/behavior)
# This is illustrative; direct JS injection can be tricky/fragile in Gradio.
# We'll use Gradio events for a simpler simulation.
def show_pause_message():
# Simulate showing pause message (e.g., make a Markdown visible)
# In a real app, this needs proper timing logic (JS setTimeout)
# logger.info("Simulating 'pause' message visibility.")
return gr.update(value=pause_message, visible=True)
def show_retype_message():
# Simulate showing retype message
# logger.info("Simulating 'retype' message visibility.")
return gr.update(value=retype_message, visible=True)
def hide_emotion_message():
# logger.info("Hiding emotion message.")
return gr.update(value="", visible=False)
def handle_chat_focus():
"""Called when chat input gains focus."""
# logger.info("Chat input focused.")
# Decide whether to show a message, e.g., maybe the retype one briefly?
# Or just hide any existing message.
return hide_emotion_message() # Hide message on focus for now
# Placeholder: More complex logic would be needed for actual pause/retype detection
# Using .change() with debounce might approximate it, but Gradio support varies.
# --- Tool Interface Handlers ---
def search_jobs_interface_handler(query, location, max_results):
"""Handles the Job Search button click."""
logger.info(f"Manual Job Search UI: query='{query}', loc='{location}', num={max_results}")
# Call the underlying tool function directly for the UI button
results_json_str = get_job_opportunities(query, location, int(max_results))
try:
results_data = json.loads(results_json_str)
if "error" in results_data:
return f"Error: {results_data['error']}"
if not results_data.get("results"):
return "No job opportunities found matching your criteria."
output_md = f"## Job Opportunities Found ({len(results_data['results'])})\n\n"
for i, job in enumerate(results_data['results'], 1):
output_md += f"### {i}. {job.get('title', 'N/A')}\n"
output_md += f"**Company:** {job.get('company', 'N/A')}\n"
output_md += f"**Location:** {job.get('location', location)}\n" # Use search location as fallback
output_md += f"**Description:** {job.get('description', 'N/A')}\n"
output_md += f"**Posted:** {job.get('date_posted', 'N/A')}\n"
link = job.get('link', '#')
output_md += f"**Link:** [{link}]({link})\n\n"
return output_md
except json.JSONDecodeError:
logger.error(f"Failed to parse job search results: {results_json_str}")
return "Error displaying job search results."
except Exception as e:
logger.exception("Error in search_jobs_interface_handler")
return f"An unexpected error occurred: {e}"
def generate_template_interface_handler(doc_type, career_field, experience):
"""Handles Generate Template button click."""
logger.info(f"Manual Template UI: type='{doc_type}', field='{career_field}', exp='{experience}'")
template_json_str = generate_document_template(doc_type, career_field, experience)
try:
template_data = json.loads(template_json_str)
if "error" in template_data:
return f"Error: {template_data['error']}"
return template_data.get('template_markdown', "Could not generate template.")
except json.JSONDecodeError:
logger.error(f"Failed to parse template results: {template_json_str}")
return "Error displaying template."
except Exception as e:
logger.exception("Error in generate_template_interface_handler")
return f"An unexpected error occurred: {e}"
def create_routine_interface_handler(emotion, goal, time_available, days):
"""Handles Create Routine button click."""
logger.info(f"Manual Routine UI: emo='{emotion}', goal='{goal}', time='{time_available}', days='{days}'")
routine_json_str = create_personalized_routine(emotion, goal, int(time_available), int(days))
try:
routine_data = json.loads(routine_json_str)
if "error" in routine_data:
return f"Error: {routine_data['error']}"
# Save the generated routine to the user profile
add_routine_to_user(session_user_id, routine_data)
# Format for display
output_md = f"# Your {routine_data.get('name', 'Personalized Routine')}\n\n"
output_md += f"{routine_data.get('description', '')}\n\n"
for day_plan in routine_data.get('daily_tasks', []):
output_md += f"## Day {day_plan.get('day', '?')}\n"
if not day_plan.get('tasks'):
output_md += "- Rest day or free choice.\n"
else:
for task in day_plan.get('tasks', []):
output_md += f"- **{task.get('name', 'Task')}** "
output_md += f"({task.get('duration', '?')} mins"
if 'points' in task: # Only show points if available
output_md += f", {task.get('points', '?')} points"
output_md += ")\n"
output_md += f" *Why: {task.get('description', '...') }*\n"
output_md += "\n"
# Update the gauge chart as well
gauge_fig = create_routine_completion_gauge(session_user_id)
            return output_md, gr.update(value=gauge_fig)  # Return markdown and updated gauge
except json.JSONDecodeError:
logger.error(f"Failed to parse routine results: {routine_json_str}")
return "Error displaying routine.", gr.update() # Return update for gauge too
except Exception as e:
logger.exception("Error in create_routine_interface_handler")
return f"An unexpected error occurred: {e}", gr.update()
def analyze_resume_interface_handler(resume_text):
"""Handles Analyze Resume button click."""
logger.info(f"Manual Resume Analysis UI: length={len(resume_text)}")
if not resume_text:
# Clear previous results if input is empty
return "Please paste your resume text above.", gr.update(figure=None)
user_profile = get_user_profile(session_user_id)
career_goal = user_profile.get('career_goal', 'Not specified') # Get goal from profile
# Save resume first
save_user_resume(session_user_id, resume_text)
# Call analysis tool (placeholder version for now)
analysis_json_str = analyze_resume(resume_text, career_goal)
try:
analysis_data = json.loads(analysis_json_str)
if "error" in analysis_data:
return f"Error: {analysis_data['error']}", gr.update() # Update for chart
# Format analysis for display (adapt based on actual tool output)
analysis = analysis_data.get('analysis', {})
output_md = "## Resume Analysis Results\n\n"
output_md += f"**Analysis against goal:** '{career_goal}'\n\n"
output_md += "**Strengths:**\n" + "\n".join([f"- {s}" for s in analysis.get('strengths', [])]) + "\n\n"
output_md += "**Areas for Improvement:**\n" + "\n".join([f"- {s}" for s in analysis.get('areas_for_improvement', [])]) + "\n\n"
output_md += f"**Format Feedback:** {analysis.get('format_feedback', 'N/A')}\n\n"
output_md += f"**Content Feedback:** {analysis.get('content_feedback', 'N/A')}\n\n"
output_md += "**Suggested Next Steps:**\n" + "\n".join([f"- {s}" for s in analysis.get('next_steps', [])])
# Update skill chart after analysis
skill_fig = create_skill_radar_chart(session_user_id)
            return output_md, gr.update(value=skill_fig)
except json.JSONDecodeError:
logger.error(f"Failed to parse resume analysis results: {analysis_json_str}")
return "Error displaying resume analysis.", gr.update()
except Exception as e:
logger.exception("Error in analyze_resume_interface_handler")
return f"An unexpected error occurred: {e}", gr.update()
def analyze_portfolio_interface_handler(portfolio_url, portfolio_description):
"""Handles Analyze Portfolio button click."""
logger.info(f"Manual Portfolio Analysis UI: url='{portfolio_url}', desc_len={len(portfolio_description)}")
if not portfolio_description:
return "Please provide a description of your portfolio."
user_profile = get_user_profile(session_user_id)
career_goal = user_profile.get('career_goal', 'Not specified') # Get goal from profile
# Save portfolio info first
save_user_portfolio(session_user_id, portfolio_url, portfolio_description)
# Call analysis tool (placeholder)
analysis_json_str = analyze_portfolio(portfolio_description, career_goal, portfolio_url)
try:
analysis_data = json.loads(analysis_json_str)
if "error" in analysis_data:
return f"Error: {analysis_data['error']}"
# Format analysis for display
analysis = analysis_data.get('analysis', {})
output_md = "## Portfolio Analysis Results\n\n"
output_md += f"**Analysis against goal:** '{career_goal}'\n"
if portfolio_url:
output_md += f"**Portfolio URL:** {portfolio_url}\n\n"
output_md += f"**Alignment with Goal:**\n{analysis.get('alignment_with_goal', 'N/A')}\n\n"
output_md += "**Strengths:**\n" + "\n".join([f"- {s}" for s in analysis.get('strengths', [])]) + "\n\n"
output_md += "**Areas for Improvement:**\n" + "\n".join([f"- {s}" for s in analysis.get('areas_for_improvement', [])]) + "\n\n"
output_md += f"**Presentation Feedback:** {analysis.get('presentation_feedback', 'N/A')}\n\n"
output_md += "**Suggested Next Steps:**\n" + "\n".join([f"- {s}" for s in analysis.get('next_steps', [])])
return output_md
except json.JSONDecodeError:
logger.error(f"Failed to parse portfolio analysis results: {analysis_json_str}")
return "Error displaying portfolio analysis."
except Exception as e:
logger.exception("Error in analyze_portfolio_interface_handler")
return f"An unexpected error occurred: {e}"
# --- Progress Tracking Handlers ---
def complete_task_handler(task_name):
"""Handles marking a task as complete."""
logger.info(f"Complete Task UI: task='{task_name}' for user {session_user_id}")
if not task_name:
return ("Please enter the name of the task you completed.", "",
gr.update(), gr.update(), gr.update()) # No chart updates if no task
# Add task and update points
user_profile = add_task_to_user(session_user_id, task_name)
points_earned = 20 # Use a fixed value or get from task data if available
# Update completion % of latest routine
db = load_user_database()
if session_user_id in db['users'] and db['users'][session_user_id].get('routine_history'):
latest_routine_entry = db['users'][session_user_id]['routine_history'][0] # Get latest
# Simple: increment completion by a fixed amount per task (e.g., 5-15%)
# More complex: calculate based on routine definition and completed tasks
increment = random.randint(5, 15)
new_completion = min(100, latest_routine_entry.get('completion', 0) + increment)
latest_routine_entry['completion'] = new_completion
save_user_database(db) # Save updated DB
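# (If the stored routine exposed its full task list, completion could instead be
# computed proportionally, e.g. round(100 * completed_count / max(1, total_task_count))
# -- names illustrative; the random increment above is a simpler stand-in since that
# schema isn't guaranteed here.)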
# Refresh charts
emotion_fig = create_emotion_chart(session_user_id)
progress_fig = create_progress_chart(session_user_id)
gauge_fig = create_routine_completion_gauge(session_user_id)
return (f"Great job completing '{task_name}'! You've earned progress points.",
"", # Clear task input
gr.update(value=emotion_fig),
gr.update(value=progress_fig),
gr.update(value=gauge_fig))
def update_emotion_handler(emotion):
"""Handles updating the user's current emotion."""
logger.info(f"Update Emotion UI: emotion='{emotion}' for user {session_user_id}")
if not emotion:
return "Please select an emotion.", gr.update() # No chart update
add_emotion_record(session_user_id, emotion)
# Refresh emotion chart
emotion_fig = create_emotion_chart(session_user_id)
return f"Your current emotion has been updated to '{emotion}'.", gr.update(figure=emotion_fig)
def display_recommendations(current_user_id):
"""Fetches and formats recommendations for display."""
logger.info(f"Displaying recommendations for user {current_user_id}")
user_profile = get_user_profile(current_user_id)
recommendations = user_profile.get('recommendations', [])
if not recommendations:
return "No recommendations available yet. Chat with Aishura to get personalized suggestions!"
# Show the most recent 5 recommendations (they are prepended)
recent_recs = recommendations[:5]
output_md = "# Your Latest Recommendations\n\n"
if not recent_recs:
output_md += "No recommendations yet."
return output_md
for i, rec_entry in enumerate(recent_recs, 1):
rec = rec_entry.get('recommendation', {}) # Get the actual recommendation object
output_md += f"### {i}. {rec.get('title', 'Recommendation')}\n"
output_md += f"{rec.get('description', 'No details.')}\n"
output_md += f"**Priority:** {rec.get('priority', 'N/A').title()} | "
output_md += f"**Type:** {rec.get('action_type', 'N/A').replace('_', ' ').title()}\n"
# output_md += f"*Generated: {rec_entry.get('date', 'N/A')}*\n" # Optional: show date
output_md += "---\n"
return output_md
# --- Build Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky")) as app:
gr.Markdown("# Aishura - Your AI Career Assistant")
# --- Welcome Screen ---
with gr.Group(visible=True) as welcome_group:
gr.Markdown("## Welcome to Aishura!")
gr.Markdown("Let's get acquainted. Tell me a bit about yourself.")
with gr.Row():
with gr.Column():
name_input = gr.Textbox(label="Your Name", placeholder="e.g., Alex Chen")
location_input = gr.Textbox(label="Your Location", placeholder="e.g., London, UK")
with gr.Column():
emotion_dropdown = gr.Dropdown(choices=EMOTIONS, label="How are you feeling today?")
goal_dropdown = gr.Dropdown(choices=GOAL_TYPES, label="What's your main career goal?")
welcome_button = gr.Button("Start My Journey")
welcome_output = gr.Markdown() # For validation messages
# --- Main App Interface (Initially Hidden) ---
with gr.Group(visible=False) as main_interface:
with gr.Tabs() as tabs:
# --- Chat Tab ---
with gr.TabItem("💬 Chat"):
with gr.Row():
with gr.Column(scale=3):
chatbot = gr.Chatbot(
label="Aishura Assistant",
height=550,
avatar_images=("./user_avatar.png", "./aishura_avatar.png"), # Provide paths to avatar images if available
bubble_full_width=False,
show_copy_button=True
)
# --- Simulated Emotion Message Area ---
emotion_message_area = gr.Markdown("", visible=False, elem_classes="subtle-message") # Hidden initially
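# Note: the "subtle-message" class only has a visual effect if matching CSS is
# supplied to gr.Blocks via its `css` argument; otherwise this renders as plain Markdown.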
# --- Chat Input ---
msg_textbox = gr.Textbox(
show_label=False,
placeholder="Type your message here and press Enter...",
container=False,
scale=1 # Take full width below chatbot
)
with gr.Column(scale=1):
gr.Markdown("### ✨ Recommendations")
recommendation_output = gr.Markdown(value="Chat with Aishura to get recommendations.")
refresh_recs_button = gr.Button("🔄 Refresh Recommendations")
# --- Analysis Tab ---
with gr.TabItem("📊 Analysis"):
with gr.Tabs() as analysis_subtabs:
with gr.TabItem("📄 Resume"):
gr.Markdown("### Resume Analysis")
gr.Markdown("Paste your full resume below. Aishura can analyze it against your career goals and help identify strengths and areas for improvement.")
resume_text_input = gr.Textbox(label="Paste Resume Text Here", lines=15, placeholder="Your resume content...")
analyze_resume_button = gr.Button("Analyze My Resume")
resume_analysis_output = gr.Markdown()
with gr.TabItem("🎨 Portfolio"):
gr.Markdown("### Portfolio Analysis")
gr.Markdown("Provide a link and/or description of your portfolio (e.g., website, GitHub, Behance).")
portfolio_url_input = gr.Textbox(label="Portfolio URL (Optional)", placeholder="https://your-portfolio.com")
portfolio_desc_input = gr.Textbox(label="Portfolio Description", lines=5, placeholder="Describe your portfolio's purpose, key projects, and target audience...")
analyze_portfolio_button = gr.Button("Analyze My Portfolio")
portfolio_analysis_output = gr.Markdown()
with gr.TabItem("💡 Skills"):
gr.Markdown("### Skill Assessment")
gr.Markdown("This chart visualizes skills identified from your latest resume analysis.")
skill_radar_chart_output = gr.Plot(label="Skill Radar Chart")
# --- Tools Tab ---
with gr.TabItem("🛠️ Tools"):
with gr.Tabs() as tools_subtabs:
with gr.TabItem("🔍 Job Search"):
gr.Markdown("### Find Job Opportunities")
gr.Markdown("Use this tool to search for jobs based on keywords and location.")
job_query_input = gr.Textbox(label="Job Title/Keyword", placeholder="e.g., Software Engineer, Marketing Manager")
job_location_input = gr.Textbox(label="Location", placeholder="e.g., New York, Remote")
job_results_slider = gr.Slider(minimum=5, maximum=20, value=10, step=1, label="Number of Results")
search_jobs_button = gr.Button("Search for Jobs")
job_search_output = gr.Markdown()
with gr.TabItem("📝 Templates"):
gr.Markdown("### Generate Document Templates")
gr.Markdown("Get started with common career documents.")
doc_type_dropdown = gr.Dropdown(choices=["Resume", "Cover Letter", "LinkedIn Summary", "Networking Email"], label="Select Document Type")
doc_field_input = gr.Textbox(label="Career Field (Optional)", placeholder="e.g., Healthcare, Technology")
doc_exp_dropdown = gr.Dropdown(choices=["Entry-Level", "Mid-Career", "Senior-Level", "Student/Intern"], label="Experience Level")
generate_template_button = gr.Button("Generate Template")
template_output_md = gr.Markdown()
with gr.TabItem("📅 Routine"):
gr.Markdown("### Create a Personalized Routine")
gr.Markdown("Develop a daily or weekly plan to work towards your goals, tailored to how you feel.")
routine_emotion_dropdown = gr.Dropdown(choices=EMOTIONS, label="How are you feeling about this goal?")
routine_goal_input = gr.Textbox(label="Specific Goal for this Routine", placeholder="e.g., Apply to 5 jobs, Learn basic Python")
routine_time_slider = gr.Slider(minimum=15, maximum=120, value=45, step=15, label="Minutes Available Per Day")
routine_days_slider = gr.Slider(minimum=3, maximum=21, value=7, step=1, label="Routine Length (Days)")
create_routine_button = gr.Button("Create My Routine")
routine_output_md = gr.Markdown()
# --- Progress Tab ---
with gr.TabItem("📈 Progress"):
gr.Markdown("## Track Your Journey")
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("### Mark Task Complete")
task_input = gr.Textbox(label="Task Name", placeholder="e.g., Updated LinkedIn Profile")
complete_button = gr.Button("Complete Task")
task_output = gr.Markdown()
gr.Markdown("---")
gr.Markdown("### Update Emotion")
new_emotion_dropdown = gr.Dropdown(choices=EMOTIONS, label="How are you feeling now?")
emotion_button = gr.Button("Update Feeling")
emotion_output = gr.Markdown()
with gr.Column(scale=2):
gr.Markdown("### Visualizations")
with gr.Row():
emotion_chart_output = gr.Plot(label="Emotional Journey")
progress_chart_output = gr.Plot(label="Progress Points")
with gr.Row():
routine_gauge_output = gr.Plot(label="Routine Completion")
# Maybe add skill chart here too? Or keep in Analysis.
gr.Markdown("") # Spacer
# --- Event Wiring ---
# Welcome screen action
welcome_button.click(
fn=welcome,
inputs=[name_input, location_input, emotion_dropdown, goal_dropdown],
outputs=[chatbot, welcome_group, main_interface, # Show/hide groups
emotion_chart_output, progress_chart_output, routine_gauge_output, skill_radar_chart_output] # Populate initial charts
)
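# `welcome` must return values in the same order as `outputs` above: the initial
# chat history, visibility updates for welcome_group and main_interface, then the
# four chart figures.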
# Chat submission
msg_textbox.submit(
fn=chat_submit,
inputs=[msg_textbox, chatbot],
outputs=[chatbot, msg_textbox, recommendation_output] # Update chatbot, clear input, refresh recs
)
# Recommendation refresh button
refresh_recs_button.click(
fn=lambda: display_recommendations(session_user_id), # Use lambda to pass user_id
inputs=[],
outputs=[recommendation_output]
)
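# The lambda closes over session_user_id so display_recommendations receives the
# current user without needing a Gradio input component.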
# --- Simulated Emotion Message Wiring ---
# Simple simulation: Show/hide message on focus/blur (or change)
# msg_textbox.focus(fn=handle_chat_focus, outputs=[emotion_message_area])
# msg_textbox.blur(fn=hide_emotion_message, outputs=[emotion_message_area])
# Example: Show retype message briefly on change, then hide
# msg_textbox.change(fn=show_retype_message, outputs=emotion_message_area).then(
# fn=hide_emotion_message, outputs=emotion_message_area, js="() => { return new Promise(resolve => setTimeout(() => { resolve('') }, 2000)) }")
# Analysis Tab Wiring
analyze_resume_button.click(
fn=analyze_resume_interface_handler,
inputs=[resume_text_input],
outputs=[resume_analysis_output, skill_radar_chart_output] # Update analysis text and skill chart
)
analyze_portfolio_button.click(
fn=analyze_portfolio_interface_handler,
inputs=[portfolio_url_input, portfolio_desc_input],
outputs=[portfolio_analysis_output]
)
# Tools Tab Wiring
search_jobs_button.click(
fn=search_jobs_interface_handler,
inputs=[job_query_input, job_location_input, job_results_slider],
outputs=[job_search_output]
)
generate_template_button.click(
fn=generate_template_interface_handler,
inputs=[doc_type_dropdown, doc_field_input, doc_exp_dropdown],
outputs=[template_output_md]
)
create_routine_button.click(
fn=create_routine_interface_handler,
inputs=[routine_emotion_dropdown, routine_goal_input, routine_time_slider, routine_days_slider],
outputs=[routine_output_md, routine_gauge_output] # Update routine text and gauge chart
)
# Progress Tab Wiring
complete_button.click(
fn=complete_task_handler,
inputs=[task_input],
outputs=[task_output, task_input, # Update message, clear input
emotion_chart_output, progress_chart_output, routine_gauge_output] # Update all charts
)
emotion_button.click(
fn=update_emotion_handler,
inputs=[new_emotion_dropdown],
outputs=[emotion_output, emotion_chart_output] # Update message and emotion chart
)
# Load initial state for elements that need it (e.g., charts if resuming session)
# app.load(...) could be used here if state management was more robust.
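# A minimal sketch of that idea (commented out; assumes the chart helpers above are
# cheap enough to call on every page load):
# app.load(
#     fn=lambda: (create_emotion_chart(session_user_id),
#                 create_progress_chart(session_user_id),
#                 create_routine_completion_gauge(session_user_id),
#                 create_skill_radar_chart(session_user_id)),
#     inputs=[],
#     outputs=[emotion_chart_output, progress_chart_output,
#              routine_gauge_output, skill_radar_chart_output],
# )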
return app
# --- Main Execution ---
if __name__ == "__main__":
if not OPENAI_API_KEY or not SERPER_API_KEY:
print("*****************************************************")
print("Warning: API keys for OpenAI or Serper not found.")
print("Please set OPENAI_API_KEY and SERPER_API_KEY environment variables.")
print("You can create a .env file in the same directory:")
print("OPENAI_API_KEY=your_openai_key")
print("SERPER_API_KEY=your_serper_key")
print("*****************************************************")
# Decide whether to exit or continue with limited functionality
# exit(1)
logger.info("Starting Aishura Gradio application...")
aishura_app = create_interface()
# share=False keeps the app local for testing; set share=True to create a temporary public link
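# launch() blocks until the server is shut down (e.g. Ctrl+C), so the log line below only runs on exit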
aishura_app.launch(share=False)
logger.info("Aishura Gradio application stopped.")