Update app.py

app.py CHANGED

@@ -127,35 +127,86 @@ tools_list_groq = [
 def load_user_database():
     # (Identical to v4 - handles OpenAI/Groq compatible format)
     try:
-
+        # Check if the file exists and is not empty
+        if not os.path.exists(USER_DB_PATH) or os.path.getsize(USER_DB_PATH) == 0:
+            logger.info(f"DB file '{USER_DB_PATH}' not found or empty. Creating new.")
+            db = {'users': {}}
+            save_user_database(db)
+            return db
+
+        with open(USER_DB_PATH, 'r', encoding='utf-8') as file:
+            db = json.load(file)
+
+        # Ensure 'users' key exists
+        if 'users' not in db:
+            db['users'] = {}
+
         for user_id in db.get('users', {}):
             profile = db['users'][user_id]
             if 'chat_history' not in profile or not isinstance(profile['chat_history'], list): profile['chat_history'] = []
             else:
                 fixed_history = []
                 for msg in profile['chat_history']:
+                    # Increased robustness of validation
                     if isinstance(msg, dict) and 'role' in msg:
-
-
-
-
-
-
-
-
-
-
-
+                        role = msg['role']
+                        if role in ['user', 'assistant', 'system']:
+                            content = msg.get('content')
+                            tool_calls = msg.get('tool_calls')
+                            # Validate content type (string or None for assistant)
+                            if not isinstance(content, (str, type(None))):
+                                logger.warning(f"Skipping message with invalid content type for role {role} user {user_id}: {content}")
+                                continue
+                            # Validate tool_calls structure if present
+                            if tool_calls is not None:
+                                if not isinstance(tool_calls, list) or not all(isinstance(tc, dict) and 'id' in tc and 'type' in tc and 'function' in tc for tc in tool_calls):
+                                    logger.warning(f"Skipping message with invalid tool_calls structure for user {user_id}: {tool_calls}")
+                                    continue
+                            # Check consistency: assistant needs content or tool_calls
+                            if role == 'assistant' and content is None and tool_calls is None:
+                                logger.warning(f"Skipping assistant message with no content and no tool_calls for user {user_id}")
+                                continue
+                            fixed_history.append(msg)
+                        elif role == 'tool':
+                            if 'tool_call_id' in msg and 'content' in msg and isinstance(msg.get('content'), str):
+                                fixed_history.append(msg)
+                            else:
+                                logger.warning(f"Skipping invalid tool message structure for user {user_id}: {msg}")
+                        # Allow system messages even if content is None (though unlikely)
+                        elif role == 'system' and isinstance(msg.get('content'), (str, type(None))):
+                            fixed_history.append(msg)
+                        else:
+                            logger.warning(f"Skipping message with invalid role/content combination for user {user_id}: {msg}")
+                    else:
+                        logger.warning(f"Skipping unrecognized message structure for user {user_id}: {msg}")
                 profile['chat_history'] = fixed_history
-
-
-
-
-
-
+
+            # Ensure other lists/fields exist with defaults
+            default_lists = ['recommendations', 'daily_emotions', 'completed_tasks', 'routine_history', 'strengths', 'areas_for_development', 'values']
+            default_strings = ['name', 'location', 'current_emotion', 'career_goal', 'industry', 'preferred_work_style', 'long_term_aspirations', 'resume_path', 'portfolio_path']
+            default_numbers = {'progress_points': 0}
+            default_other = {'experience_level': "Not specified"}
+
+            for key in default_lists:
+                if key not in profile or not isinstance(profile.get(key), list): profile[key] = []
+            for key in default_strings:
+                if key not in profile or not isinstance(profile.get(key), str): profile[key] = "" # Ensure string type
+            for key, default_value in default_numbers.items():
+                if key not in profile or not isinstance(profile.get(key), (int, float)): profile[key] = default_value
+            for key, default_value in default_other.items():
+                if key not in profile: profile[key] = default_value
+
+        return db
+    except json.JSONDecodeError as e:
+        logger.error(f"Error decoding JSON from {USER_DB_PATH}: {e}. Creating new DB.")
+        db = {'users': {}}
+        save_user_database(db)
         return db
-    except
-
+    except Exception as e:
+        logger.error(f"Unexpected error loading DB from {USER_DB_PATH}: {e}. Returning empty DB.")
+        # Avoid saving over potentially recoverable data in case of unexpected errors
+        return {'users': {}}
+

 def save_user_database(db):
     # (Identical to v4)
@@ -168,14 +219,35 @@ def get_user_profile(user_id):
     db = load_user_database()
     if user_id not in db.get('users', {}):
         db['users'] = db.get('users', {})
-
+        # Initialize enhanced profile structure
+        db['users'][user_id] = {
+            "user_id": user_id, "name": "", "location": "", "industry": "",
+            "experience_level": "Not specified", "preferred_work_style": "Any",
+            "values": [], "strengths": [], "areas_for_development": [],
+            "long_term_aspirations": "", "current_emotion": "", "career_goal": "",
+            "progress_points": 0, "completed_tasks": [], "upcoming_events": [],
+            "routine_history": [], "daily_emotions": [], "resume_path": "",
+            "portfolio_path": "", "recommendations": [],
+            "chat_history": [], # Stores history in OpenAI/Groq compatible format
+            "joined_date": datetime.now().isoformat()
+        }
         save_user_database(db)
-
-
+
+    # Re-ensure critical lists exist after potential creation or loading issues
+    profile = db.get('users', {}).get(user_id, {}) # Get potentially newly created profile
+    if 'chat_history' not in profile or not isinstance(profile.get('chat_history'), list):
+        profile['chat_history'] = []
     for key in ['recommendations', 'daily_emotions', 'completed_tasks', 'routine_history', 'strengths', 'areas_for_development', 'values']:
-        if key not in profile
+        if key not in profile or not isinstance(profile.get(key), list):
+            profile[key] = []
+    # Ensure essential string fields exist if somehow missed
+    for key in ['name', 'location', 'current_emotion', 'career_goal', 'industry', 'preferred_work_style', 'long_term_aspirations', 'resume_path', 'portfolio_path']:
+        if key not in profile:
+            profile[key] = ""
+
     return profile

+
 # --- Database Update Functions (Identical to v4) ---
 def update_user_profile(user_id, updates):
     db = load_user_database(); profile = db.get('users', {}).get(user_id)
@@ -208,7 +280,7 @@ def add_routine_to_user(user_id, routine):
     if profile:
         if 'routine_history' not in profile or not isinstance(profile['routine_history'], list): profile['routine_history'] = []
         try: days_delta = int(routine.get('days', 7))
-        except: days_delta = 7
+        except (ValueError, TypeError): days_delta = 7 # Handle non-integer values
         end_date = (datetime.now() + timedelta(days=days_delta)).isoformat()
         routine_with_date = { "routine": routine, "start_date": datetime.now().isoformat(), "end_date": end_date, "completion": 0 }
         profile['routine_history'].insert(0, routine_with_date); profile['routine_history'] = profile['routine_history'][:10]
@@ -283,9 +355,17 @@ def generate_basic_routine(emotion, goal, available_time=60, days=7):
     logger.info(f"Generating basic fallback routine for emotion={emotion}, goal={goal}")
     routine_types = { "job_search": [ {"name": "Research Target Companies", "points": 15, "duration": 20, "description": "Identify 3 potential employers aligned with your goal."}, {"name": "Update LinkedIn Section", "points": 15, "duration": 25, "description": "Refine one section of your LinkedIn profile (e.g., summary, experience)."}, {"name": "Practice STAR Method", "points": 20, "duration": 15, "description": "Outline one experience using the STAR method for interviews."}, {"name": "Find Networking Event", "points": 10, "duration": 10, "description": "Look for one relevant online or local networking event."} ], "skill_building": [ {"name": "Online Tutorial (1 Module)", "points": 25, "duration": 45, "description": "Complete one module of a relevant online course/tutorial."}, {"name": "Read Industry Blog/Article", "points": 10, "duration": 15, "description": "Read and summarize one article about trends in your field."}, {"name": "Small Project Task", "points": 30, "duration": 60, "description": "Dedicate time to a specific task within a personal project."}, {"name": "Review Skill Documentation", "points": 15, "duration": 30, "description": "Read documentation or examples for a skill you're learning."} ], "motivation_wellbeing": [ {"name": "Mindful Reflection", "points": 10, "duration": 10, "description": "Spend 10 minutes reflecting on progress and challenges without judgment."}, {"name": "Set 1-3 Daily Intentions", "points": 10, "duration": 5, "description": "Define small, achievable goals for the day."}, {"name": "Short Break/Walk", "points": 15, "duration": 15, "description": "Take a brief break away from screens, preferably with light movement."}, {"name": "Connect with Support", "points": 20, "duration": 20, "description": "Briefly chat with a friend, mentor, or peer about your journey."} ] }
     cleaned_emotion = emotion.split(" ")[0].lower() if " " in emotion else emotion.lower(); negative_emotions = ["unmotivated", "anxious", "confused", "overwhelmed", "discouraged", "stuck"]
-
-
-
+
+    # ** FIX Syntax Error Here **
+    base_type = "skill_building" # Default
+    if any(term in goal.lower() for term in ["job", "internship", "company", "freelance", "contract"]):
+        base_type = "job_search"
+    elif any(term in goal.lower() for term in ["skill", "learn", "development"]):
+        base_type = "skill_building"
+    elif "network" in goal.lower():
+        base_type = "job_search"
+    # The 'else' was incorrectly placed on the same line as the elif above.
+
     include_wellbeing = cleaned_emotion in negative_emotions or "overwhelmed" in cleaned_emotion; daily_tasks_list = []
     for day in range(1, days + 1):
         day_tasks, remaining_time, tasks_added_count = [], available_time, 0; possible_tasks = routine_types[base_type].copy()
@@ -450,9 +530,17 @@ def get_ai_response_stream(user_id: str, user_input: str) -> Generator[str, None
     # Store the initial assistant message (might contain text and/or tool calls)
     assistant_message_for_db = {"content": response_message.content}
     if tool_calls:
-
+        # Ensure tool_calls are serializable (they should be from Groq client)
+        try:
+            assistant_message_for_db['tool_calls'] = [tc.model_dump() for tc in tool_calls]
+        except Exception as dump_err:
+            logger.error(f"Error serializing tool calls: {dump_err}")
+            # Handle error - maybe store without tool calls or with an error placeholder
+            assistant_message_for_db['tool_calls'] = [{"error": "Failed to serialize tool call"}]
+
     add_chat_message(user_id, "assistant", assistant_message_for_db)

+
     # --- Handle Tool Calls if any ---
     if tool_calls:
         logger.info(f"Groq requested tool call(s): {[tc.function.name for tc in tool_calls]}")
@@ -461,11 +549,13 @@ def get_ai_response_stream(user_id: str, user_input: str) -> Generator[str, None
         available_functions = { "generate_document_template": generate_document_template, "create_personalized_routine": create_personalized_routine, "analyze_resume": analyze_resume, "analyze_portfolio": analyze_portfolio, "extract_and_rate_skills_from_resume": extract_and_rate_skills_from_resume, "search_jobs_courses_skills": search_web_serper, }

         # Execute functions and gather results
+        tool_results_for_api = [] # Store results to send back to the model
         for tool_call in tool_calls:
             function_name = tool_call.function.name; function_to_call = available_functions.get(function_name); tool_call_id = tool_call.id; function_response_content = None
             try:
                 function_args = json.loads(tool_call.function.arguments)
                 if function_to_call:
+                    # Special handling before calling (same as v4)
                     if function_name == "analyze_resume":
                         if 'career_goal' not in function_args: function_args['career_goal'] = career_goal
                         save_user_resume(user_id, function_args.get('resume_text', ''))
@@ -474,6 +564,7 @@ def get_ai_response_stream(user_id: str, user_input: str) -> Generator[str, None
                         save_user_portfolio(user_id, function_args.get('portfolio_url', ''), function_args.get('portfolio_description', ''))
                     elif function_name == "search_jobs_courses_skills":
                         if 'location' not in function_args or not function_args['location']: function_args['location'] = location if location != 'your area' else None
+
                     logger.info(f"Calling function '{function_name}' with args: {function_args}")
                     function_response_content = function_to_call(**function_args) # Expecting JSON string
                     logger.info(f"Function '{function_name}' returned: {function_response_content[:200]}...")
@@ -481,9 +572,18 @@ def get_ai_response_stream(user_id: str, user_input: str) -> Generator[str, None
             except json.JSONDecodeError as e: logger.error(f"Error decoding args for {function_name}: {tool_call.function.arguments} - {e}"); function_response_content = json.dumps({"error": f"Invalid arguments for tool '{function_name}'."})
             except TypeError as e: logger.error(f"Argument mismatch for {function_name}. Args: {function_args}, Error: {e}"); function_response_content = json.dumps({"error": f"Internal error: Tool '{function_name}' called incorrectly."})
             except Exception as e: logger.exception(f"Error executing function {function_name}: {e}"); function_response_content = json.dumps({"error": f"Error using the '{function_name}' tool."})
-
+
+            # Append result for the next API call
+            tool_results_for_api.append({
+                "tool_call_id": tool_call_id,
+                "role": "tool",
+                "content": function_response_content,
+            })
+            # Also store in DB history
             add_chat_message(user_id, "tool", {"tool_call_id": tool_call_id, "content": function_response_content})

+        messages.extend(tool_results_for_api) # Add all tool results to messages
+
         # --- Make the second API Call (Streaming this time) ---
         logger.info(f"Sending {len(messages)} messages to Groq (incl. tool results) for streaming response.")
         try:
@@ -518,6 +618,7 @@ def get_ai_response_stream(user_id: str, user_input: str) -> Generator[str, None
         logger.info("No tool calls requested by Groq. Streaming initial response.")
         # Need to make the call again with stream=True
         try:
+            # We already have the user message in 'messages', just need to call stream
             stream = client.chat.completions.create(
                 model=MODEL_ID,
                 messages=messages, # History includes system + previous + user
@@ -529,23 +630,36 @@ def get_ai_response_stream(user_id: str, user_input: str) -> Generator[str, None

             # Yield chunks and accumulate full response for DB update
             full_response_content = ""
+            first_chunk = True
             for chunk in stream:
                 delta_content = chunk.choices[0].delta.content
                 if delta_content:
                     yield delta_content
                     full_response_content += delta_content
+                # Handle potential finish reason if needed (e.g., length limit)
+                if chunk.choices[0].finish_reason:
+                    logger.info(f"Streaming finished with reason: {chunk.choices[0].finish_reason}")
+

             logger.info("Finished streaming initial response.")
-            # Update the assistant message stored earlier
-            #
-
-
-            if profile['chat_history'][-1]['role'] == 'assistant':
+            # Update the assistant message stored earlier ONLY if it was a placeholder
+            # A better way: store the full message *after* streaming is complete.
+            db = load_user_database()
+            profile = db.get('users', {}).get(user_id)
+            if profile and profile['chat_history'] and profile['chat_history'][-1]['role'] == 'assistant':
+                # Check if the stored message was the one corresponding to this stream
+                # This check is imperfect without unique IDs per message turn.
+                # Assuming the last assistant message is the one to update.
                 profile['chat_history'][-1]['content'] = full_response_content
-
-
+                # Clear potential placeholder tool calls if no real ones occurred
+                profile['chat_history'][-1].pop('tool_calls', None)
+                save_user_database(db) # Save the updated history
+            else:
+                # Fallback: Add as a new message if history seems inconsistent
+                logger.warning("Could not reliably update last assistant message, adding new one.")
                 add_chat_message(user_id, "assistant", {"content": full_response_content})

+
             return # End the generator

         # --- Handle Groq API Errors for the streaming call ---
@@ -633,12 +747,8 @@ def create_interface():
         update_user_profile(session_user_id, profile_updates); add_emotion_record(session_user_id, emotion)
         initial_input = f"Hi Aishura! I'm {name} from {location}. Focusing on '{cleaned_goal}' in {industry} ({exp_level}, {work_style}). Feeling {emotion}. Help me start?"

-        # Get the first response (streamed)
-
-        # but let's use the stream function for consistency and get the full message.
-        ai_response_full = ""
-        for chunk in get_ai_response_stream(session_user_id, initial_input):
-            ai_response_full += chunk
+        # Get the first response (streamed) and accumulate it
+        ai_response_full = "".join(chunk for chunk in get_ai_response_stream(session_user_id, initial_input))

         initial_chat_display = [[initial_input, ai_response_full]]
         e_fig, p_fig, r_fig, s_fig = create_emotion_chart(session_user_id), create_progress_chart(session_user_id), create_routine_completion_gauge(session_user_id), create_skill_radar_chart(session_user_id)
@@ -658,14 +768,25 @@ def create_interface():

         # Stream AI response and update the placeholder chunk by chunk
         full_response = ""
-
-
-
-
+        try:
+            for chunk in get_ai_response_stream(session_user_id, message_text):
+                full_response += chunk
+                history_list_list[-1][1] = full_response # Update the last message in history
+                yield history_list_list, gr.update() # Yield updated history to Gradio
+        except Exception as e:
+            logger.error(f"Error during AI response streaming: {e}")
+            history_list_list[-1][1] = f"Sorry, an error occurred while generating the response: {e}"
+            yield history_list_list, gr.update() # Show error in chat
+
+
+        # After streaming finishes (or errors out), generate recommendations
+        try:
+            gen_recommendations_simple(session_user_id)
+            recs_md = display_recommendations(session_user_id)
+        except Exception as e:
+            logger.error(f"Error generating recommendations: {e}")
+            recs_md = "Error loading recommendations."

-        # After streaming finishes, generate recommendations
-        gen_recommendations_simple(session_user_id)
-        recs_md = display_recommendations(session_user_id)

         # Yield final state with recommendations updated
         yield history_list_list, gr.update(value=recs_md)
@@ -701,6 +822,7 @@ def create_interface():
         logger.info(f"Manual Resume Analysis UI v5: file={resume_file}")
         if resume_file is None: return "Please upload resume.", gr.update(value=None), gr.update(value=None)
         try:
+            # Gradio File object has .name attribute for the temp path
            with open(resume_file.name, 'r', encoding='utf-8') as f: resume_text = f.read()
         except Exception as e: logger.error(f"Error reading file: {e}"); return f"Error reading file: {e}", gr.update(value=None), gr.update(value=None)
         if not resume_text: return "Resume empty.", gr.update(value=None), gr.update(value=None)
@@ -852,6 +974,7 @@ if __name__ == "__main__":

     logger.info("Starting Aishura v5 (Groq Streaming) Gradio application...")
     aishura_app = create_interface()
+    # Consider adding server_name="0.0.0.0" to allow access from local network if needed
     aishura_app.launch(share=False, debug=False)
     logger.info("Aishura Gradio application stopped.")
