import gradio as gr
import fitz  # PyMuPDF
from langchain_community.embeddings import HuggingFaceEmbeddings
import chromadb
import uuid
from groq import Groq
import re
import json
import os


# -------------------- Core Functions --------------------

def setup_embeddings():
    return HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")


def setup_chromadb():
    client = chromadb.PersistentClient(path="./chroma_db")
    return client.get_or_create_collection(name="resumes")


def extract_text_from_resume(file):
    # Get the file path from Gradio's file object
    file_path = file.name

    if file_path.endswith(".pdf"):
        # Open the PDF file directly from the path
        with fitz.open(file_path) as doc:
            return "\n".join([page.get_text("text") for page in doc])
    elif file_path.endswith(".txt"):
        # Open the text file directly
        with open(file_path, "r", encoding="utf-8") as f:
            return f.read()
    return ""


def extract_candidate_name(resume_text):
    name_match = re.search(r"([A-Z][a-z]+\s+[A-Z][a-z]+)", resume_text[:500])
    if name_match:
        return name_match.group(1)
    return "Candidate"


def store_resume(text, user_id):
    chunks = [text[i:i + 512] for i in range(0, len(text), 512)]
    for i, chunk in enumerate(chunks):
        embedding = embedding_model.embed_query(chunk)
        collection.add(
            ids=[f"{user_id}-{i}"],
            embeddings=[embedding],
            metadatas=[{"text": chunk}]
        )
    return extract_candidate_name(text)


def retrieve_resume(user_id, query):
    query_embedding = embedding_model.embed_query(query)
    results = collection.query(query_embeddings=[query_embedding], n_results=3)
    return "\n".join([doc["text"] for doc in results["metadatas"][0]])


def generate_groq_response(prompt, agent_type, temperature=0.7):
    system_prompts = {
        "zero_agent": """You are the initial interviewer. Your role is to warmly greet the candidate by name
        and ask general background questions to make them comfortable before transitioning to technical topics.
        Be conversational, friendly, and engaging. Focus on understanding their motivation, work history, and personality.""",

        "technical_agent": """You are an expert technical interviewer. Analyze the candidate's resume thoroughly
        and ask highly relevant technical questions that demonstrate your understanding of their background.
        Your questions should be challenging but fair, focusing on their claimed skills and past projects.
        Phrase questions clearly and directly.""",

        "clarification_agent": """You are a supportive interviewer who helps clarify questions when candidates need assistance.
        When a candidate seems confused or directly asks for clarification, explain the question in simpler terms with examples.
        If they give a partial answer, ask follow-up questions to help them elaborate.
        Your goal is to maintain conversation flow and help candidates showcase their knowledge.""",

        "report_agent": """You are an interview assessment specialist. Create a detailed, constructive report of the interview
        without scoring or grading the candidate. Identify correct answers with green text and areas for improvement with red text.
        Focus on suggesting specific technical topics the candidate should study further rather than platforms or resources.
        Be encouraging and specific in your feedback."""
    }

    client = Groq(api_key=os.environ["GROQ_API_KEY"])
    response = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[
            {"role": "system", "content": system_prompts.get(agent_type, "You are an AI interview coach.")},
            {"role": "user", "content": prompt}
        ],
        temperature=temperature,
        max_tokens=800
    )
    return response.choices[0].message.content


def strip_markdown(text):
    text = re.sub(r'\*\*(.*?)\*\*', r'\1', text)
    text = re.sub(r'\*(.*?)\*', r'\1', text)
    text = re.sub(r'`(.*?)`', r'\1', text)
    text = re.sub(r'\[(.*?)\]\((.*?)\)', r'\1', text)
    text = re.sub(r'^#+\s+', '', text, flags=re.MULTILINE)
    text = re.sub(r'^>\s+', '', text, flags=re.MULTILINE)
    text = re.sub(r'^\s*[-*_]{3,}\s*$', '', text, flags=re.MULTILINE)
    text = re.sub(r'^\s*[-*+]\s+', '• ', text, flags=re.MULTILINE)
    text = re.sub(r'^\s*\d+\.\s+', '', text, flags=re.MULTILINE)
    return text


def strict_agent_monitor(candidate_response):
    prompt = f"""
    Candidate Response: "{candidate_response}"

    Check for these behaviors strictly but fairly:
    1. Repeated gibberish or nonsensical keyboard smashing.
    2. Harsh, rude, or aggressive language.
    3. Profanity or clearly offensive content.

    If clearly inappropriate (repeated profanity/aggression/gibberish), respond: "INAPPROPRIATE: [reason]"
    If minor awkwardness, occasional mistakes, or nervousness, respond simply: "ACCEPTABLE"

    Be forgiving, human-like, and flexible—only flag clear and serious issues.
    Be human-like: allow up to two minor instances before marking responses as inappropriate.
    Only flag as inappropriate after clear repeated offenses (3 or more times) or severe disrespect/profanity.
    """
    return generate_groq_response(prompt, "technical_agent", temperature=0.1)


# -------------------- Initialize Components --------------------
embedding_model = setup_embeddings()
collection = setup_chromadb()


# -------------------- Gradio Application --------------------
class InterviewCoach:
    def __init__(self):
        self.user_id = str(uuid.uuid4())
        self.interview_active = False
        self.current_step = 0
        self.interview_phase = "greeting"
        self.questions = []
        self.responses = []
        self.candidate_name = "Candidate"
        self.needs_clarification = False
        self.clarification_response = None
        self.uploaded_file = None

    def start_interview(self, file):
        if not file:
            return "Please upload a resume file first", None, None

        self.uploaded_file = file
        self.interview_active = True
        self.current_step = 0
        self.interview_phase = "greeting"
        self.questions = []
        self.responses = []

        resume_text = extract_text_from_resume(file)
        self.candidate_name = store_resume(resume_text, self.user_id)
        resume_data = retrieve_resume(self.user_id, "background experience")

        greeting = self.zero_agent_greeting(resume_data, self.candidate_name)
        self.questions.append(greeting)

        return f"Interview started with {self.candidate_name}", greeting, gr.update(visible=True)

    def zero_agent_greeting(self, resume_data, candidate_name):
        prompt = f"""
        Resume Data: {resume_data}
        Candidate Name: {candidate_name}

        Generate a brief, warm greeting for {candidate_name}. The greeting should:
        1. Begin with "Hello [Candidate Name]"
        2. Very briefly mention something from their resume (one skill or experience)
        3. Ask ONE simple question about their most recent job or experience
        4. Keep it extremely concise (2-3 short sentences maximum)

        The greeting must be brief as it will be converted to voice later.
""" return generate_groq_response(prompt, "zero_agent", temperature=0.7) def technical_agent_question(self, resume_data, interview_history, question_count): difficulty = "introductory" if question_count < 2 else "intermediate" if question_count < 4 else "advanced" prompt = f""" Resume Data: {resume_data} Interview History: {interview_history} Question Number: {question_count + 1} Difficulty: {difficulty} Generate a relevant technical interview question based on the candidate's resume. The question should: 1. Be specific to skills or experiences mentioned in their resume 2. Feel like it's coming from someone who has read their background 3. Be appropriately challenging based on their experience level 4. Be directly relevant to their field 5. Be clearly phrased as a question (no preambles or explanations) """ return generate_groq_response(prompt, "technical_agent", temperature=0.7) def clarification_agent_response(self, question, candidate_response, resume_data): needs_clarification = any(phrase in candidate_response.lower() for phrase in ["i don't understand", "can you explain", "not sure", "what do you mean", "confused", "unclear", "can you clarify", "don't know what", "?"]) if needs_clarification: prompt = f""" Original Question: {question} Candidate Response: {candidate_response} Resume Data: {resume_data} The candidate needs clarification. Your task is to: 1. Acknowledge their confusion 2. Explain the question in simpler terms 3. Provide a concrete example to illustrate what you're asking 4. Rephrase the question in a more approachable way IMPORTANT: Respond in a direct, conversational manner WITHOUT any explanation of your reasoning. """ return generate_groq_response(prompt, "clarification_agent", temperature=0.6) else: prompt = f""" Original Question: {question} Candidate Response: {candidate_response} Resume Data: {resume_data} Evaluate if this response is complete or needs a follow-up. If the response is thorough and complete, respond with "COMPLETE". If the response is partial or could benefit from elaboration, provide a specific follow-up question. If the response is off-topic, provide a more specific version of the original question. IMPORTANT: If providing a follow-up question, give ONLY the question itself without any explanation of why you're asking it. """ follow_up = generate_groq_response(prompt, "clarification_agent", temperature=0.6) if "COMPLETE" in follow_up: return None else: question_match = re.search(r"(?:To help|I would|Let me|Could you|What|How|Why|Can you|Tell me|Describe|Explain).*\?", follow_up) if question_match: return question_match.group(0) return follow_up def report_agent_feedback(self, interview_data, resume_data): questions_answers = "\n\n".join([ f"Q{i+1}: {qa['question']}\nAnswer: {qa['answer']}" for i, qa in enumerate(interview_data) ]) prompt = f""" Resume Data: {resume_data} Interview Transcript: {questions_answers} Generate a detailed, visually appealing interview report that: 1. Analyzes each answer without scoring or grading 2. Identifies correct information (prefix with "CORRECT: ") 3. Identifies areas for improvement (prefix with "IMPROVE: ") 4. Recommends 3-5 specific technical topics (not platforms) the candidate should focus on Format guidelines: - Use emojis to make sections more engaging (✅ for correct points, 💡 for improvement areas) - ABSOLUTELY NO MARKDOWN SYNTAX - use plain text only without asterisks, backticks, hashes, etc. 
        - Use simple formatting that works well in HTML
        - For each question, provide concise bullet-point style feedback
        - Keep language encouraging and constructive

        Format the report with these sections:
        - QUESTION ANALYSIS (for each question)
        - KEY STRENGTHS
        - FOCUS AREAS
        - RECOMMENDED TOPICS

        Do not include any numerical scores or grades.
        """
        feedback = generate_groq_response(prompt, "report_agent", temperature=0.7)
        return strip_markdown(feedback)

    def process_response(self, answer):
        if not answer.strip():
            return "Please provide a response", None, None

        appropriateness_check = strict_agent_monitor(answer)
        if "INAPPROPRIATE:" in appropriateness_check:
            reason = appropriateness_check.split("INAPPROPRIATE:")[1].strip()
            self.interview_active = False
            return f"⚠️ Interview Terminated: {reason}", None, gr.update(visible=False)

        current_question = self.questions[self.current_step]

        if self.needs_clarification:
            self.needs_clarification = False
            self.responses[-1]['clarification'] = self.clarification_response
            self.responses[-1]['clarification_response'] = answer
            self.clarification_response = None

            if self.interview_phase == "greeting":
                self.interview_phase = "technical"
                resume_data = retrieve_resume(self.user_id, "technical skills")
                new_question = self.technical_agent_question(resume_data, "", 0)
                self.questions.append(new_question)
                self.current_step += 1
                return None, new_question, None
            elif len(self.responses) >= 6:
                self.interview_active = False
                return self.generate_final_report(), None, gr.update(visible=False)
            else:
                interview_history = "\n".join([
                    f"Q: {item['question']}\nA: {item['answer']}"
                    for item in self.responses
                ])
                resume_data = retrieve_resume(self.user_id, "technical skills")
                new_question = self.technical_agent_question(
                    resume_data, interview_history, len(self.responses) - 1
                )
                self.questions.append(new_question)
                self.current_step += 1
                return None, new_question, None
        else:
            self.responses.append({
                'question': current_question,
                'answer': answer
            })

            resume_data = retrieve_resume(self.user_id, current_question)
            clarification = self.clarification_agent_response(
                current_question, answer, resume_data
            )

            if clarification:
                self.needs_clarification = True
                self.clarification_response = clarification
                return None, clarification, None
            else:
                if self.interview_phase == "greeting":
                    self.interview_phase = "technical"
                    resume_data = retrieve_resume(self.user_id, "technical skills")
                    new_question = self.technical_agent_question(resume_data, "", 0)
                    self.questions.append(new_question)
                    self.current_step += 1
                    return None, new_question, None
                elif len(self.responses) >= 6:
                    self.interview_active = False
                    return self.generate_final_report(), None, gr.update(visible=False)
                else:
                    interview_history = "\n".join([
                        f"Q: {item['question']}\nA: {item['answer']}"
                        for item in self.responses
                    ])
                    resume_data = retrieve_resume(self.user_id, "technical skills")
                    new_question = self.technical_agent_question(
                        resume_data, interview_history, len(self.responses) - 1
                    )
                    self.questions.append(new_question)
                    self.current_step += 1
                    return None, new_question, None

    def generate_final_report(self):
        resume_data = retrieve_resume(self.user_id, "complete profile")
        feedback = self.report_agent_feedback(self.responses, resume_data)

        processed_feedback = []
        for qa_index, qa in enumerate(self.responses):
            question_section = f"Q{qa_index+1}: {qa['question']}"
            answer_section = f"Answer: {qa['answer']}"

            correct_parts = re.findall(r"CORRECT:(.*?)(?=IMPROVE:|$)", feedback, re.DOTALL)
            improve_parts = re.findall(r"IMPROVE:(.*?)(?=CORRECT:|$)", feedback, re.DOTALL)
correct_html = "" if qa_index < len(correct_parts) and correct_parts[qa_index].strip(): correct_text = strip_markdown(correct_parts[qa_index].strip()) correct_html = f"""
{correct_text}
{improve_parts[qa_index].strip()}
Below is a detailed breakdown of your interview responses with constructive feedback to help you improve your technical skills.
❝{response['question']}❞
Your Answer:
{response['answer']}
Based on your interview responses, we recommend focusing on these key areas:
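

# -------------------- Example UI Wiring (illustrative) --------------------
# A minimal sketch of how InterviewCoach could be hooked up to a Gradio Blocks
# interface. The original launch code is not shown above, so the component
# names and layout here are assumptions for illustration, not the app's UI.
if __name__ == "__main__":
    coach = InterviewCoach()

    with gr.Blocks() as demo:
        resume_file = gr.File(label="Upload resume (PDF or TXT)")
        start_btn = gr.Button("Start Interview")
        status_box = gr.HTML()  # also displays the final HTML report

        with gr.Group(visible=False) as answer_area:
            question_box = gr.Textbox(label="Interviewer", interactive=False)
            answer_box = gr.Textbox(label="Your Answer")
            submit_btn = gr.Button("Submit Answer")

        # start_interview returns (status, first question, visibility update)
        start_btn.click(coach.start_interview,
                        inputs=resume_file,
                        outputs=[status_box, question_box, answer_area])

        # process_response returns (status or report, next question, visibility update)
        submit_btn.click(coach.process_response,
                         inputs=answer_box,
                         outputs=[status_box, question_box, answer_area])

    demo.launch()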