import gradio as gr
import fitz  # PyMuPDF
from langchain_community.embeddings import HuggingFaceEmbeddings
import chromadb
import uuid
from groq import Groq
import re
import json
import os
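# Requires: gradio, pymupdf, langchain-community, sentence-transformers, chromadb, groq.
# The GROQ_API_KEY environment variable must be set before the app is launched.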

# -------------------- Core Functions --------------------
def setup_embeddings():
    return HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

def setup_chromadb():
    client = chromadb.PersistentClient(path="./chroma_db")
    return client.get_or_create_collection(name="resumes")

def extract_text_from_resume(file):
    # Get the file path from Gradio's file object
    file_path = file.name
    
    if file_path.endswith(".pdf"):
        # Open the PDF file directly from the path
        with fitz.open(file_path) as doc:
            return "\n".join([page.get_text("text") for page in doc])
    elif file_path.endswith(".txt"):
        # Open the text file directly
        with open(file_path, "r", encoding="utf-8") as f:
            return f.read()
    return ""

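# Naive name extraction: take the first "Firstname Lastname" pattern near the top of the
# resume, falling back to a generic placeholder if nothing matches.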
def extract_candidate_name(resume_text):
    name_match = re.search(r"([A-Z][a-z]+\s+[A-Z][a-z]+)", resume_text[:500])
    if name_match:
        return name_match.group(1)
    return "Candidate"

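# Split the resume into fixed-size 512-character chunks, embed each chunk, and index them
# in ChromaDB under ids derived from the session's user_id.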
def store_resume(text, user_id):
    chunks = [text[i:i+512] for i in range(0, len(text), 512)]
    for i, chunk in enumerate(chunks):
        embedding = embedding_model.embed_query(chunk)
        collection.add(
            ids=[f"{user_id}-{i}"],
            embeddings=[embedding],
            metadatas=[{"text": chunk}]
        )
    return extract_candidate_name(text)

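# Embed the query and return the three most similar resume chunks, joined into one context string.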
def retrieve_resume(user_id, query):
    query_embedding = embedding_model.embed_query(query)
    results = collection.query(query_embeddings=[query_embedding], n_results=3)
    return "\n".join([doc["text"] for doc in results["metadatas"][0]])

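# Single gateway to the Groq chat API; agent_type selects which interviewer persona (system prompt) is used.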
def generate_groq_response(prompt, agent_type, temperature=0.7):
    system_prompts = {
        "zero_agent": """You are the initial interviewer. Your role is to warmly greet the candidate by name and ask general background questions to make them comfortable before transitioning to technical topics. Be conversational, friendly, and engaging. Focus on understanding their motivation, work history, and personality.""",
        "technical_agent": """You are an expert technical interviewer. Analyze the candidate's resume thoroughly and ask highly relevant technical questions that demonstrate your understanding of their background. Your questions should be challenging but fair, focusing on their claimed skills and past projects. Phrase questions clearly and directly.""",
        "clarification_agent": """You are a supportive interviewer who helps clarify questions when candidates need assistance. When a candidate seems confused or directly asks for clarification, explain the question in simpler terms with examples. If they give a partial answer, ask follow-up questions to help them elaborate. Your goal is to maintain conversation flow and help candidates showcase their knowledge.""",
        "report_agent": """You are an interview assessment specialist. Create a detailed, constructive report of the interview without scoring or grading the candidate. Identify correct answers with green text and areas for improvement with red text. Focus on suggesting specific technical topics the candidate should study further rather than platforms or resources. Be encouraging and specific in your feedback."""
    }
    
    client = Groq(api_key=os.environ["GROQ_API_KEY"])
    response = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[
            {"role": "system", "content": system_prompts.get(agent_type, "You are an AI interview coach.")},
            {"role": "user", "content": prompt}
        ],
        temperature=temperature,
        max_tokens=800
    )
    return response.choices[0].message.content

# Remove Markdown syntax from LLM output so the report renders cleanly as plain text inside HTML.
def strip_markdown(text):
    text = re.sub(r'\*\*(.*?)\*\*', r'\1', text)                        # bold
    text = re.sub(r'\*(.*?)\*', r'\1', text)                            # italics
    text = re.sub(r'`(.*?)`', r'\1', text)                              # inline code
    text = re.sub(r'\[(.*?)\]\((.*?)\)', r'\1', text)                   # links -> link text
    text = re.sub(r'^#+\s+', '', text, flags=re.MULTILINE)              # headings
    text = re.sub(r'^>\s+', '', text, flags=re.MULTILINE)               # blockquotes
    text = re.sub(r'^\s*[-*_]{3,}\s*$', '', text, flags=re.MULTILINE)   # horizontal rules
    text = re.sub(r'^\s*[-*+]\s+', '• ', text, flags=re.MULTILINE)      # bullet markers -> •
    text = re.sub(r'^\s*\d+\.\s+', '', text, flags=re.MULTILINE)        # numbered-list markers
    return text

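# Moderation pass over each candidate reply; the low temperature keeps the verdict consistent.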
def strict_agent_monitor(candidate_response):
    prompt = f"""
    Candidate Response: "{candidate_response}"
    Check for these behaviors strictly but fairly:
    1. Repeated gibberish or nonsensical keyboard smashing.
    2. Harsh, rude, or aggressive language.
    3. Profanity or clearly offensive content.
    If clearly inappropriate (repeated profanity/aggression/gibberish), respond:
    "INAPPROPRIATE: [reason]"
    If minor awkwardness, occasional mistakes, or nervousness, respond simply:
    "ACCEPTABLE"
    Be forgiving, human-like, and flexible—only flag clear and serious issues.
    Be human-like: allow up to two minor instances before marking responses as inappropriate. 
    Only flag as inappropriate after clear repeated offenses (3 or more times) or severe disrespect/profanity.
    """
    return generate_groq_response(prompt, "technical_agent", temperature=0.1)

# -------------------- Initialize Components --------------------
embedding_model = setup_embeddings()
collection = setup_chromadb()

# -------------------- Gradio Application --------------------
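# Holds all per-session interview state and orchestrates the agents through the
# greeting, technical-question, clarification, and report phases.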
class InterviewCoach:
    def __init__(self):
        self.user_id = str(uuid.uuid4())
        self.interview_active = False
        self.current_step = 0
        self.interview_phase = "greeting"
        self.questions = []
        self.responses = []
        self.candidate_name = "Candidate"
        self.needs_clarification = False
        self.clarification_response = None
        self.uploaded_file = None

    def start_interview(self, file):
        if not file:
            return "Please upload a resume file first", None, None
        
        self.uploaded_file = file
        self.interview_active = True
        self.current_step = 0
        self.interview_phase = "greeting"
        self.questions = []
        self.responses = []
        
        resume_text = extract_text_from_resume(file)
        self.candidate_name = store_resume(resume_text, self.user_id)
        
        resume_data = retrieve_resume(self.user_id, "background experience")
        greeting = self.zero_agent_greeting(resume_data, self.candidate_name)
        self.questions.append(greeting)
        
        return f"Interview started with {self.candidate_name}", greeting, gr.update(visible=True)

    def zero_agent_greeting(self, resume_data, candidate_name):
        prompt = f"""
        Resume Data: {resume_data}
        Candidate Name: {candidate_name}
        
        Generate a brief, warm greeting for {candidate_name}. The greeting should:
        1. Begin with "Hello [Candidate Name]" 
        2. Very briefly mention something from their resume (one skill or experience)
        3. Ask ONE simple question about their most recent job or experience
        4. Keep it extremely concise (2-3 short sentences maximum)
        
        The greeting must be brief as it will be converted to voice later.
        """
        return generate_groq_response(prompt, "zero_agent", temperature=0.7)

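    # Difficulty ramps from introductory to intermediate to advanced as more questions are asked.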
    def technical_agent_question(self, resume_data, interview_history, question_count):
        difficulty = "introductory" if question_count < 2 else "intermediate" if question_count < 4 else "advanced"
        
        prompt = f"""
        Resume Data: {resume_data}
        Interview History: {interview_history}
        Question Number: {question_count + 1}
        Difficulty: {difficulty}
        
        Generate a relevant technical interview question based on the candidate's resume. The question should:
        1. Be specific to skills or experiences mentioned in their resume
        2. Feel like it's coming from someone who has read their background
        3. Be appropriately challenging based on their experience level
        4. Be directly relevant to their field
        5. Be clearly phrased as a question (no preambles or explanations)
        """
        return generate_groq_response(prompt, "technical_agent", temperature=0.7)

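    # If the candidate appears confused, explain the question; otherwise judge whether the
    # answer is complete or needs a targeted follow-up question.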
    def clarification_agent_response(self, question, candidate_response, resume_data):
        needs_clarification = any(phrase in candidate_response.lower() for phrase in 
                                ["i don't understand", "can you explain", "not sure", "what do you mean", 
                                 "confused", "unclear", "can you clarify", "don't know what", "?"])
        
        if needs_clarification:
            prompt = f"""
            Original Question: {question}
            Candidate Response: {candidate_response}
            Resume Data: {resume_data}
            
            The candidate needs clarification. Your task is to:
            1. Acknowledge their confusion
            2. Explain the question in simpler terms
            3. Provide a concrete example to illustrate what you're asking
            4. Rephrase the question in a more approachable way
            
            IMPORTANT: Respond in a direct, conversational manner WITHOUT any explanation of your reasoning.
            """
            return generate_groq_response(prompt, "clarification_agent", temperature=0.6)
        else:
            prompt = f"""
            Original Question: {question}
            Candidate Response: {candidate_response}
            Resume Data: {resume_data}
            
            Evaluate if this response is complete or needs a follow-up.
            If the response is thorough and complete, respond with "COMPLETE".
            If the response is partial or could benefit from elaboration, provide a specific follow-up question.
            If the response is off-topic, provide a more specific version of the original question.
            
            IMPORTANT: If providing a follow-up question, give ONLY the question itself without any explanation of why you're asking it.
            """
            follow_up = generate_groq_response(prompt, "clarification_agent", temperature=0.6)
            
            if "COMPLETE" in follow_up:
                return None
            else:
                question_match = re.search(r"(?:To help|I would|Let me|Could you|What|How|Why|Can you|Tell me|Describe|Explain).*\?", follow_up)
                if question_match:
                    return question_match.group(0)
                return follow_up

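    # Turn the full question/answer transcript into a plain-text assessment report via the report agent.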
    def report_agent_feedback(self, interview_data, resume_data):
        questions_answers = "\n\n".join([
            f"Q{i+1}: {qa['question']}\nAnswer: {qa['answer']}" 
            for i, qa in enumerate(interview_data)
        ])
        
        prompt = f"""
        Resume Data: {resume_data}
        
        Interview Transcript:
        {questions_answers}
        
        Generate a detailed, visually appealing interview report that:
        1. Analyzes each answer without scoring or grading
        2. Identifies correct information (prefix with "CORRECT: ")
        3. Identifies areas for improvement (prefix with "IMPROVE: ")
        4. Recommends 3-5 specific technical topics (not platforms) the candidate should focus on
        
        Format guidelines:
        - Use emojis to make sections more engaging (✅ for correct points, 💡 for improvement areas)
        - ABSOLUTELY NO MARKDOWN SYNTAX - use plain text only without asterisks, backticks, hashes, etc.
        - Use simple formatting that works well in HTML
        - For each question, provide concise bullet-point style feedback
        - Keep language encouraging and constructive
        
        Format the report with these sections:
        - QUESTION ANALYSIS (for each question)
        - KEY STRENGTHS
        - FOCUS AREAS
        - RECOMMENDED TOPICS
        
        Do not include any numerical scores or grades.
        """
        feedback = generate_groq_response(prompt, "report_agent", temperature=0.7)
        return strip_markdown(feedback)

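    # Main turn handler: moderate the answer, record it, optionally route through a clarification
    # round, then either ask the next question or finish with the final report after six answers.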
    def process_response(self, answer):
        if not answer.strip():
            return "Please provide a response", None, None
        
        appropriateness_check = strict_agent_monitor(answer)
        if "INAPPROPRIATE:" in appropriateness_check:
            reason = appropriateness_check.split("INAPPROPRIATE:")[1].strip()
            self.interview_active = False
            return f"⚠️ Interview Terminated: {reason}", None, gr.update(visible=False)
        
        current_question = self.questions[self.current_step]
        
        if self.needs_clarification:
            self.needs_clarification = False
            self.responses[-1]['clarification'] = self.clarification_response
            self.responses[-1]['clarification_response'] = answer
            self.clarification_response = None
            
            if self.interview_phase == "greeting":
                self.interview_phase = "technical"
                resume_data = retrieve_resume(self.user_id, "technical skills")
                new_question = self.technical_agent_question(resume_data, "", 0)
                self.questions.append(new_question)
                self.current_step += 1
                return None, new_question, None
            elif len(self.responses) >= 6:
                self.interview_active = False
                return self.generate_final_report(), None, gr.update(visible=False)
            else:
                interview_history = "\n".join([
                    f"Q: {item['question']}\nA: {item['answer']}" 
                    for item in self.responses
                ])
                resume_data = retrieve_resume(self.user_id, "technical skills")
                new_question = self.technical_agent_question(
                    resume_data, 
                    interview_history, 
                    len(self.responses) - 1
                )
                self.questions.append(new_question)
                self.current_step += 1
                return None, new_question, None
        else:
            self.responses.append({
                'question': current_question,
                'answer': answer
            })
            
            resume_data = retrieve_resume(self.user_id, current_question)
            clarification = self.clarification_agent_response(
                current_question, 
                answer,
                resume_data
            )
            
            if clarification:
                self.needs_clarification = True
                self.clarification_response = clarification
                return None, clarification, None
            else:
                if self.interview_phase == "greeting":
                    self.interview_phase = "technical"
                    resume_data = retrieve_resume(self.user_id, "technical skills")
                    new_question = self.technical_agent_question(resume_data, "", 0)
                    self.questions.append(new_question)
                    self.current_step += 1
                    return None, new_question, None
                elif len(self.responses) >= 6:
                    self.interview_active = False
                    return self.generate_final_report(), None, gr.update(visible=False)
                else:
                    interview_history = "\n".join([
                        f"Q: {item['question']}\nA: {item['answer']}" 
                        for item in self.responses
                    ])
                    resume_data = retrieve_resume(self.user_id, "technical skills")
                    new_question = self.technical_agent_question(
                        resume_data, 
                        interview_history, 
                        len(self.responses) - 1
                    )
                    self.questions.append(new_question)
                    self.current_step += 1
                    return None, new_question, None

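    # Assemble the HTML report: per-question feedback parsed from the report agent's
    # CORRECT:/IMPROVE: markers, plus a list of recommended study topics.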
    def generate_final_report(self):
        resume_data = retrieve_resume(self.user_id, "complete profile")
        feedback = self.report_agent_feedback(self.responses, resume_data)
        
        processed_feedback = []
        for qa_index, qa in enumerate(self.responses):
            question_section = f"Q{qa_index+1}: {qa['question']}"
            answer_section = f"Answer: {qa['answer']}"
            
            correct_parts = re.findall(r"CORRECT:(.*?)(?=IMPROVE:|$)", feedback, re.DOTALL)
            improve_parts = re.findall(r"IMPROVE:(.*?)(?=CORRECT:|$)", feedback, re.DOTALL)

            correct_html = ""
            if qa_index < len(correct_parts) and correct_parts[qa_index].strip():
                correct_text = strip_markdown(correct_parts[qa_index].strip())
                correct_html = f"""
                <div style="color: #4CD964; border-left: 4px solid #4CD964; padding-left: 1rem; margin: 1rem 0;">
                    <h4 style="color: #4CD964; margin:0;">✅ Strong Points</h4>
                    <p style="color: #CCCCCC; margin-top:0.5rem;">{correct_text}</p>
                </div>
                """
                
            improve_html = ""
            if qa_index < len(improve_parts) and improve_parts[qa_index].strip():
                improve_text = strip_markdown(improve_parts[qa_index].strip())
                improve_html = f"""
                <div style="color: #FF3B30; border-left: 4px solid #FF3B30; padding-left: 1rem; margin: 1rem 0;">
                    <h4 style="color: #FF3B30; margin:0;">💡 Areas to Develop</h4>
                    <p style="color: #CCCCCC; margin-top:0.5rem;">{improve_text}</p>
                </div>
                """
                
            processed_feedback.append({
                "question": question_section,
                "answer": answer_section,
                "correct_html": correct_html,
                "improve_html": improve_html
            })

        topic_match = re.search(r"RECOMMENDED TOPICS:(.*?)(?=$)", feedback, re.DOTALL)
        topics = []
        if topic_match:
            topics_text = topic_match.group(1).strip()
            topics = [topic.strip() for topic in re.split(r'\d+\.\s+', topics_text) if topic.strip()]
            topics = [topic for topic in topics if len(topic) > 3]
    
        report_html = """
        <div style="background:#1A1A1A; border-radius:15px; padding:2rem; margin:1rem 0; border:1px solid #333333;">
            <h3 style='color: #4A90E2; margin-bottom: 1.5rem;'>Interview Summary Report</h3>
            <div style="background:#2D2D2D; padding:1.5rem; border-radius:10px; margin:2rem 0;">
                <h4 style="margin:0; color:#FFFFFF;">Interview Overview</h4>
                <p style="margin:1rem 0 0 0; color:#CCCCCC;">Below is a detailed breakdown of your interview responses with constructive feedback to help you improve your technical skills.</p>
            </div>
            <h4 style='color: #FFFFFF; margin-bottom:1rem;'>Question-by-Question Analysis</h4>
        """
        
        for idx, response in enumerate(processed_feedback):
            report_html += f"""
            <div style='background:#2D2D2D; border-radius:10px; padding:1.5rem; margin:1rem 0;'>
                <details>
                    <summary style='color: #FFFFFF; font-weight:500; cursor:pointer;'>Question {idx+1}</summary>
                    <div style='margin-top:1rem;'>
                        <p style='font-weight: 500; color: #FFFFFF; font-size: 1.1rem;'>❝{response['question']}❞</p>
                        <div style='background: #333333; padding:1rem; border-radius:8px; margin:1rem 0;'>
                            <p style='color: #888888; margin:0;'>Your Answer:</p>
                            <p style='color: #FFFFFF; margin:0.5rem 0;'>{response['answer']}</p>
                        </div>
                        {response['correct_html']}
                        {response['improve_html']}
                    </div>
                </details>
            </div>
            """
        
        if topics:
            report_html += """
            <div style="background:#2D2D2D; padding:1.5rem; border-radius:10px; margin:1rem 0;">
                <h4 style="margin:0; color:#FFFFFF;">Recommended Topics to Study</h4>
                <p style="margin:1rem 0; color:#CCCCCC;">Based on your interview responses, we recommend focusing on these key areas:</p>
                <div style="margin-top:1rem;">
            """
            
            for topic in topics:
                report_html += f"""
                <span style="display:inline-block; background:#333333; padding:5px 10px; margin:5px; border-radius:15px; font-size:0.8rem;">{topic}</span>
                """
                
            report_html += """
                </div>
            </div>
            """
        
        report_html += "</div>"
        return report_html

# Create the Gradio interface
coach = InterviewCoach()

with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="gray")) as demo:
    gr.Markdown("# 💼 MockMate: Your personal AI mock interview buddy")
    gr.Markdown("Upload your resume for a personalized mock interview session")
    
    with gr.Row():
        with gr.Column(scale=1):
            file_upload = gr.File(label="Upload Resume (PDF or TXT)", file_types=[".pdf", ".txt"])
            start_btn = gr.Button("🚀 Start Interview Session")
            status = gr.Textbox(label="Status", interactive=False)
        
        with gr.Column(scale=2):
            question_display = gr.Textbox(label="Current Question", interactive=False, lines=3)
            answer_input = gr.Textbox(label="Your Response", lines=5, visible=False)
            submit_btn = gr.Button("Submit Response", visible=False)
            report_display = gr.HTML(label="Interview Report", visible=False)
    
    def start_interview(file):
        return coach.start_interview(file)

    def process_response(answer):
        status_msg, question, btn_update = coach.process_response(answer)
        # The coach returns the final HTML report (or a plain-text status/termination message)
        # in the first slot; send HTML to the report panel and plain text to the status box.
        if status_msg and status_msg.lstrip().startswith("<"):
            return None, question, btn_update, gr.update(value=status_msg, visible=True)
        return status_msg, question, btn_update, gr.update()

    start_btn.click(
        start_interview,
        inputs=[file_upload],
        outputs=[status, question_display, answer_input]
    ).then(
        lambda: gr.update(visible=True),
        outputs=[submit_btn]
    )

    submit_btn.click(
        process_response,
        inputs=[answer_input],
        outputs=[status, question_display, submit_btn, report_display]
    )

demo.launch()