Update app.py
app.py CHANGED
@@ -10,17 +10,17 @@ import os
 
 # -------------------- Core Functions --------------------
 def setup_embeddings():
-    return HuggingFaceEmbeddings(model_name="sentence-transformers/all-
+    return HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
 
 def setup_chromadb():
     client = chromadb.PersistentClient(path="./chroma_db")
     return client.get_or_create_collection(name="resumes")
 
 def extract_text_from_resume(file):
-    if file.name.endswith(
+    if file.name.endswith(".pdf"):
         doc = fitz.open(stream=file.read(), filetype="pdf")
         return "\n".join([page.get_text("text") for page in doc])
-    elif file.name.endswith(
+    elif file.name.endswith(".txt"):
         return file.read().decode("utf-8")
     return ""
 
@@ -99,7 +99,7 @@ def strict_agent_monitor(candidate_response):
 embedding_model = setup_embeddings()
 collection = setup_chromadb()
 
-# -------------------- Gradio
+# -------------------- Gradio Application --------------------
 class InterviewCoach:
     def __init__(self):
         self.user_id = str(uuid.uuid4())
@@ -113,28 +113,25 @@ class InterviewCoach:
         self.clarification_response = None
         self.uploaded_file = None
 
-    def
-
-
-        resume_text = extract_text_from_resume(f)
-        self.candidate_name = store_resume(resume_text, self.user_id)
-        return f"Resume processed for {self.candidate_name}"
-
-    def start_interview(self):
-        if not self.uploaded_file:
-            return "Please upload a resume first", None
+    def start_interview(self, file):
+        if not file:
+            return "Please upload a resume file first", None, None
 
+        self.uploaded_file = file
         self.interview_active = True
         self.current_step = 0
         self.interview_phase = "greeting"
         self.questions = []
         self.responses = []
 
+        resume_text = extract_text_from_resume(file)
+        self.candidate_name = store_resume(resume_text, self.user_id)
+
         resume_data = retrieve_resume(self.user_id, "background experience")
         greeting = zero_agent_greeting(resume_data, self.candidate_name)
         self.questions.append(greeting)
 
-        return f"Interview started with {self.candidate_name}", greeting
+        return f"Interview started with {self.candidate_name}", greeting, gr.update(visible=True)
 
     def zero_agent_greeting(self, resume_data, candidate_name):
         prompt = f"""
@@ -146,6 +143,8 @@ class InterviewCoach:
         2. Very briefly mention something from their resume (one skill or experience)
         3. Ask ONE simple question about their most recent job or experience
         4. Keep it extremely concise (2-3 short sentences maximum)
+
+        The greeting must be brief as it will be converted to voice later.
         """
         return generate_groq_response(prompt, "zero_agent", temperature=0.7)
 
@@ -158,14 +157,19 @@ class InterviewCoach:
         Question Number: {question_count + 1}
         Difficulty: {difficulty}
 
-        Generate a relevant technical interview question based on the candidate's resume.
+        Generate a relevant technical interview question based on the candidate's resume. The question should:
+        1. Be specific to skills or experiences mentioned in their resume
+        2. Feel like it's coming from someone who has read their background
+        3. Be appropriately challenging based on their experience level
+        4. Be directly relevant to their field
+        5. Be clearly phrased as a question (no preambles or explanations)
         """
         return generate_groq_response(prompt, "technical_agent", temperature=0.7)
 
     def clarification_agent_response(self, question, candidate_response, resume_data):
         needs_clarification = any(phrase in candidate_response.lower() for phrase in
             ["i don't understand", "can you explain", "not sure", "what do you mean",
-
+             "confused", "unclear", "can you clarify", "don't know what", "?"])
 
         if needs_clarification:
             prompt = f"""
@@ -241,18 +245,15 @@ class InterviewCoach:
         feedback = generate_groq_response(prompt, "report_agent", temperature=0.7)
         return strip_markdown(feedback)
 
-    def
-        if not self.interview_active:
-            return "Interview not active. Please start the interview first.", None, None
-
+    def process_response(self, answer):
         if not answer.strip():
-            return "Please
+            return "Please provide a response", None, None
 
         appropriateness_check = strict_agent_monitor(answer)
         if "INAPPROPRIATE:" in appropriateness_check:
             reason = appropriateness_check.split("INAPPROPRIATE:")[1].strip()
             self.interview_active = False
-            return f"⚠️ Interview Terminated: {reason}", None,
+            return f"⚠️ Interview Terminated: {reason}", None, gr.update(visible=False)
 
         current_question = self.questions[self.current_step]
 
@@ -271,7 +272,7 @@ class InterviewCoach:
             return None, new_question, None
         elif len(self.responses) >= 6:
             self.interview_active = False
-            return self.generate_final_report(), None,
+            return self.generate_final_report(), None, gr.update(visible=False)
         else:
             interview_history = "\n".join([
                 f"Q: {item['question']}\nA: {item['answer']}"
@@ -313,7 +314,7 @@ class InterviewCoach:
             return None, new_question, None
         elif len(self.responses) >= 6:
            self.interview_active = False
-            return self.generate_final_report(), None,
+            return self.generate_final_report(), None, gr.update(visible=False)
        else:
            interview_history = "\n".join([
                f"Q: {item['question']}\nA: {item['answer']}"
@@ -386,25 +387,24 @@ class InterviewCoach:
 
         for idx, response in enumerate(processed_feedback):
             report_html += f"""
-            <
-            <
-
-            <
-
-
-
-
+            <div style='background:#2D2D2D; border-radius:10px; padding:1.5rem; margin:1rem 0;'>
+            <details>
+            <summary style='color: #FFFFFF; font-weight:500; cursor:pointer;'>Question {idx+1}</summary>
+            <div style='margin-top:1rem;'>
+            <p style='font-weight: 500; color: #FFFFFF; font-size: 1.1rem;'>❝{response['question']}❞</p>
+            <div style='background: #333333; padding:1rem; border-radius:8px; margin:1rem 0;'>
+            <p style='color: #888888; margin:0;'>Your Answer:</p>
+            <p style='color: #FFFFFF; margin:0.5rem 0;'>{response['answer']}</p>
+            </div>
+            {response['correct_html']}
+            {response['improve_html']}
             </div>
-
-
-            {response['improve_html']}
-            </div>
-            </details>
+            </details>
+            </div>
             """
 
         if topics:
             report_html += """
-            <h4 style='color: #FFFFFF; margin:2rem 0 1rem 0;'>📚 Focus Areas for Improvement</h4>
            <div style="background:#2D2D2D; padding:1.5rem; border-radius:10px; margin:1rem 0;">
            <h4 style="margin:0; color:#FFFFFF;">Recommended Topics to Study</h4>
            <p style="margin:1rem 0; color:#CCCCCC;">Based on your interview responses, we recommend focusing on these key areas:</p>
@@ -413,7 +413,7 @@ class InterviewCoach:
 
         for topic in topics:
             report_html += f"""
-            <span style="display:
+            <span style="display:inline-block; background:#333333; padding:5px 10px; margin:5px; border-radius:15px; font-size:0.8rem;">{topic}</span>
             """
 
         report_html += """
@@ -424,50 +424,48 @@ class InterviewCoach:
         report_html += "</div>"
         return report_html
 
-# Create the interface
+# Create the Gradio interface
 coach = InterviewCoach()
 
-with gr.Blocks(theme=gr.themes.Default(primary_hue="blue")) as demo:
+with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="gray")) as demo:
     gr.Markdown("# 💼 AI-Powered Interview Coach")
     gr.Markdown("Upload your resume for a personalized mock interview session")
 
     with gr.Row():
-        with gr.Column():
-
-            upload_btn = gr.Button("Upload Resume")
-            upload_status = gr.Textbox(label="Upload Status", interactive=False)
-
+        with gr.Column(scale=1):
+            file_upload = gr.File(label="Upload Resume (PDF or TXT)", file_types=[".pdf", ".txt"])
             start_btn = gr.Button("🚀 Start Interview Session")
-
-
-            question_display = gr.Textbox(label="Current Question", interactive=False)
-            answer_input = gr.Textbox(label="Your Response", lines=5)
-            submit_btn = gr.Button("Submit Response")
-
-            clarification_display = gr.Textbox(label="Clarification", visible=False, interactive=False)
-
-            report_display = gr.HTML(label="Interview Report")
-
-    def toggle_clarification(needs_clarification):
-        return gr.Textbox(visible=needs_clarification)
-
-    # Event handlers
-    upload_btn.click(
-        fn=coach.process_resume,
-        inputs=file_input,
-        outputs=upload_status
-    )
+            status = gr.Textbox(label="Status", interactive=False)
 
-
-
-
-
-
-
-
-
-
-
+        with gr.Column(scale=2):
+            question_display = gr.Textbox(label="Current Question", interactive=False, lines=3)
+            answer_input = gr.Textbox(label="Your Response", lines=5, visible=False)
+            submit_btn = gr.Button("Submit Response", visible=False)
+            report_display = gr.HTML(label="Interview Report", visible=False)
+
+    def start_interview(file):
+        return coach.start_interview(file)
+
+    def process_response(answer):
+        return coach.process_response(answer)
+
+    start_btn.click(
+        start_interview,
+        inputs=[file_upload],
+        outputs=[status, question_display, answer_input]
+    ).then(
+        lambda: gr.update(visible=True),
+        outputs=[submit_btn]
+    )
+
+    submit_btn.click(
+        process_response,
+        inputs=[answer_input],
+        outputs=[status, question_display, submit_btn]
+    ).then(
+        lambda report: gr.update(value=report, visible=True) if report else None,
+        inputs=[report_display],
+        outputs=[report_display]
+    )
 
-
-demo.launch()
+demo.launch()
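The updated UI replaces the separate upload button with a single start flow driven by chained Gradio events: start_btn.click returns the status text, the first question, and a visibility update, and a .then step reveals the submit button. A minimal, self-contained sketch of that click(...).then(...) pattern follows; the component names and the toy handler here are illustrative only, not the Space's actual code.

import gradio as gr

def start(file):
    # Placeholder handler: the real app parses the resume and generates
    # the first interview question; this just demonstrates the wiring.
    if file is None:
        return "Please upload a resume file first", "", gr.update(visible=False)
    return "Interview started", "Tell me about your most recent role.", gr.update(visible=True)

with gr.Blocks() as demo:
    resume = gr.File(label="Upload Resume (PDF or TXT)", file_types=[".pdf", ".txt"])
    start_btn = gr.Button("Start")
    status = gr.Textbox(label="Status", interactive=False)
    question = gr.Textbox(label="Current Question", interactive=False)
    answer = gr.Textbox(label="Your Response", lines=5, visible=False)
    submit_btn = gr.Button("Submit", visible=False)

    # click() maps the handler's return values onto the output components in
    # order; then() runs a follow-up step, here revealing the submit button.
    start_btn.click(start, inputs=[resume], outputs=[status, question, answer]).then(
        lambda: gr.update(visible=True), outputs=[submit_btn]
    )

if __name__ == "__main__":
    demo.launch()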