# NOTE: Hugging Face Spaces file-viewer metadata (status text, blame hashes,
# line-number gutter) was pasted in above this file's code and has been
# removed — it was never part of the source.
import gradio as gr
import numpy as np
from transformers import pipeline
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import PyPDF2
import pyttsx3
import time
# Load local models for inference (all run on this machine; downloaded on
# first use by the transformers / sentence-transformers hubs).
# Whisper (base) transcribes the candidate's spoken answers to text.
stt_model = pipeline("automatic-speech-recognition", model="openai/whisper-base")
# NOTE(review): conversation_model is loaded but never referenced anywhere
# below — confirm whether it is dead weight that can be dropped.
conversation_model = pipeline("text-generation", model="facebook/blenderbot-400M-distill")
# Load a pre-trained model for vector embeddings.
# MiniLM sentence embeddings are used to match answers to resume sections.
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
# Text-to-Speech engine setup (pyttsx3 uses the host OS speech backend).
tts_engine = pyttsx3.init()
def speak_text(text):
    """Speak the given text aloud through the shared pyttsx3 engine.

    Blocking: runAndWait() runs the TTS event loop to completion, so this
    does not return until the utterance has finished playing.

    Args:
        text: The string to vocalize.
    """
    tts_engine.say(text)
    tts_engine.runAndWait()
# Parse PDF and create resume content
def parse_resume(pdf):
    """Extract text from an uploaded PDF file.

    Args:
        pdf: A file path or binary file-like object accepted by
            PyPDF2.PdfReader.

    Returns:
        dict: A single-section mapping {"Resume Content": text}, where text
        is the newline-joined text of every page that yielded any content.
    """
    reader = PyPDF2.PdfReader(pdf)
    # Call extract_text() exactly once per page: the original filtered on
    # one call and joined a second, re-parsing every page twice. Pages that
    # return None or "" are skipped, as before.
    page_texts = (page.extract_text() for page in reader.pages)
    text = "\n".join(content for content in page_texts if content)
    sections = {"Resume Content": text}
    return sections
# Process job description text
def process_job_description(job_desc):
    """Embed the raw job-description text for later similarity analysis.

    Args:
        job_desc: The job description as a plain string.

    Returns:
        The embedding vector produced by the module-level sentence encoder.
    """
    job_vector = embedding_model.encode(job_desc)
    return job_vector
# Process resume and generate embeddings
def process_resume(pdf):
    """Parse an uploaded resume PDF and embed each extracted section.

    Args:
        pdf: A PDF file (path or file object) handed to parse_resume().

    Returns:
        dict: Section name -> embedding vector for every parsed section.
    """
    embeddings = {}
    for section, content in parse_resume(pdf).items():
        embeddings[section] = embedding_model.encode(content)
    return embeddings
# Generate question from user response
def generate_question(user_input, resume_embeddings):
    """Ask a follow-up about the resume section most similar to the answer.

    Args:
        user_input: The candidate's transcribed answer.
        resume_embeddings: dict of section name -> embedding vector.

    Returns:
        str: A templated follow-up question naming the best-matching section.
    """
    answer_vector = embedding_model.encode(user_input)
    # Score every resume section against the answer by cosine similarity.
    scores = {}
    for section, section_vector in resume_embeddings.items():
        scores[section] = cosine_similarity([answer_vector], [section_vector])[0][0]
    # max() over items keeps the first-seen section on ties and, like the
    # original, raises ValueError on an empty mapping.
    top_section, _ = max(scores.items(), key=lambda item: item[1])
    return f"Based on your experience in {top_section}, can you elaborate more?"
# Gradio interface
class MockInterview:
    """Stateful driver for a single audio mock-interview session.

    Holds the embedded resume and job description plus a flag marking
    whether an interview is currently in progress; the Gradio callbacks
    below delegate to one shared instance.
    """

    def __init__(self):
        # Populated by upload_inputs(); None/False until then.
        self.resume_embeddings = None
        self.job_desc_embedding = None
        self.interview_active = False

    def upload_inputs(self, resume, job_desc):
        """Embed the uploaded resume and job description, then arm the session."""
        self.resume_embeddings = process_resume(resume)
        self.job_desc_embedding = process_job_description(job_desc)
        self.interview_active = True
        return "Resume and job description processed. Starting the interview."

    def start_interview(self):
        """Speak and return the opening question once the session is armed."""
        if self.interview_active:
            opening = "Tell me about yourself."
            speak_text(opening)
            return opening
        return "Please upload your resume and job description first."

    def conduct_interview(self, audio_file):
        """Transcribe one spoken answer and speak/return the follow-up question.

        Returns:
            tuple[str, str]: (transcription, next question); on failure the
            first element carries the error message and the second is "".
        """
        if not self.interview_active:
            return "Please upload your resume and job description first.", ""
        # Transcribe audio via the local Whisper pipeline.
        spoken = stt_model(audio_file)["text"]
        if not spoken.strip():
            return "No audio detected. Please try again.", ""
        # Generate next question from the most relevant resume section.
        follow_up = generate_question(spoken, self.resume_embeddings)
        speak_text(follow_up)
        return spoken, follow_up

    def end_interview(self):
        """Deactivate the session and say goodbye."""
        self.interview_active = False
        speak_text("Thank you for participating in the interview. Goodbye!")
        return "Interview ended. Thank you for participating."
# One shared session per process; the Gradio callbacks below are thin
# module-level wrappers so they can be wired up as plain functions.
mock_interview = MockInterview()

def upload_inputs(resume, job_desc):
    """Gradio callback: process resume + job description on the shared session."""
    return mock_interview.upload_inputs(resume, job_desc)

def start_interview():
    """Gradio callback: ask the opening question on the shared session."""
    return mock_interview.start_interview()

def conduct_interview(audio_file):
    """Gradio callback: handle one recorded answer on the shared session."""
    return mock_interview.conduct_interview(audio_file)

def end_interview():
    """Gradio callback: close out the shared session."""
    return mock_interview.end_interview()
# Assemble the Gradio UI: resume/job-description inputs on one row, the
# audio answer loop on the next, then the submit/end controls.
with gr.Blocks() as interface:
    gr.Markdown("""# Mock Interview AI
Upload your resume and job description, then engage in a realistic audio-based interview simulation.""")

    with gr.Row():
        resume_file = gr.File(label="Upload Resume (PDF)")
        job_desc_box = gr.Textbox(label="Paste Job Description")
    start_button = gr.Button("Upload and Start Interview")

    with gr.Row():
        answer_audio = gr.Audio(type="filepath", label="Respond with Your Answer")
        transcript_box = gr.Textbox(label="Transcription")
        question_box = gr.Textbox(label="Question")
    answer_button = gr.Button("Submit Response")
    stop_button = gr.Button("End Interview")

    # Wire callbacks: uploading arms the session, each answer yields the
    # next question, and ending writes the farewell into the question box.
    start_button.click(upload_inputs, inputs=[resume_file, job_desc_box], outputs=[question_box])
    answer_button.click(conduct_interview, inputs=[answer_audio], outputs=[transcript_box, question_box])
    stop_button.click(end_interview, outputs=[question_box])

if __name__ == "__main__":
    interface.launch()