# Mock Interview AI — Hugging Face Space
# (the previously deployed build ended in a runtime error; see notes below)
import gradio as gr
import numpy as np
from transformers import pipeline
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import PyPDF2

# Load local models once at module import so every request reuses them.
stt_model = pipeline("automatic-speech-recognition", model="openai/whisper-base")
# NOTE(review): BlenderBot is a seq2seq (encoder-decoder) model; the
# "text-generation" task normally expects a causal LM and may refuse to load
# it — "text2text-generation" is the matching task. Confirm before shipping.
conversation_model = pipeline("text-generation", model="facebook/blenderbot-400M-distill")

# Sentence-embedding model used to match user answers against resume text.
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
# Parse PDF and create resume content | |
def parse_resume(pdf):
    """Extract text from an uploaded PDF file.

    Args:
        pdf: File path or binary file-like object accepted by PyPDF2.PdfReader.

    Returns:
        dict: Single-entry mapping {"Resume Content": <all extracted text>},
        pages joined with newlines.
    """
    reader = PyPDF2.PdfReader(pdf)
    # Extract each page exactly once (the original called extract_text() twice
    # per page); pages with no extractable text (e.g. scanned images) yield
    # ""/None and are skipped.
    extracted = (page.extract_text() for page in reader.pages)
    text = "\n".join(chunk for chunk in extracted if chunk)
    sections = {"Resume Content": text}
    return sections
# Process resume and generate embeddings | |
def process_resume(pdf):
    """Embed every section of the uploaded resume.

    Args:
        pdf: Resume file, forwarded to parse_resume().

    Returns:
        dict: Section name -> embedding vector from the module-level
        `embedding_model`.
    """
    embeddings = {}
    for section_name, section_text in parse_resume(pdf).items():
        embeddings[section_name] = embedding_model.encode(section_text)
    return embeddings
# Generate a conversation response | |
def generate_conversation_response(user_input):
    """Produce a recruiter-style reply to *user_input* with the local chat model.

    NOTE(review): a "text-generation" pipeline typically echoes the prompt at
    the start of "generated_text" — confirm callers expect the full string.
    """
    recruiter_prompt = f"The user said: {user_input}. Respond appropriately as a recruiter."
    outputs = conversation_model(recruiter_prompt, max_length=100, num_return_sequences=1)
    first_candidate = outputs[0]
    return first_candidate["generated_text"]
# Generate question from user response | |
def generate_question(user_input, resume_embeddings):
    """Find the most relevant resume section and ask a follow-up question.

    Args:
        user_input: Transcribed text of the candidate's answer.
        resume_embeddings: Mapping of section name -> embedding vector.

    Returns:
        str: A follow-up question referencing the best-matching section.
    """
    # Robustness: max() on an empty mapping raises ValueError; fall back to a
    # generic question when no resume content was embedded.
    if not resume_embeddings:
        return "Can you tell me more about your experience?"
    user_embedding = embedding_model.encode(user_input)
    similarities = {
        section: cosine_similarity([user_embedding], [embedding])[0][0]
        for section, embedding in resume_embeddings.items()
    }
    most_relevant_section = max(similarities, key=similarities.get)
    return f"Based on your experience in {most_relevant_section}, can you elaborate more?"
# Gradio interface | |
def mock_interview(audio, pdf):
    """Gradio handler: transcribe the spoken answer and ask a resume-based follow-up.

    Args:
        audio: Filepath to the recorded answer (Gradio passes None if absent).
        pdf: Uploaded resume file (None if absent).

    Returns:
        tuple[str, str]: (transcription, next interview question).
    """
    # Robustness: Gradio sends None for inputs the user did not provide; the
    # original crashed inside PyPDF2/whisper in that case.
    if audio is None or pdf is None:
        return "", "Please record an answer and upload your resume (PDF) first."
    resume_embeddings = process_resume(pdf)
    transcription = stt_model(audio)["text"]
    question = generate_question(transcription, resume_embeddings)
    return transcription, question
# Gradio UI: microphone answer + resume upload -> transcription + next question.
interface = gr.Interface(
    fn=mock_interview,
    # Gradio 4.x renamed Audio's `source=` kwarg to `sources=[...]`; the old
    # spelling raises TypeError at startup (the Space's "Runtime error").
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath"),
        gr.File(label="Upload Resume (PDF)"),
    ],
    outputs=["text", "text"],
    title="Mock Interview AI",
    description="Upload your resume and answer questions in a mock interview.",
)

if __name__ == "__main__":
    interface.launch()