import os
import re
import tempfile
import time

import docx
import docx2txt
import numpy as np
import streamlit as st
from scipy.spatial.distance import cosine
from transformers import pipeline

# Set page title and collapse the sidebar
st.set_page_config(
    page_title="Resume Analyzer and Company Suitability Checker",
    initial_sidebar_state="collapsed"
)

# Hide the sidebar completely with custom CSS (targets Streamlit's sidebar container)
st.markdown(
    """
    <style>
    [data-testid="stSidebar"] {display: none;}
    </style>
    """,
    unsafe_allow_html=True
)

#####################################
# Preload Models
#####################################
@st.cache_resource(show_spinner=True)
def load_models():
    """Load models at startup."""
    with st.spinner("Loading AI models... This may take a minute on first run."):
        models = {}
        # Summarization model
        models['summarizer'] = pipeline("summarization", model="t5-base")
        # Feature extraction model for similarity scoring
        models['feature_extractor'] = pipeline("feature-extraction", model="bert-base-uncased")
    return models

# Preload models immediately when the app starts
models = load_models()

#####################################
# Function: Extract Text from File
#####################################
def extract_text_from_file(file_obj):
    """
    Extract text from .docx, .doc, and .txt files.
    Returns the extracted text, or an error message (starting with "Error")
    if extraction fails.
    """
    filename = file_obj.name
    ext = os.path.splitext(filename)[1].lower()
    text = ""

    if ext == ".docx":
        try:
            document = docx.Document(file_obj)
            text = "\n".join(para.text for para in document.paragraphs if para.text.strip())
        except Exception as e:
            text = f"Error processing DOCX file: {e}"
    elif ext == ".doc":
        try:
            # .doc files need a path on disk, so write the upload to a temp file
            with tempfile.NamedTemporaryFile(delete=False, suffix='.doc') as temp_file:
                temp_file.write(file_obj.getvalue())
                temp_path = temp_file.name
            # Try docx2txt, which is generally fast (legacy binary .doc files may still fail)
            try:
                text = docx2txt.process(temp_path)
            except Exception:
                text = "Error processing DOC file: could not extract text. Please convert to .docx format."
            # Clean up the temp file
            os.unlink(temp_path)
        except Exception as e:
            text = f"Error processing DOC file: {e}"
    elif ext == ".txt":
        try:
            text = file_obj.getvalue().decode("utf-8")
        except Exception as e:
            text = f"Error processing TXT file: {e}"
    else:
        text = "Error: unsupported file type. Please upload a .docx, .doc, or .txt file."

    return text

#####################################
# Functions for Information Extraction
#####################################
def extract_name(text):
    """Extract the candidate's name from the resume text."""
    lines = text.split('\n')
    # Check the first few non-empty lines for a potential name
    potential_name_lines = [line.strip() for line in lines[:5] if line.strip()]

    if potential_name_lines:
        # The first line is often the name if it is short and not a common resume header
        first_line = potential_name_lines[0]
        if 5 <= len(first_line) <= 40 and not any(
            x in first_line.lower() for x in ["resume", "cv", "curriculum", "vitae", "profile"]
        ):
            return first_line

        # Otherwise, look for a short line without contact-detail keywords
        for line in potential_name_lines[:3]:
            if len(line.split()) <= 4 and not any(
                x in line.lower() for x in ["address", "phone", "email", "resume", "cv"]
            ):
                return line

    # No clear name found
    return "Unknown (please extract from resume)"

def extract_age(text):
    """Extract the candidate's age from the resume text."""
    # Match patterns like "Age: 29", "29 years old", or a date of birth.
    # Patterns are lowercase because they are matched against text.lower().
    age_patterns = [
        r'age:?\s*(\d{1,2})',
        r'(\d{1,2})\s*years\s*old',
        r'dob:?\s*(\d{1,2})[/-](\d{1,2})[/-](\d{2,4})'
    ]

    for pattern in age_patterns:
        match = re.search(pattern, text.lower())
        if match:
            if pattern == age_patterns[2]:
                # A date of birth was found; computing the exact age is out of scope here
                return "Mentioned in DOB format"
            return match.group(1)

    return "Not specified"
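
# Illustrative behaviour of extract_age on hypothetical inputs (example
# strings, not taken from any real resume):
#   "Age: 29"         -> "29"
#   "29 years old"    -> "29"
#   "DOB: 12/05/1994" -> "Mentioned in DOB format"
#   no match          -> "Not specified"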

def extract_industry(text, summary):
    """Infer the candidate's expected job industry from the resume."""
    # Industry-related keyword lists
    industry_keywords = {
        "technology": ["software", "programming", "developer", "IT", "tech", "computer", "web", "data science"],
        "finance": ["banking", "investment", "financial", "accounting", "finance", "analyst"],
        "healthcare": ["medical", "health", "hospital", "clinical", "nurse", "doctor", "patient"],
        "education": ["teaching", "teacher", "professor", "academic", "education", "school", "university"],
        "marketing": ["marketing", "advertising", "brand", "digital marketing", "SEO", "social media"],
        "engineering": ["mechanical", "civil", "electrical", "engineer", "engineering"],
        "consulting": ["consultant", "consulting", "advisory"],
        "data science": ["data science", "machine learning", "AI", "analytics", "big data"],
        "information systems": ["information systems", "ERP", "CRM", "database", "systems management"]
    }

    # Count occurrences of each industry's keywords
    text_lower = text.lower()
    counts = {
        industry: sum(text_lower.count(keyword.lower()) for keyword in keywords)
        for industry, keywords in industry_keywords.items()
    }

    # Pick the industry with the highest keyword count
    if counts:
        likely_industry = max(counts.items(), key=lambda x: x[1])
        if likely_industry[1] > 0:
            return likely_industry[0].capitalize()

    # Fall back to an educational background that might indicate an industry
    degrees = ["computer science", "business", "engineering", "medicine", "law",
               "education", "finance", "marketing", "information systems"]
    for degree in degrees:
        if degree in text_lower:
            return f"{degree.capitalize()}-related field"

    return "Not clearly specified (review resume for details)"

def extract_skills(text, summary):
    """Extract key skills from the resume, grouped by category."""
    # Common skill categories and associated keywords
    skill_categories = {
        "Programming": ["Python", "Java", "C++", "JavaScript", "HTML", "CSS", "SQL", "R", "C#",
                        "PHP", "Ruby", "Swift", "TypeScript", "Go", "Scala", "Kotlin", "Rust"],
        "Data Science": ["Machine Learning", "Deep Learning", "NLP", "Data Analysis", "Statistics",
                         "Big Data", "Data Visualization", "TensorFlow", "PyTorch", "Neural Networks",
                         "Regression", "Classification", "Clustering"],
        "Database": ["SQL", "MySQL", "PostgreSQL", "MongoDB", "Oracle", "SQLite", "NoSQL",
                     "Database Design", "Data Modeling", "ETL", "Data Warehousing"],
        "Web Development": ["React", "Angular", "Vue.js", "Node.js", "Django", "Flask", "Express",
                            "RESTful API", "Frontend", "Backend", "Full-Stack", "Responsive Design"],
        "Software Development": ["Agile", "Scrum", "Kanban", "Git", "CI/CD", "TDD", "OOP",
                                 "Design Patterns", "Microservices", "DevOps", "Docker", "Kubernetes"],
        "Cloud": ["AWS", "Azure", "Google Cloud", "Cloud Computing", "S3", "EC2", "Lambda",
                  "Serverless", "Cloud Architecture", "IaaS", "PaaS", "SaaS"],
        "Business": ["Project Management", "Business Analysis", "Communication", "Teamwork",
                     "Leadership", "Strategy", "Negotiation", "Presentation", "Time Management"],
        "Tools": ["Excel", "PowerPoint", "Tableau", "Power BI", "JIRA", "Confluence", "Slack",
                  "Microsoft Office", "Adobe", "Photoshop", "Salesforce"]
    }

    # Find skills mentioned in the resume
    found_skills = []
    text_lower = text.lower()

    for category, skills in skill_categories.items():
        category_skills = []
        for skill in skills:
            # Match case-insensitively at non-alphanumeric boundaries (plain
            # substring matching would let one-letter skills such as "R" match
            # any word containing that letter), preserving the original casing
            pattern = r'(?<![a-z0-9])' + re.escape(skill.lower()) + r'(?![a-z0-9])'
            if re.search(pattern, text_lower):
                category_skills.append(skill)
        if category_skills:
            found_skills.append(f"{category}: {', '.join(category_skills)}")

    if found_skills:
        return "\n• " + "\n• ".join(found_skills)
    return "No specific technical skills clearly identified (review resume for details)"
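
# Illustrative output of extract_skills for a hypothetical resume that
# mentions Python, PostgreSQL, and AWS:
#   • Programming: Python
#   • Database: PostgreSQL
#   • Cloud: AWS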
["Machine Learning", "Deep Learning", "NLP", "Data Analysis", "Statistics", "Big Data", "Data Visualization", "TensorFlow", "PyTorch", "Neural Networks", "Regression", "Classification", "Clustering"], "Database": ["SQL", "MySQL", "PostgreSQL", "MongoDB", "Oracle", "SQLite", "NoSQL", "Database Design", "Data Modeling", "ETL", "Data Warehousing"], "Web Development": ["React", "Angular", "Vue.js", "Node.js", "Django", "Flask", "Express", "RESTful API", "Frontend", "Backend", "Full-Stack", "Responsive Design"], "Software Development": ["Agile", "Scrum", "Kanban", "Git", "CI/CD", "TDD", "OOP", "Design Patterns", "Microservices", "DevOps", "Docker", "Kubernetes"], "Cloud": ["AWS", "Azure", "Google Cloud", "Cloud Computing", "S3", "EC2", "Lambda", "Serverless", "Cloud Architecture", "IaaS", "PaaS", "SaaS"], "Business": ["Project Management", "Business Analysis", "Communication", "Teamwork", "Leadership", "Strategy", "Negotiation", "Presentation", "Time Management"], "Tools": ["Excel", "PowerPoint", "Tableau", "Power BI", "JIRA", "Confluence", "Slack", "Microsoft Office", "Adobe", "Photoshop", "Salesforce"] } # Find skills mentioned in the resume found_skills = [] text_lower = text.lower() for category, skills in skill_categories.items(): category_skills = [] for skill in skills: # Check for case-insensitive match but preserve original case in output if skill.lower() in text_lower: category_skills.append(skill) if category_skills: found_skills.append(f"{category}: {', '.join(category_skills)}") if found_skills: return "\n• " + "\n• ".join(found_skills) else: return "No specific technical skills clearly identified (review resume for details)" ##################################### # Function: Summarize Resume Text ##################################### def summarize_resume_text(resume_text, models): """ Generates a structured summary of the resume text including name, age, expected job industry, and skills of the candidate. """ start_time = time.time() summarizer = models['summarizer'] # First, generate a general summary max_input_length = 1024 # Model limit if len(resume_text) > max_input_length: chunks = [resume_text[i:i+max_input_length] for i in range(0, min(len(resume_text), 3*max_input_length), max_input_length)] summaries = [] for chunk in chunks: chunk_summary = summarizer(chunk, max_length=150, min_length=30, do_sample=False)[0]['summary_text'] summaries.append(chunk_summary) base_summary = " ".join(summaries) else: base_summary = summarizer(resume_text, max_length=150, min_length=30, do_sample=False)[0]['summary_text'] # Extract specific information using custom extraction logic name = extract_name(resume_text) age = extract_age(resume_text) industry = extract_industry(resume_text, base_summary) skills = extract_skills(resume_text, base_summary) # Format the structured summary formatted_summary = f"Name: {name}\n" formatted_summary += f"Age: {age}\n" formatted_summary += f"Expected Job Industry: {industry}\n" formatted_summary += f"Skills: {skills}" execution_time = time.time() - start_time return formatted_summary, execution_time ##################################### # Function: Compare Candidate Summary to Company Prompt ##################################### def compute_suitability(candidate_summary, company_prompt, models): """ Compute the similarity between candidate summary and company prompt. Returns a score in the range [0, 1] and execution time. 
""" start_time = time.time() feature_extractor = models['feature_extractor'] # Extract features (embeddings) candidate_features = feature_extractor(candidate_summary) company_features = feature_extractor(company_prompt) # Convert to numpy arrays and flatten if needed candidate_vec = np.mean(np.array(candidate_features[0]), axis=0) company_vec = np.mean(np.array(company_features[0]), axis=0) # Compute cosine similarity (1 - cosine distance) similarity = 1 - cosine(candidate_vec, company_vec) execution_time = time.time() - start_time return similarity, execution_time ##################################### # Main Streamlit Interface ##################################### st.title("Resume Analyzer and Company Suitability Checker") st.markdown( """ Upload your resume file in **.docx**, **.doc**, or **.txt** format. The app performs the following tasks: 1. Extracts text from the resume. 2. Uses AI to generate a structured candidate summary with name, age, expected job industry, and skills. 3. Compares the candidate summary with a company profile to produce a suitability score. """ ) # File uploader uploaded_file = st.file_uploader("Upload your resume (.docx, .doc, or .txt)", type=["docx", "doc", "txt"]) # Company description text area company_prompt = st.text_area( "Enter the company description or job requirements:", height=150, help="Enter a detailed description of the company culture, role requirements, and desired skills.", ) # Process button if uploaded_file is not None and company_prompt and st.button("Analyze Resume"): with st.spinner("Processing..."): # Extract text from resume resume_text = extract_text_from_file(uploaded_file) if resume_text.startswith("Error") or resume_text == "Unsupported file type. Please upload a .docx, .doc, or .txt file.": st.error(resume_text) else: # Generate summary summary, summarization_time = summarize_resume_text(resume_text, models) # Display summary st.subheader("Candidate Summary") st.markdown(summary) st.info(f"Summarization completed in {summarization_time:.2f} seconds") # Only compute similarity if company description is provided if company_prompt: similarity_score, similarity_time = compute_suitability(summary, company_prompt, models) # Display similarity score st.subheader("Suitability Assessment") st.markdown(f"**Matching Score:** {similarity_score:.2%}") st.info(f"Similarity computation completed in {similarity_time:.2f} seconds") # Provide interpretation if similarity_score >= 0.85: st.success("Excellent match! This candidate's profile is strongly aligned with the company requirements.") elif similarity_score >= 0.70: st.success("Good match! This candidate shows strong potential for the position.") elif similarity_score >= 0.50: st.warning("Moderate match. The candidate meets some requirements but there may be gaps.") else: st.error("Low match. The candidate's profile may not align well with the requirements.")