import os, io, re, time, tempfile import streamlit as st import docx, docx2txt import pandas as pd from functools import lru_cache # Handle imports try: from transformers import pipeline has_pipeline = True except ImportError: from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForSeq2SeqLM import torch has_pipeline = False # Setup page st.set_page_config(page_title="Resume-Job Fit Analyzer", initial_sidebar_state="collapsed") st.markdown("""""", unsafe_allow_html=True) ##################################### # Model Loading & Text Processing ##################################### @st.cache_resource def load_models(): with st.spinner("Loading AI models..."): models = {} # Load summarization model if has_pipeline: models['summarizer'] = pipeline("summarization", model="Falconsai/text_summarization", max_length=100) else: try: models['summarizer_model'] = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/text_summarization") models['summarizer_tokenizer'] = AutoTokenizer.from_pretrained("Falconsai/text_summarization") except Exception as e: st.error(f"Error loading summarization model: {e}") models['summarizer_model'] = models['summarizer_tokenizer'] = None # Load evaluation model if has_pipeline: models['evaluator'] = pipeline("sentiment-analysis", model="CR7CAD/RobertaFinetuned") else: try: models['evaluator_model'] = AutoModelForSequenceClassification.from_pretrained("CR7CAD/RobertaFinetuned") models['evaluator_tokenizer'] = AutoTokenizer.from_pretrained("CR7CAD/RobertaFinetuned") except Exception as e: st.error(f"Error loading sentiment model: {e}") models['evaluator_model'] = models['evaluator_tokenizer'] = None return models def summarize_text(text, models, max_length=100): """Summarize text with fallbacks""" input_text = text[:1024] # Try pipeline if has_pipeline and 'summarizer' in models: try: return models['summarizer'](input_text)[0]['summary_text'] except: pass # Try manual model if 'summarizer_model' in models and models['summarizer_model']: try: tokenizer = models['summarizer_tokenizer'] model = models['summarizer_model'] inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=1024) summary_ids = model.generate(inputs.input_ids, max_length=max_length, min_length=30, num_beams=4) return tokenizer.decode(summary_ids[0], skip_special_tokens=True) except: pass # Fallback - extract sentences sentences = re.split(r'(?= 4] scored.sort(reverse=True) result, length = [], 0 for _, sentence in scored: if length + len(sentence.split()) <= max_length: result.append(sentence) length += len(sentence.split()) if result: ordered = sorted([(sentences.index(s), s) for s in result]) return " ".join(s for _, s in ordered) return "" ##################################### # File Processing & Information Extraction ##################################### @st.cache_data def extract_text_from_file(file_obj): ext = os.path.splitext(file_obj.name)[1].lower() if ext == ".docx": try: document = docx.Document(file_obj) return "\n".join(para.text for para in document.paragraphs if para.text.strip())[:15000] except Exception as e: return f"Error processing DOCX file: {e}" elif ext == ".doc": try: with tempfile.NamedTemporaryFile(delete=False, suffix='.doc') as temp_file: temp_file.write(file_obj.getvalue()) text = docx2txt.process(temp_file.name) os.unlink(temp_file.name) return text[:15000] except Exception as e: return f"Error processing DOC file: {e}" elif ext == ".txt": try: return file_obj.getvalue().decode("utf-8")[:15000] except Exception as e: return f"Error processing TXT file: {e}" else: return "Unsupported file type. Please upload a .docx, .doc, or .txt file." # Information extraction functions def extract_skills(text): """Extract skills from text""" skill_keywords = { "Programming": ["Python", "Java", "JavaScript", "HTML", "CSS", "SQL", "C++", "C#", "React", "Angular"], "Data Science": ["Machine Learning", "Data Analysis", "Statistics", "TensorFlow", "PyTorch", "AI", "NLP"], "Database": ["SQL", "MySQL", "MongoDB", "PostgreSQL", "Oracle", "Redis"], "Web Dev": ["React", "Angular", "Node.js", "Frontend", "Backend", "Full-Stack", "REST API"], "Software Dev": ["Agile", "Scrum", "Git", "DevOps", "Docker", "CI/CD", "Jenkins"], "Cloud": ["AWS", "Azure", "Google Cloud", "Lambda", "S3", "EC2"], "Business": ["Project Management", "Leadership", "Teamwork", "Agile", "Scrum"] } text_lower = text.lower() return [skill for _, skills in skill_keywords.items() for skill in skills if skill.lower() in text_lower] @lru_cache(maxsize=32) def extract_name(text_start): lines = [line.strip() for line in text_start.split('\n')[:5] if line.strip()] if lines: first_line = lines[0] if 5 <= len(first_line) <= 40 and not any(x in first_line.lower() for x in ["resume", "cv", "curriculum", "vitae"]): return first_line for line in lines[:3]: if len(line.split()) <= 4 and not any(x in line.lower() for x in ["address", "phone", "email", "resume", "cv"]): return line return "Unknown" def extract_age(text): for pattern in [r'age:?\s*(\d{1,2})', r'(\d{1,2})\s*years\s*old', r'dob:.*(\d{4})', r'date of birth:.*(\d{4})']: match = re.search(pattern, text.lower()) if match: if len(match.group(1)) == 4: # Birth year try: return str(2025 - int(match.group(1))) except: pass return match.group(1) return "Not specified" def extract_industry(text): industries = { "Technology": ["software", "programming", "developer", "IT", "tech", "computer", "digital"], "Finance": ["banking", "financial", "accounting", "finance", "analyst"], "Healthcare": ["medical", "health", "hospital", "clinical", "nurse", "doctor"], "Education": ["teaching", "teacher", "professor", "education", "university", "school"], "Marketing": ["marketing", "advertising", "digital marketing", "social media", "brand"], "Engineering": ["engineer", "engineering", "mechanical", "civil", "electrical"], "Data Science": ["data science", "machine learning", "AI", "analytics", "big data"], "Management": ["manager", "management", "leadership", "executive", "director"] } text_lower = text.lower() counts = {ind: sum(text_lower.count(kw) for kw in kws) for ind, kws in industries.items()} return max(counts.items(), key=lambda x: x[1])[0] if any(counts.values()) else "Not specified" def extract_job_position(text): text_lower = text.lower() for pattern in [r'objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)', r'career\s*objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)', r'summary:?\s*(.*?)(?=\n\n|\n\w+:|\Z)', r'seeking.*position.*as\s*([^.]*)']: match = re.search(pattern, text_lower, re.IGNORECASE | re.DOTALL) if match: text = match.group(1).strip() for title in ["developer", "engineer", "analyst", "manager", "specialist", "designer"]: if title in text: return next((m.group(1).strip().title() for m in [re.search(r'(\w+\s+' + title + r')', text)] if m), title.title()) return " ".join(text.split()[:10]).title() + "..." if len(text.split()) > 10 else text.title() # Check for job title near experience for pattern in [r'experience:.*?(\w+\s+\w+(?:\s+\w+)?)(?=\s*at|\s*\()', r'(\w+\s+\w+(?:\s+\w+)?)\s*\(\s*(?:current|present)']: match = re.search(pattern, text_lower, re.IGNORECASE) if match: return match.group(1).strip().title() return "Not specified" ##################################### # Core Analysis Functions ##################################### def summarize_resume_text(resume_text, models): start = time.time() # Basic info extraction name = extract_name(resume_text[:500]) age = extract_age(resume_text) industry = extract_industry(resume_text) job_position = extract_job_position(resume_text) skills = extract_skills(resume_text) # Generate summary try: if has_pipeline and 'summarizer' in models: model_summary = models['summarizer'](resume_text[:2000], max_length=100, min_length=30)[0]['summary_text'] else: model_summary = summarize_text(resume_text, models, max_length=100) except: model_summary = "Error generating summary." # Format result summary = f"Name: {name}\n\nAge: {age}\n\nExpected Industry: {industry}\n\n" summary += f"Expected Job Position: {job_position}\n\nSkills: {', '.join(skills)}\n\nSummary: {model_summary}" return summary, time.time() - start def extract_job_requirements(job_description, models): tech_skills = [ "Python", "Java", "JavaScript", "SQL", "HTML", "CSS", "React", "Angular", "Machine Learning", "AWS", "Azure", "Docker", "MySQL", "MongoDB", "Project Management", "Agile", "Leadership", "Git", "DevOps" ] clean_text = job_description.lower() # Extract job title job_title = "Not specified" for pattern in [r'^([^:.\n]+?)(position|role|job)', r'^([^:.\n]+?)\n', r'hiring.*? ([^:.\n]+?)(:-|[.:]|\n|$)']: match = re.search(pattern, clean_text, re.IGNORECASE) if match: title = match.group(1).strip() if len(match.groups()) >= 1 else match.group(2).strip() if 3 <= len(title) <= 50: job_title = title.capitalize() break # Extract years required years_required = 0 for pattern in [r'(\d+)(?:\+)?\s*(?:years|yrs).*?experience', r'experience.*?(\d+)(?:\+)?\s*(?:years|yrs)']: match = re.search(pattern, clean_text, re.IGNORECASE) if match: try: years_required = int(match.group(1)) break except: pass # Extract skills required_skills = [skill for skill in tech_skills if re.search(r'\b' + re.escape(skill.lower()) + r'\b', clean_text)] # Fallback if no skills found if not required_skills: words = [w for w in re.findall(r'\b\w{4,}\b', clean_text) if w not in ["with", "that", "this", "have", "from", "they", "will", "what", "your"]] word_counts = {} for w in words: word_counts[w] = word_counts.get(w, 0) + 1 required_skills = [w.capitalize() for w, _ in sorted(word_counts.items(), key=lambda x: x[1], reverse=True)[:5]] return { "title": job_title, "years_experience": years_required, "required_skills": required_skills, "summary": summarize_text(job_description, models, max_length=100) } def evaluate_job_fit(resume_summary, job_requirements, models): start = time.time() # Basic extraction required_skills = job_requirements["required_skills"] years_required = job_requirements["years_experience"] job_title = job_requirements["title"] skills_mentioned = extract_skills(resume_summary) # Calculate matches matching_skills = [skill for skill in required_skills if skill in skills_mentioned] skill_match = len(matching_skills) / len(required_skills) if required_skills else 0 # Extract experience years_experience = 0 exp_match = re.search(r'(\d+)\+?\s*years?\s*(?:of)?\s*experience', resume_summary, re.IGNORECASE) if exp_match: try: years_experience = int(exp_match.group(1)) except: pass # Calculate scores exp_match_ratio = min(1.0, years_experience / max(1, years_required)) if years_required > 0 else 0.5 title_words = [w for w in job_title.lower().split() if len(w) > 3] title_match = sum(1 for w in title_words if w in resume_summary.lower()) / len(title_words) if title_words else 0 # Final scores skill_score = min(2, skill_match * 3) exp_score = min(2, exp_match_ratio * 2) title_score = min(2, title_match * 2) # Extract candidate info name = re.search(r'Name:\s*(.*?)(?=\n|\Z)', resume_summary) name = name.group(1).strip() if name else "The candidate" industry = re.search(r'Expected Industry:\s*(.*?)(?=\n|\Z)', resume_summary) industry = industry.group(1).strip() if industry else "unspecified industry" # Calculate weighted score weighted_score = (skill_score * 0.5) + (exp_score * 0.3) + (title_score * 0.2) fit_score = 2 if weighted_score >= 1.5 else (1 if weighted_score >= 0.8 else 0) # Generate assessment missing = [skill for skill in required_skills if skill not in skills_mentioned] if fit_score == 2: assessment = f"{fit_score}: GOOD FIT - {name} demonstrates strong alignment with the {job_title} position. Their background in {industry} appears well-suited for this role's requirements." elif fit_score == 1: assessment = f"{fit_score}: POTENTIAL FIT - {name} shows potential for the {job_title} role but has gaps in certain areas. Additional training might be needed in {', '.join(missing[:2])}." else: assessment = f"{fit_score}: NO FIT - {name}'s background shows limited alignment with this {job_title} position. Their experience and skills differ significantly from the requirements." return assessment, fit_score, time.time() - start def analyze_job_fit(resume_summary, job_description, models): start = time.time() job_requirements = extract_job_requirements(job_description, models) assessment, fit_score, _ = evaluate_job_fit(resume_summary, job_requirements, models) return assessment, fit_score, time.time() - start ##################################### # Main Function ##################################### def main(): st.title("Resume-Job Fit Analyzer") st.markdown("Upload your resume file in **.docx**, **.doc**, or **.txt** format and enter a job description to see how well you match.") # Load models and get inputs models = load_models() uploaded_file = st.file_uploader("Upload your resume", type=["docx", "doc", "txt"]) job_description = st.text_area("Enter Job Description", height=200, placeholder="Paste the job description here...") # Process when button clicked if uploaded_file and job_description and st.button("Analyze Job Fit"): progress = st.progress(0) status = st.empty() # Step 1: Extract text status.text("Step 1/3: Extracting text from resume...") resume_text = extract_text_from_file(uploaded_file) progress.progress(25) if resume_text.startswith("Error") or resume_text == "Unsupported file type. Please upload a .docx, .doc, or .txt file.": st.error(resume_text) else: # Step 2: Generate summary status.text("Step 2/3: Analyzing resume...") summary, summary_time = summarize_resume_text(resume_text, models) progress.progress(50) st.subheader("Your Resume Summary") st.markdown(summary) # Step 3: Evaluate fit status.text("Step 3/3: Evaluating job fit...") assessment, fit_score, eval_time = analyze_job_fit(summary, job_description, models) progress.progress(100) status.empty() # Display results st.subheader("Job Fit Assessment") fit_labels = {0: "NO FIT", 1: "POTENTIAL FIT", 2: "GOOD FIT"} colors = {0: "red", 1: "orange", 2: "green"} st.markdown(f"

{fit_labels[fit_score]}

", unsafe_allow_html=True) st.markdown(assessment) st.info(f"Analysis completed in {(summary_time + eval_time):.2f} seconds") # Recommendations st.subheader("Recommended Next Steps") if fit_score == 2: st.markdown(""" - Apply for this position as you appear to be a good match - Prepare for interviews by focusing on your relevant experience - Highlight your matching skills in your cover letter """) elif fit_score == 1: st.markdown(""" - Consider applying but address skill gaps in your cover letter - Emphasize transferable skills and relevant experience - Prepare to discuss how you can quickly develop missing skills """) else: st.markdown(""" - Look for positions better aligned with your current skills - If interested in this field, focus on developing the required skills - Consider similar roles with fewer experience requirements """) # Run the main function if __name__ == "__main__": main()