import streamlit as st
import pandas as pd
import numpy as np
import torch
import nltk
import os
import tempfile
import base64
import csv
import gc
import time
import faiss
import pdfplumber
import PyPDF2
from docx import Document
from rank_bm25 import BM25Okapi
from sentence_transformers import SentenceTransformer, CrossEncoder
from nltk.tokenize import word_tokenize
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

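# word_tokenize needs the NLTK "punkt" tokenizer data; note that newer NLTK
# releases ship it as "punkt_tab", so a second find/download pair may be
# required if you target those versions.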
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt')

st.set_page_config(
    page_title="AI Resume Screener",
    page_icon="🎯",
    layout="wide",
    initial_sidebar_state="expanded"
)

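# Sidebar: user-tunable settings plus a summary of the ranking pipeline,
# the models it uses, and the final scoring formula.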
with st.sidebar:
    st.title("⚙️ Configuration")

    st.subheader("Ranking Weights")
    # Note: the advanced pipeline below uses fixed scoring bands; these sliders
    # are not currently wired into the ranking.
    semantic_weight = st.slider("Semantic Similarity Weight", 0.0, 1.0, 0.7, 0.1)
    keyword_weight = 1.0 - semantic_weight
    st.write(f"Keyword Weight: {keyword_weight:.1f}")

    st.subheader("Advanced Options")
    top_k = st.selectbox("Number of results to display", options=[1, 2, 3, 4, 5], index=4)

    st.subheader("LLM Settings")
    use_llm_explanations = st.checkbox("Generate AI Explanations", value=True)
    if use_llm_explanations:
        # Collected for future use; the token is not yet passed to the model loaders.
        hf_token = st.text_input("Hugging Face Token (optional)", type="password",
                                 help="Enter your HF token for better rate limits")

    st.markdown("---")
    st.markdown("### 🤖 Advanced Pipeline")
    st.markdown("- **Stage 1**: FAISS Recall (Top 50)")
    st.markdown("- **Stage 2**: Cross-Encoder Re-ranking (Top 20)")
    st.markdown("- **Stage 3**: BM25 Keyword Matching")
    st.markdown("- **Stage 4**: LLM Intent Analysis")
    st.markdown("- **Final**: Combined Scoring (Top 5)")
    st.markdown("### 📊 Models Used")
    st.markdown("- **Embedding**: BAAI/bge-large-en-v1.5")
    st.markdown("- **Cross-Encoder**: ms-marco-MiniLM-L6-v2")
    st.markdown("- **LLM Explanations**: Qwen/Qwen3-14B")
    st.markdown("- **Intent Analysis**: Qwen/Qwen3-1.7B")
    st.markdown("### 📊 Scoring Formula")
    st.markdown("**Final Score = Cross-Encoder (0-1) + BM25 (0.1-0.2) + Intent (0-0.3)**")

if 'embedding_model' not in st.session_state:
    st.session_state.embedding_model = None
if 'cross_encoder' not in st.session_state:
    st.session_state.cross_encoder = None
if 'results' not in st.session_state:
    st.session_state.results = []
if 'resume_texts' not in st.session_state:
    st.session_state.resume_texts = []
if 'file_names' not in st.session_state:
    st.session_state.file_names = []
if 'explanations_generated' not in st.session_state:
    st.session_state.explanations_generated = False
if 'current_job_description' not in st.session_state:
    st.session_state.current_job_description = ""
if 'qwen3_tokenizer' not in st.session_state:
    st.session_state.qwen3_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-14B")
if 'qwen3_model' not in st.session_state:
    st.session_state.qwen3_model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen3-14B",
        torch_dtype="auto",
        device_map="auto"
    )

if 'qwen3_intent_tokenizer' not in st.session_state:
    st.session_state.qwen3_intent_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-1.7B")
if 'qwen3_intent_model' not in st.session_state:
    st.session_state.qwen3_intent_model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen3-1.7B",
        torch_dtype="auto",
        device_map="auto"
    )

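# Cached loaders: @st.cache_resource keeps a single model instance per process,
# shared across reruns and sessions.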
@st.cache_resource
def load_embedding_model():
    """Load and cache the BGE embedding model"""
    try:
        with st.spinner("🔄 Loading BAAI/bge-large-en-v1.5 model..."):
            model = SentenceTransformer('BAAI/bge-large-en-v1.5')
            st.success("✅ Embedding model loaded successfully!")
            return model
    except Exception as e:
        st.error(f"❌ Error loading embedding model: {str(e)}")
        return None


@st.cache_resource
def load_cross_encoder():
    """Load and cache the Cross-Encoder model"""
    try:
        with st.spinner("🔄 Loading Cross-Encoder ms-marco-MiniLM-L6-v2..."):
            model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L6-v2')
            st.success("✅ Cross-Encoder model loaded successfully!")
            return model
    except Exception as e:
        st.error(f"❌ Error loading Cross-Encoder model: {str(e)}")
        return None


def generate_qwen3_response(prompt, tokenizer, model, max_new_tokens=200):
    """Run a single-turn chat completion against a Qwen3 model."""
    messages = [{"role": "user", "content": prompt}]
    # Thinking mode is disabled: with the small max_new_tokens budgets used
    # here, an enabled <think> trace would consume most of the budget before
    # the answer itself is produced.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=max_new_tokens
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
    response = tokenizer.decode(output_ids, skip_special_tokens=True).strip("\n")
    return response

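# Core screening logic: text extraction, embeddings, and the five-stage
# retrieval and re-ranking pipeline.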
class ResumeScreener:
    def __init__(self):
        self.embedding_model = load_embedding_model()
        self.cross_encoder = load_cross_encoder()

    def extract_text_from_file(self, file_path, file_type):
        """Extract text from various file types"""
        try:
            if file_type == "pdf":
                with open(file_path, 'rb') as file:
                    with pdfplumber.open(file) as pdf:
                        text = ""
                        for page in pdf.pages:
                            text += page.extract_text() or ""

                    # Fall back to PyPDF2 if pdfplumber found no text.
                    if not text.strip():
                        file.seek(0)
                        reader = PyPDF2.PdfReader(file)
                        text = ""
                        for page in reader.pages:
                            text += page.extract_text() or ""
                    return text

            elif file_type == "docx":
                doc = Document(file_path)
                return " ".join([paragraph.text for paragraph in doc.paragraphs])

            elif file_type == "txt":
                with open(file_path, 'r', encoding='utf-8') as file:
                    return file.read()

            elif file_type == "csv":
                with open(file_path, 'r', encoding='utf-8') as file:
                    csv_reader = csv.reader(file)
                    return " ".join([" ".join(row) for row in csv_reader])

            # Unsupported file type: return an empty string rather than None so
            # callers can safely call .strip() on the result.
            return ""

        except Exception as e:
            st.error(f"Error extracting text from {file_path}: {str(e)}")
            return ""

    def get_embedding(self, text):
        """Generate embedding for text using the BGE model"""
        if self.embedding_model is None:
            st.error("No embedding model loaded!")
            return np.zeros(1024)  # bge-large-en-v1.5 produces 1024-dim vectors

        try:
            # BGE models expect a query instruction prefix for short,
            # query-like inputs; longer texts are embedded as passages.
            if len(text) < 500:
                text = "Represent this sentence for searching relevant passages: " + text

            # Truncate very long inputs before encoding.
            text = text[:8192] if text else ""

            embedding = self.embedding_model.encode(text,
                                                    convert_to_numpy=True,
                                                    normalize_embeddings=True)
            return embedding

        except Exception as e:
            st.error(f"Error generating embedding: {str(e)}")
            return np.zeros(1024)

    def calculate_bm25_scores(self, resume_texts, job_description):
        """Calculate BM25 scores for keyword matching"""
        try:
            job_tokens = word_tokenize(job_description.lower())
            # Tokenize every text (empty texts become empty token lists) so the
            # returned scores stay aligned with resume_texts.
            corpus = [word_tokenize(text.lower()) if text and text.strip() else []
                      for text in resume_texts]

            if not any(corpus):
                return [0.0] * len(resume_texts)

            bm25 = BM25Okapi(corpus)
            scores = bm25.get_scores(job_tokens)
            return scores.tolist()

        except Exception as e:
            st.error(f"Error calculating BM25 scores: {str(e)}")
            return [0.0] * len(resume_texts)

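    # Score bands used by the pipeline: the cross-encoder contributes 0-1, BM25
    # is squashed into 0.1-0.2, and intent adds 0, 0.1, or 0.3, so the combined
    # final score tops out at 1.5.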
    def advanced_pipeline_ranking(self, resume_texts, job_description, final_top_k=5):
        """Advanced pipeline: FAISS recall -> Cross-Encoder -> BM25 -> LLM intent -> final ranking"""
        if not resume_texts:
            return []

        st.write("🔍 **Stage 1**: FAISS Recall - Finding top 50 candidates...")
        top_50_indices = self.faiss_recall(resume_texts, job_description, top_k=50)

        st.write("🎯 **Stage 2**: Cross-Encoder Re-ranking - Selecting top 20...")
        top_20_results = self.cross_encoder_rerank(resume_texts, job_description, top_50_indices, top_k=20)

        st.write("🔤 **Stage 3**: BM25 Keyword Matching...")
        top_20_with_bm25 = self.add_bm25_scores(resume_texts, job_description, top_20_results)

        st.write("🤖 **Stage 4**: LLM Intent Analysis...")
        top_20_with_intent = self.add_intent_scores(resume_texts, job_description, top_20_with_bm25)

        st.write(f"🏆 **Stage 5**: Final Combined Ranking - Selecting top {final_top_k}...")
        final_results = self.calculate_final_scores(top_20_with_intent)

        return final_results[:final_top_k]

    def faiss_recall(self, resume_texts, job_description, top_k=50):
        """Stage 1: Use FAISS for initial recall to find the top 50 resumes"""
        try:
            job_embedding = self.get_embedding(job_description)

            resume_embeddings = []
            progress_bar = st.progress(0)

            for i, text in enumerate(resume_texts):
                if text:
                    embedding = self.embedding_model.encode(text[:8192],
                                                            convert_to_numpy=True,
                                                            normalize_embeddings=True)
                    resume_embeddings.append(embedding)
                else:
                    resume_embeddings.append(np.zeros(1024))
                progress_bar.progress((i + 1) / len(resume_texts))

            progress_bar.empty()

            # With L2-normalized vectors, inner product equals cosine similarity.
            resume_embeddings = np.array(resume_embeddings).astype('float32')
            dimension = resume_embeddings.shape[1]
            index = faiss.IndexFlatIP(dimension)
            index.add(resume_embeddings)

            job_embedding = job_embedding.reshape(1, -1).astype('float32')
            scores, indices = index.search(job_embedding, min(top_k, len(resume_texts)))

            return indices[0].tolist()

        except Exception as e:
            st.error(f"Error in FAISS recall: {str(e)}")
            # Fall back to the first top_k resumes in their original order.
            return list(range(min(top_k, len(resume_texts))))

    def cross_encoder_rerank(self, resume_texts, job_description, top_50_indices, top_k=20):
        """Stage 2: Use the Cross-Encoder to re-rank the top 50 and select the top 20"""
        try:
            if not self.cross_encoder:
                st.error("Cross-encoder not loaded!")
                return [(idx, 0.0) for idx in top_50_indices[:top_k]]

            # Build (job, resume) pairs, truncated to the model's input budget.
            pairs = []
            valid_indices = []

            for idx in top_50_indices:
                if idx < len(resume_texts) and resume_texts[idx]:
                    job_snippet = job_description[:512]
                    resume_snippet = resume_texts[idx][:512]
                    pairs.append([job_snippet, resume_snippet])
                    valid_indices.append(idx)

            if not pairs:
                return [(idx, 0.0) for idx in top_50_indices[:top_k]]

            progress_bar = st.progress(0)
            scores = []

            # Score in small batches to keep memory use and UI updates smooth.
            batch_size = 8
            for i in range(0, len(pairs), batch_size):
                batch = pairs[i:i+batch_size]
                batch_scores = self.cross_encoder.predict(batch)
                scores.extend(batch_scores)
                progress_bar.progress(min(1.0, (i + batch_size) / len(pairs)))

            progress_bar.empty()

            # Sort by cross-encoder score, descending.
            indexed_scores = list(zip(valid_indices, scores))
            indexed_scores.sort(key=lambda x: x[1], reverse=True)

            return indexed_scores[:top_k]

        except Exception as e:
            st.error(f"Error in cross-encoder re-ranking: {str(e)}")
            return [(idx, 0.0) for idx in top_50_indices[:top_k]]

    def add_bm25_scores(self, resume_texts, job_description, top_20_results):
        """Stage 3: Add BM25 scores to the top 20 resumes"""
        try:
            top_20_texts = [resume_texts[idx] for idx, _ in top_20_results]

            bm25_scores = self.calculate_bm25_scores(top_20_texts, job_description)

            # Min-max normalize the raw BM25 scores into the 0.1-0.2 band so the
            # keyword signal nudges, but never dominates, the final score.
            if bm25_scores and max(bm25_scores) > 0:
                max_bm25 = max(bm25_scores)
                min_bm25 = min(bm25_scores)
                if max_bm25 > min_bm25:
                    normalized_bm25 = [
                        0.1 + 0.1 * (score - min_bm25) / (max_bm25 - min_bm25)
                        for score in bm25_scores
                    ]
                else:
                    normalized_bm25 = [0.15] * len(bm25_scores)
            else:
                normalized_bm25 = [0.15] * len(top_20_results)

            results_with_bm25 = []
            for i, (idx, cross_score) in enumerate(top_20_results):
                bm25_score = normalized_bm25[i] if i < len(normalized_bm25) else 0.15
                results_with_bm25.append((idx, cross_score, bm25_score))

            return results_with_bm25

        except Exception as e:
            st.error(f"Error adding BM25 scores: {str(e)}")
            return [(idx, cross_score, 0.15) for idx, cross_score in top_20_results]

    def add_intent_scores(self, resume_texts, job_description, top_20_with_bm25):
        """Stage 4: Add LLM intent analysis scores"""
        try:
            results_with_intent = []
            progress_bar = st.progress(0)

            for i, (idx, cross_score, bm25_score) in enumerate(top_20_with_bm25):
                intent_score = self.analyze_intent(resume_texts[idx], job_description)
                results_with_intent.append((idx, cross_score, bm25_score, intent_score))
                progress_bar.progress((i + 1) / len(top_20_with_bm25))

            progress_bar.empty()
            return results_with_intent

        except Exception as e:
            st.error(f"Error adding intent scores: {str(e)}")
            return [(idx, cross_score, bm25_score, 0.1) for idx, cross_score, bm25_score in top_20_with_bm25]

    def analyze_intent(self, resume_text, job_description):
        """Analyze the candidate's intent using the LLM"""
        try:
            # Truncate both texts so the prompt stays within a modest budget.
            resume_snippet = resume_text[:1500]
            job_snippet = job_description[:800]

            prompt = f"""You are given a job description and a candidate's resume.
Clearly answer: "Is the candidate likely seeking this job? Respond with 'Yes', 'Maybe', or 'No' and give a brief justification."

Job Description:
{job_snippet}

Candidate Resume:
{resume_snippet}

Response format:
Intent: [Yes/Maybe/No]
Reason: [Brief justification]"""

            response = generate_qwen3_response(
                prompt,
                st.session_state.qwen3_intent_tokenizer,
                st.session_state.qwen3_intent_model,
                max_new_tokens=100
            )

            # Map the structured answer onto the 0-0.3 intent band.
            response_lower = response.lower()
            if 'intent: yes' in response_lower or 'intent:yes' in response_lower:
                return 0.3
            elif 'intent: maybe' in response_lower or 'intent:maybe' in response_lower:
                return 0.1
            else:
                return 0.0

        except Exception as e:
            st.warning(f"Error analyzing intent: {str(e)}")
            return 0.1

    def calculate_final_scores(self, results_with_all_scores):
        """Stage 5: Calculate final combined scores"""
        try:
            final_results = []

            for idx, cross_score, bm25_score, intent_score in results_with_all_scores:
                # Clamp the cross-encoder score into [0, 1] before combining.
                normalized_cross = max(0, min(1, cross_score))

                final_score = normalized_cross + bm25_score + intent_score

                final_results.append({
                    'index': idx,
                    'cross_encoder_score': normalized_cross,
                    'bm25_score': bm25_score,
                    'intent_score': intent_score,
                    'final_score': final_score
                })

            final_results.sort(key=lambda x: x['final_score'], reverse=True)

            return final_results

        except Exception as e:
            st.error(f"Error calculating final scores: {str(e)}")
            return []

    def extract_skills(self, text, job_description):
        """Extract skills from a resume based on the job description"""
        if not text:
            return []

        # Common technical skills to look for (multi-word entries included).
        common_skills = [
            "python", "java", "javascript", "react", "angular", "vue", "node.js",
            "express", "django", "flask", "spring", "sql", "nosql", "html", "css",
            "aws", "azure", "gcp", "docker", "kubernetes", "jenkins", "git", "github",
            "agile", "scrum", "jira", "ci/cd", "devops", "microservices", "rest", "api",
            "machine learning", "deep learning", "data science", "artificial intelligence",
            "tensorflow", "pytorch", "keras", "scikit-learn", "pandas", "numpy",
            "matplotlib", "seaborn", "jupyter", "r", "sas", "spss", "tableau", "powerbi",
            "excel", "mysql", "postgresql", "mongodb", "redis", "elasticsearch",
            "kafka", "rabbitmq", "spark", "hadoop", "hive", "airflow", "linux", "unix"
        ]

        job_lower = job_description.lower()
        job_words = set(word.lower() for word in word_tokenize(job_description) if len(word) > 2)

        found_skills = []
        text_lower = text.lower()

        # A known skill counts when it appears in both the resume and the job
        # description; substring matching handles multi-word skills.
        for skill in common_skills:
            if skill in text_lower and skill in job_lower:
                found_skills.append(skill)

        # Also pick up job-description words that appear verbatim in the resume.
        for word in job_words:
            if len(word) > 3 and word in text_lower and word not in found_skills:
                if word not in ['with', 'have', 'that', 'this', 'from', 'what', 'when', 'where']:
                    found_skills.append(word)

        # Deduplicate while preserving discovery order.
        return list(dict.fromkeys(found_skills))[:15]

    def generate_simple_explanation(self, score, semantic_score, bm25_score, skills):
        """Generate a simple explanation for the match (fallback)"""
        if score > 0.8:
            quality = "excellent"
        elif score > 0.6:
            quality = "strong"
        elif score > 0.4:
            quality = "moderate"
        else:
            quality = "limited"

        explanation = f"This candidate shows {quality} alignment with the position (score: {score:.2f}). "

        if semantic_score > bm25_score:
            explanation += f"The resume demonstrates strong conceptual relevance ({semantic_score:.2f}), suggesting a good experience fit. "
        else:
            explanation += f"The resume has a high keyword match ({bm25_score:.2f}), indicating direct skill alignment. "

        if skills:
            explanation += f"Key matching competencies include: {', '.join(skills[:5])}."

        return explanation

    def generate_llm_explanation(self, resume_text, job_description, score, skills, max_retries=3):
        """Generate a detailed explanation using Qwen3-14B"""
        if not st.session_state.qwen3_model:
            return self.generate_simple_explanation(score, score, score, skills)

        # Truncate inputs to keep the prompt within a reasonable length.
        resume_snippet = resume_text[:2000]
        job_snippet = job_description[:1000]

        prompt = f"""You are an expert HR analyst. Analyze this individual candidate's resume against the job requirements and write EXACTLY 150 words explaining why this specific candidate is suitable for the position.

Structure your 150-word analysis as follows:
1. Experience alignment (40-50 words)
2. Key strengths and skills match (40-50 words)
3. Unique value proposition (40-50 words)
4. Overall recommendation (10-20 words)

Job Requirements:
{job_snippet}

Candidate's Resume:
{resume_snippet}

Identified Matching Skills: {', '.join(skills[:10])}
Compatibility Score: {score:.1%}

Write a professional, detailed 150-word analysis for THIS INDIVIDUAL CANDIDATE:"""

        for attempt in range(max_retries):
            try:
                response = generate_qwen3_response(
                    prompt,
                    st.session_state.qwen3_tokenizer,
                    st.session_state.qwen3_model,
                    max_new_tokens=200
                )

                explanation = response.strip()
                word_count = len(explanation.split())

                # Accept answers close to the 150-word target as-is.
                if 130 <= word_count <= 170:
                    return explanation

                if word_count < 130:
                    # Too short: retry for a fuller answer.
                    continue
                else:
                    # Too long: truncate to roughly 150 words.
                    words = explanation.split()
                    truncated = ' '.join(words[:150])
                    if not truncated.endswith('.'):
                        truncated += '.'
                    return truncated

            except Exception as e:
                if attempt < max_retries - 1:
                    time.sleep(2)
                    continue
                else:
                    # Give up on the LLM and fall back to the template explanation.
                    return self.generate_simple_explanation(score, score, score, skills)

        # All retries produced too-short answers; fall back.
        return self.generate_simple_explanation(score, score, score, skills)


def create_download_link(df, filename="resume_screening_results.csv"):
    """Create a download link for the results"""
    csv_data = df.to_csv(index=False)  # avoid shadowing the imported csv module
    b64 = base64.b64encode(csv_data.encode()).decode()
    return f'<a href="data:file/csv;base64,{b64}" download="{filename}" class="download-btn">📥 Download Results CSV</a>'

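# Main page: title, job description input, resume loading, analysis controls,
# and results display.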
st.title("🎯 AI-Powered Resume Screener")
st.markdown("*Find the perfect candidates using BAAI/bge-large-en-v1.5 embeddings and Qwen3-14B explanations*")
st.markdown("---")

screener = ResumeScreener()

st.header("📋 Step 1: Enter Job Description")
job_description = st.text_area(
    "Enter the complete job description or requirements:",
    height=150,
    placeholder="Paste the job description here, including required skills, experience, and qualifications..."
)

st.header("📄 Step 2: Upload Resumes")

if st.session_state.resume_texts:
    col1, col2 = st.columns([3, 1])
    with col1:
        st.info(f"📄 {len(st.session_state.resume_texts)} resumes loaded and ready for analysis")
    with col2:
        if st.button("🗑️ Clear Resumes", type="secondary", help="Clear all loaded resumes to start fresh"):
            st.session_state.resume_texts = []
            st.session_state.file_names = []
            st.session_state.results = []
            st.session_state.explanations_generated = False
            st.session_state.current_job_description = ""
            st.rerun()

input_method = st.radio(
    "Choose input method:",
    ["📁 Upload Files", "🗂️ Load from CSV Dataset", "🤗 Load from Hugging Face Dataset"]
)

if input_method == "📁 Upload Files":
    uploaded_files = st.file_uploader(
        "Upload resume files",
        type=["pdf", "docx", "txt"],
        accept_multiple_files=True,
        help="Supported formats: PDF, DOCX, TXT"
    )

    if uploaded_files:
        with st.spinner(f"🔄 Processing {len(uploaded_files)} files..."):
            resume_texts = []
            file_names = []

            for file in uploaded_files:
                file_type = file.name.split('.')[-1].lower()

                # Write the upload to a temp file so the extractors can open it by path.
                with tempfile.NamedTemporaryFile(delete=False, suffix=f'.{file_type}') as tmp_file:
                    tmp_file.write(file.getvalue())
                    tmp_path = tmp_file.name

                text = screener.extract_text_from_file(tmp_path, file_type)
                if text.strip():
                    resume_texts.append(text)
                    file_names.append(file.name)

                os.unlink(tmp_path)

            st.session_state.resume_texts = resume_texts
            st.session_state.file_names = file_names

            if resume_texts:
                st.success(f"✅ Successfully processed {len(resume_texts)} resumes")

elif input_method == "🗂️ Load from CSV Dataset":
    csv_file = st.file_uploader("Upload CSV file with resume data", type=["csv"])

    if csv_file:
        try:
            df = pd.read_csv(csv_file)
            st.write("**CSV Preview:**")
            st.dataframe(df.head())

            text_column = st.selectbox(
                "Select column containing resume text:",
                df.columns.tolist()
            )

            name_column = st.selectbox(
                "Select column for candidate names/IDs (optional):",
                ["Use Index"] + df.columns.tolist()
            )

            if st.button("🚀 Process CSV Data"):
                with st.spinner("🔄 Processing CSV data..."):
                    resume_texts = []
                    file_names = []

                    for idx, row in df.iterrows():
                        text = str(row[text_column])
                        if text and text.strip() and text.lower() != 'nan':
                            resume_texts.append(text)

                            if name_column == "Use Index":
                                file_names.append(f"Resume_{idx}")
                            else:
                                file_names.append(str(row[name_column]))

                    st.session_state.resume_texts = resume_texts
                    st.session_state.file_names = file_names

                    if resume_texts:
                        st.success(f"✅ Successfully loaded {len(resume_texts)} resumes from CSV")

        except Exception as e:
            st.error(f"❌ Error processing CSV: {str(e)}")

elif input_method == "🤗 Load from Hugging Face Dataset":
    st.markdown("**Popular Resume Datasets:**")
    st.markdown("- `ahmedheakl/resume-atlas`")
    st.markdown("- `InferenceFly/Resume-Dataset`")

    col1, col2 = st.columns([2, 1])
    with col1:
        dataset_name = st.text_input(
            "Dataset name:",
            value="ahmedheakl/resume-atlas",
            help="Enter Hugging Face dataset name"
        )
    with col2:
        dataset_split = st.selectbox("Split:", ["train", "test", "validation"], index=0)

    if st.button("🚀 Load from Hugging Face"):
        try:
            with st.spinner(f"🔄 Loading {dataset_name}..."):
                dataset = load_dataset(dataset_name, split=dataset_split)

            st.success(f"✅ Loaded dataset with {len(dataset)} entries")
            st.write("**Dataset Preview:**")

            preview_df = pd.DataFrame(dataset[:5])
            st.dataframe(preview_df)

            text_column = st.selectbox(
                "Select column with resume text:",
                dataset.column_names,
                index=dataset.column_names.index('resume_text') if 'resume_text' in dataset.column_names else 0
            )

            category_column = None
            if 'category' in dataset.column_names:
                categories = list(set(dataset['category']))
                category_column = st.selectbox(
                    "Filter by category (optional):",
                    ["All"] + categories
                )

            max_samples = st.slider("Maximum samples to load:", 10, min(1000, len(dataset)), 100)

            # Caveat: this button is nested inside the outer button's block, so the
            # widgets above disappear on the rerun it triggers; persisting the
            # loaded dataset in st.session_state would make this two-step flow robust.
            if st.button("🚀 Process Dataset"):
                with st.spinner("🔄 Processing dataset..."):
                    resume_texts = []
                    file_names = []

                    filtered_dataset = dataset

                    if category_column and category_column != "All":
                        filtered_dataset = dataset.filter(lambda x: x['category'] == category_column)

                    sample_indices = list(range(min(max_samples, len(filtered_dataset))))

                    for idx in sample_indices:
                        item = filtered_dataset[idx]
                        text = str(item[text_column])

                        if text and text.strip() and text.lower() != 'nan':
                            resume_texts.append(text)

                            if 'id' in item:
                                file_names.append(f"Resume_{item['id']}")
                            else:
                                file_names.append(f"Resume_{idx}")

                    st.session_state.resume_texts = resume_texts
                    st.session_state.file_names = file_names

                    if resume_texts:
                        st.success(f"✅ Successfully loaded {len(resume_texts)} resumes")

        except Exception as e:
            st.error(f"❌ Error loading dataset: {str(e)}")

st.header("🔍 Step 3: Analyze Resumes")

col1, col2 = st.columns([1, 1])

with col1:
    if st.button("🚀 Advanced Pipeline Analysis",
                 disabled=not (job_description and st.session_state.resume_texts),
                 type="primary",
                 help="Run the complete 5-stage advanced pipeline"):
        if len(st.session_state.resume_texts) == 0:
            st.error("❌ Please upload resumes first!")
        elif not job_description.strip():
            st.error("❌ Please enter a job description!")
        else:
            with st.spinner("🚀 Running Advanced Pipeline Analysis..."):
                try:
                    pipeline_results = screener.advanced_pipeline_ranking(
                        st.session_state.resume_texts, job_description, final_top_k=top_k
                    )

                    results = []

                    for rank, result_data in enumerate(pipeline_results, 1):
                        idx = result_data['index']
                        name = st.session_state.file_names[idx]
                        text = st.session_state.resume_texts[idx]

                        skills = screener.extract_skills(text, job_description)

                        results.append({
                            'rank': rank,
                            'name': name,
                            'final_score': result_data['final_score'],
                            'cross_encoder_score': result_data['cross_encoder_score'],
                            'bm25_score': result_data['bm25_score'],
                            'intent_score': result_data['intent_score'],
                            'skills': skills,
                            'text': text,
                            'text_preview': text[:500] + "..." if len(text) > 500 else text,
                            'explanation': None
                        })

                    # Start with fast template explanations; the LLM versions can
                    # be generated on demand from the button in the next column.
                    for result in results:
                        result['explanation'] = screener.generate_simple_explanation(
                            result['final_score'],
                            result['cross_encoder_score'],
                            result['bm25_score'],
                            result['skills']
                        )

                    st.session_state.results = results
                    st.session_state.explanations_generated = False
                    st.session_state.current_job_description = job_description

                    st.success(f"🏆 Advanced pipeline complete! Found top {len(st.session_state.results)} candidates.")

                except Exception as e:
                    st.error(f"❌ Error during analysis: {str(e)}")

with col2:
    show_explanation_button = (
        st.session_state.results and
        use_llm_explanations and
        st.session_state.qwen3_model and
        not st.session_state.explanations_generated
    )

    if show_explanation_button:
        if st.button("🤖 Generate AI Explanations",
                     type="secondary",
                     help="Generate detailed 150-word explanations using Qwen3-14B (takes longer)"):
            with st.spinner("🤖 Generating detailed AI explanations..."):
                try:
                    explanation_progress = st.progress(0)
                    explanation_text = st.empty()

                    for i, result in enumerate(st.session_state.results):
                        explanation_text.text(f"🤖 Generating AI explanation for candidate {i+1}/{len(st.session_state.results)}...")

                        llm_explanation = screener.generate_llm_explanation(
                            result['text'],
                            st.session_state.current_job_description,
                            result['final_score'],
                            result['skills']
                        )
                        result['explanation'] = llm_explanation

                        explanation_progress.progress((i + 1) / len(st.session_state.results))

                    explanation_progress.empty()
                    explanation_text.empty()

                    st.session_state.explanations_generated = True

                    st.success(f"🤖 AI explanations generated for all {len(st.session_state.results)} candidates!")

                except Exception as e:
                    st.error(f"❌ Error generating explanations: {str(e)}")

    elif st.session_state.results and st.session_state.explanations_generated:
        st.info("✅ AI explanations already generated!")

    elif st.session_state.results and not use_llm_explanations:
        st.info("💡 Enable 'Generate AI Explanations' in sidebar to use this feature")

    elif st.session_state.results and not st.session_state.qwen3_model:
        st.warning("⚠️ LLM model not available. Check your Hugging Face token.")

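# Results: summary table, per-candidate detail view, and simple charts.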
if st.session_state.results:
    st.header("🏆 Top Candidates")

    tab1, tab2, tab3 = st.tabs(["📊 Summary", "📋 Detailed Analysis", "📈 Visualizations"])

    with tab1:
        summary_data = []
        for result in st.session_state.results:
            # Map the numeric intent score back to its label.
            intent_text = "Yes" if result['intent_score'] == 0.3 else "Maybe" if result['intent_score'] == 0.1 else "No"

            summary_data.append({
                "Rank": result['rank'],
                "Candidate": result['name'],
                "Final Score": f"{result['final_score']:.2f}",
                "Cross-Encoder": f"{result['cross_encoder_score']:.2f}",
                "BM25": f"{result['bm25_score']:.2f}",
                "Intent": f"{intent_text} ({result['intent_score']:.1f})",
                "Top Skills": ", ".join(result['skills'][:5])
            })

        summary_df = pd.DataFrame(summary_data)

        def color_scores(val):
            """Color cells green/yellow/red by score band; values >= 1.0 are treated as final scores (which can reach 1.5)."""
            if isinstance(val, str) and any(char.isdigit() for char in val):
                try:
                    numeric_val = float(''.join(c for c in val if c.isdigit() or c == '.'))
                    if numeric_val >= 1.0:
                        if numeric_val >= 1.2:
                            return 'background-color: #d4edda'
                        else:
                            return 'background-color: #fff3cd'
                    else:
                        if numeric_val >= 0.7:
                            return 'background-color: #d4edda'
                        elif numeric_val >= 0.5:
                            return 'background-color: #fff3cd'
                        else:
                            return 'background-color: #f8d7da'
                except ValueError:
                    pass
            return ''

        # Note: Styler.applymap is deprecated in newer pandas in favor of Styler.map.
        styled_df = summary_df.style.applymap(color_scores, subset=['Final Score', 'Cross-Encoder', 'BM25'])
        st.dataframe(styled_df, use_container_width=True)

        detailed_data = []
        for result in st.session_state.results:
            intent_text = "Yes" if result['intent_score'] == 0.3 else "Maybe" if result['intent_score'] == 0.1 else "No"

            detailed_data.append({
                "Rank": result['rank'],
                "Candidate": result['name'],
                "Final_Score": result['final_score'],
                "Cross_Encoder_Score": result['cross_encoder_score'],
                "BM25_Score": result['bm25_score'],
                "Intent_Score": result['intent_score'],
                "Intent_Analysis": intent_text,
                "Skills": "; ".join(result['skills']),
                "AI_Explanation": result['explanation'],
                "Resume_Preview": result['text_preview']
            })

        download_df = pd.DataFrame(detailed_data)
        st.markdown(create_download_link(download_df), unsafe_allow_html=True)

    with tab2:
        for result in st.session_state.results:
            intent_text = "Yes" if result['intent_score'] == 0.3 else "Maybe" if result['intent_score'] == 0.1 else "No"

            with st.expander(f"#{result['rank']}: {result['name']} (Final Score: {result['final_score']:.2f})"):
                col1, col2 = st.columns([1, 2])

                with col1:
                    st.metric("🏆 Final Score", f"{result['final_score']:.2f}")

                    st.write("**📊 Score Breakdown:**")
                    st.metric("🎯 Cross-Encoder", f"{result['cross_encoder_score']:.2f}", help="Semantic relevance (0-1)")
                    st.metric("🔤 BM25 Keywords", f"{result['bm25_score']:.2f}", help="Keyword matching (0.1-0.2)")
                    st.metric("🤖 Intent Analysis", f"{intent_text} ({result['intent_score']:.1f})", help="Job seeking likelihood (0-0.3)")

                    st.write("**🎯 Matching Skills:**")
                    # Lay the top ten skills out in two columns.
                    skill_cols = st.columns(2)
                    for idx, skill in enumerate(result['skills'][:10]):
                        with skill_cols[idx % 2]:
                            st.write(f"• {skill}")

                with col2:
                    st.write("**💡 AI-Generated Match Analysis:**")
                    st.info(result['explanation'])

                    st.write("**📝 Resume Preview:**")
                    st.text_area("Resume preview", result['text_preview'], height=200, disabled=True,
                                 label_visibility="collapsed", key=f"preview_{result['rank']}")

    with tab3:
        if len(st.session_state.results) > 1:
            st.subheader("Score Comparison")

            chart_data = pd.DataFrame({
                'Candidate': [r['name'][:20] + '...' if len(r['name']) > 20 else r['name']
                              for r in st.session_state.results],
                'Final Score': [r['final_score'] for r in st.session_state.results],
                'Cross-Encoder': [r['cross_encoder_score'] for r in st.session_state.results],
                'BM25': [r['bm25_score'] for r in st.session_state.results],
                'Intent': [r['intent_score'] for r in st.session_state.results]
            })

            st.bar_chart(chart_data.set_index('Candidate'))

            col1, col2 = st.columns(2)

            with col1:
                st.subheader("Score Distribution")
                score_ranges = {
                    'Excellent (≥1.2)': sum(1 for r in st.session_state.results if r['final_score'] >= 1.2),
                    'Good (1.0-1.2)': sum(1 for r in st.session_state.results if 1.0 <= r['final_score'] < 1.2),
                    'Fair (0.8-1.0)': sum(1 for r in st.session_state.results if 0.8 <= r['final_score'] < 1.0),
                    'Poor (<0.8)': sum(1 for r in st.session_state.results if r['final_score'] < 0.8),
                }

                dist_df = pd.DataFrame({
                    'Range': score_ranges.keys(),
                    'Count': score_ranges.values()
                })
                st.bar_chart(dist_df.set_index('Range'))

            with col2:
                st.subheader("Average Scores")
                avg_final = np.mean([r['final_score'] for r in st.session_state.results])
                avg_cross = np.mean([r['cross_encoder_score'] for r in st.session_state.results])
                avg_bm25 = np.mean([r['bm25_score'] for r in st.session_state.results])
                avg_intent = np.mean([r['intent_score'] for r in st.session_state.results])

                st.metric("Average Final Score", f"{avg_final:.2f}")
                st.metric("Average Cross-Encoder", f"{avg_cross:.2f}")
                st.metric("Average BM25", f"{avg_bm25:.2f}")
                st.metric("Average Intent", f"{avg_intent:.2f}")

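# Reset controls: clear loaded data, and optionally release GPU memory.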
st.markdown("---")
st.subheader("🧹 Reset Application")
col1, col2, col3 = st.columns([1, 1, 3])
with col1:
    if st.button("🗑️ Clear Resumes Only", type="secondary", help="Clear only the loaded resumes"):
        st.session_state.resume_texts = []
        st.session_state.file_names = []
        st.session_state.results = []
        st.session_state.explanations_generated = False
        st.session_state.current_job_description = ""
        st.success("✅ Resumes cleared!")
        st.rerun()

with col2:
    if st.button("🧹 Clear Everything", type="primary", help="Clear all data and free memory"):
        st.session_state.resume_texts = []
        st.session_state.file_names = []
        st.session_state.results = []
        st.session_state.explanations_generated = False
        st.session_state.current_job_description = ""

        # Release cached GPU memory after dropping references.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()
        st.success("✅ Everything cleared!")
        st.rerun()

st.markdown("---")
st.markdown(
    """
    <div style='text-align: center; color: #666;'>
        🚀 Powered by BAAI/bge-large-en-v1.5 & Qwen3-14B | Built with Streamlit
    </div>
    """,
    unsafe_allow_html=True
)