|
import streamlit as st |
|
import pandas as pd |
|
import numpy as np |
|
import torch |
|
import nltk |
|
import os |
|
import tempfile |
|
import base64 |
|
from rank_bm25 import BM25Okapi |
|
from sentence_transformers import SentenceTransformer, CrossEncoder |
|
from nltk.tokenize import word_tokenize |
|
import pdfplumber |
|
import PyPDF2 |
|
from docx import Document |
|
import csv |
|
from datasets import load_dataset |
|
import gc |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
import time |
|
import faiss |
|
import re |
|
|
|
|
|
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt')

# Newer NLTK releases also need the punkt_tab tables for word_tokenize.
try:
    nltk.data.find('tokenizers/punkt_tab')
except LookupError:
    nltk.download('punkt_tab')
|
|
|
|
|
st.set_page_config( |
|
page_title="AI Resume Screener", |
|
    page_icon="🎯",
|
layout="wide", |
|
initial_sidebar_state="expanded" |
|
) |
|
|
|
|
|
|
|
|
|
keys_to_initialize = { |
|
'embedding_model': None, 'embedding_model_error': None, |
|
'cross_encoder': None, 'cross_encoder_error': None, |
|
'qwen3_1_7b_tokenizer': None, 'qwen3_1_7b_tokenizer_error': None, |
|
'qwen3_1_7b_model': None, 'qwen3_1_7b_model_error': None, |
|
'results': [], 'resume_texts': [], 'file_names': [], 'current_job_description': "" |
|
|
|
} |
|
for key, default_value in keys_to_initialize.items(): |
|
if key not in st.session_state: |
|
st.session_state[key] = default_value |
|
|
|
|
|
if st.session_state.embedding_model is None and st.session_state.embedding_model_error is None: |
|
print("[Global Init] Attempting to load Embedding Model (BAAI/bge-large-en-v1.5) with device_map='auto'...") |
|
try: |
|
st.session_state.embedding_model = SentenceTransformer( |
|
'BAAI/bge-large-en-v1.5', |
|
device_map="auto" |
|
) |
|
print(f"[Global Init] Embedding Model (BAAI/bge-large-en-v1.5) LOADED with device_map='auto'.") |
|
except Exception as e: |
|
if "device_map" in str(e).lower() and "unexpected keyword argument" in str(e).lower(): |
|
print("β οΈ [Global Init] device_map='auto' not supported for SentenceTransformer. Falling back to default device handling.") |
|
try: |
|
st.session_state.embedding_model = SentenceTransformer('BAAI/bge-large-en-v1.5') |
|
print(f"[Global Init] Embedding Model (BAAI/bge-large-en-v1.5) LOADED (fallback device handling).") |
|
except Exception as e_fallback: |
|
error_msg = f"Failed to load Embedding Model (fallback): {str(e_fallback)}" |
|
print(f"β [Global Init] {error_msg}") |
|
st.session_state.embedding_model_error = error_msg |
|
else: |
|
error_msg = f"Failed to load Embedding Model: {str(e)}" |
|
print(f"β [Global Init] {error_msg}") |
|
st.session_state.embedding_model_error = error_msg |
|
|
|
|
|
if st.session_state.cross_encoder is None and st.session_state.cross_encoder_error is None: |
|
print("[Global Init] Attempting to load Cross-Encoder Model (ms-marco-MiniLM-L6-v2) with device_map='auto'...") |
|
try: |
|
st.session_state.cross_encoder = CrossEncoder( |
|
'cross-encoder/ms-marco-MiniLM-L6-v2', |
|
device_map="auto" |
|
) |
|
print(f"[Global Init] Cross-Encoder Model (ms-marco-MiniLM-L6-v2) LOADED with device_map='auto'.") |
|
except Exception as e: |
|
if "device_map" in str(e).lower() and "unexpected keyword argument" in str(e).lower(): |
|
print("β οΈ [Global Init] device_map='auto' not supported for CrossEncoder. Falling back to default device handling.") |
|
try: |
|
st.session_state.cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L6-v2') |
|
print(f"[Global Init] Cross-Encoder Model (ms-marco-MiniLM-L6-v2) LOADED (fallback device handling).") |
|
except Exception as e_fallback: |
|
error_msg = f"Failed to load Cross-Encoder Model (fallback): {str(e_fallback)}" |
|
print(f"β [Global Init] {error_msg}") |
|
st.session_state.cross_encoder_error = error_msg |
|
else: |
|
error_msg = f"Failed to load Cross-Encoder Model: {str(e)}" |
|
print(f"β [Global Init] {error_msg}") |
|
st.session_state.cross_encoder_error = error_msg |
|
|
|
|
|
if st.session_state.qwen3_1_7b_tokenizer is None and st.session_state.qwen3_1_7b_tokenizer_error is None: |
|
print("[Global Init] Loading Qwen3-1.7B Tokenizer...") |
|
try: |
|
st.session_state.qwen3_1_7b_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-1.7B") |
|
print("[Global Init] Qwen3-1.7B Tokenizer Loaded.") |
|
except Exception as e: |
|
error_msg = f"Failed to load Qwen3-1.7B Tokenizer: {str(e)}" |
|
print(f"β [Global Init] {error_msg}") |
|
st.session_state.qwen3_1_7b_tokenizer_error = error_msg |
|
|
|
|
|
if st.session_state.qwen3_1_7b_model is None and st.session_state.qwen3_1_7b_model_error is None: |
|
print("[Global Init] Loading Qwen3-1.7B Model (attempting with device_map='auto')...") |
|
try: |
|
st.session_state.qwen3_1_7b_model = AutoModelForCausalLM.from_pretrained( |
|
"Qwen/Qwen3-1.7B", |
|
torch_dtype="auto", |
|
device_map="auto", |
|
trust_remote_code=True |
|
) |
|
print("[Global Init] Qwen3-1.7B Model Loaded with device_map='auto'.") |
|
except Exception as e_dev_map: |
|
print(f"β οΈ [Global Init] Failed to load Qwen3-1.7B with device_map='auto': {str(e_dev_map)}") |
|
print("[Global Init] Retrying Qwen3-1.7B load without device_map (will use default single device)...") |
|
try: |
|
st.session_state.qwen3_1_7b_model = AutoModelForCausalLM.from_pretrained( |
|
"Qwen/Qwen3-1.7B", |
|
torch_dtype="auto", |
|
|
|
trust_remote_code=True |
|
) |
|
print("[Global Init] Qwen3-1.7B Model Loaded (fallback device handling).") |
|
except Exception as e_fallback: |
|
error_msg = f"Failed to load Qwen3-1.7B Model (fallback): {str(e_fallback)}" |
|
print(f"β [Global Init] {error_msg}") |
|
st.session_state.qwen3_1_7b_model_error = error_msg |
|
|
|
|
|
|
|
|
|
|
|
def generate_qwen3_response(prompt, tokenizer, model, max_new_tokens=200): |
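    """Generate a chat response from Qwen3 via its chat template.

    With enable_thinking=True, Qwen3 may emit a <think>...</think> reasoning
    block before the final answer; callers that only want the answer must
    strip it (see analyze_intent below).
    """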
|
|
|
messages = [{"role": "user", "content": prompt}] |
|
text = tokenizer.apply_chat_template( |
|
messages, |
|
tokenize=False, |
|
add_generation_prompt=True, |
|
enable_thinking=True |
|
) |
|
model_inputs = tokenizer([text], return_tensors="pt").to(model.device) |
|
generated_ids = model.generate( |
|
**model_inputs, |
|
max_new_tokens=max_new_tokens |
|
) |
|
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() |
|
response = tokenizer.decode(output_ids, skip_special_tokens=True).strip("\n") |
|
return response |
|
|
|
class ResumeScreener: |
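    """Five-stage resume ranking pipeline.

    Stages: FAISS dense recall (top 50) -> cross-encoder re-rank (top 20) ->
    BM25 keyword bonus -> Qwen3 intent bonus -> combined final score.
    Model handles are shared via st.session_state rather than reloaded per instance.
    """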
|
def __init__(self): |
|
|
|
print("[ResumeScreener] Initializing with references to globally loaded models...") |
|
self.embedding_model = st.session_state.get('embedding_model') |
|
self.cross_encoder = st.session_state.get('cross_encoder') |
|
|
|
if self.embedding_model: |
|
print("[ResumeScreener] Embedding model reference set.") |
|
else: |
|
print("[ResumeScreener] Embedding model not available (check loading errors).") |
|
|
|
if self.cross_encoder: |
|
print("[ResumeScreener] Cross-encoder model reference set.") |
|
else: |
|
print("[ResumeScreener] Cross-encoder model not available (check loading errors).") |
|
|
|
print("[ResumeScreener] Initialization complete.") |
|
|
|
|
|
|
|
|
|
|
|
|
|
def extract_text_from_file(self, file_path, file_type): |
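        """Extract plain text from a PDF, DOCX, TXT, or CSV file. PDFs are
        parsed with pdfplumber first, falling back to PyPDF2 when pdfplumber
        returns no text."""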
|
|
|
try: |
|
if file_type == "pdf": |
|
with open(file_path, 'rb') as file: |
|
with pdfplumber.open(file) as pdf: |
|
text = "" |
|
for page in pdf.pages: |
|
text += page.extract_text() or "" |
|
if not text.strip(): |
|
file.seek(0) |
|
reader = PyPDF2.PdfReader(file) |
|
text = "" |
|
for page_num in range(len(reader.pages)): |
|
text += reader.pages[page_num].extract_text() or "" |
|
return text |
|
elif file_type == "docx": |
|
doc = Document(file_path) |
|
return " ".join([paragraph.text for paragraph in doc.paragraphs]) |
|
elif file_type == "txt": |
|
with open(file_path, 'r', encoding='utf-8') as file: |
|
return file.read() |
|
elif file_type == "csv": |
|
with open(file_path, 'r', encoding='utf-8') as file: |
|
csv_reader = csv.reader(file) |
|
return " ".join([" ".join(row) for row in csv_reader]) |
|
except Exception as e: |
|
st.error(f"Error extracting text from {file_path}: {str(e)}") |
|
return "" |
|
|
|
def get_embedding(self, text): |
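        """Embed text with bge-large-en-v1.5 (1024-dim, L2-normalized).

        BGE models expect an instruction prefix on short query-style inputs;
        longer passages are embedded as-is. Input is truncated to 8192
        characters before encoding.
        """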
|
if self.embedding_model is None: |
|
st.error("Embedding model is not available!") |
|
return np.zeros(1024) |
|
try: |
|
if len(text) < 500: |
|
text = "Represent this sentence for searching relevant passages: " + text |
|
text = text[:8192] if text else "" |
|
embedding = self.embedding_model.encode(text, convert_to_numpy=True, normalize_embeddings=True) |
|
return embedding |
|
except Exception as e: |
|
st.error(f"Error generating embedding: {str(e)}") |
|
return np.zeros(1024) |
|
|
|
def calculate_bm25_scores(self, resume_texts, job_description): |
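        """Score each resume against the job description with BM25Okapi,
        returning one raw (unnormalized) score per resume."""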
|
try: |
|
job_tokens = word_tokenize(job_description.lower()) |
|
            # Tokenize every resume; empty texts get a placeholder token so the
            # returned score list stays index-aligned with resume_texts.
            corpus = [word_tokenize(text.lower()) if text and text.strip() else [""] for text in resume_texts]
|
if not corpus: |
|
return [0.0] * len(resume_texts) |
|
bm25 = BM25Okapi(corpus) |
|
scores = bm25.get_scores(job_tokens) |
|
return scores.tolist() |
|
except Exception as e: |
|
st.error(f"Error calculating BM25 scores: {str(e)}") |
|
return [0.0] * len(resume_texts) |
|
|
|
def advanced_pipeline_ranking(self, resume_texts, job_description): |
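        """Run all five stages and return the top-k candidates, where k comes
        from st.session_state['top_k'] (default 5)."""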
|
print("[Pipeline] Advanced Pipeline Ranking started.") |
|
if not resume_texts: |
|
return [] |
|
st.info("π Stage 1: FAISS Recall - Finding top candidates...") |
|
top_50_indices = self.faiss_recall(resume_texts, job_description, top_k=50) |
|
st.info("π― Stage 2: Cross-Encoder Re-ranking - Selecting top candidates...") |
|
top_20_results = self.cross_encoder_rerank(resume_texts, job_description, top_50_indices, top_k=20) |
|
st.info("π€ Stage 3: BM25 Keyword Matching...") |
|
top_20_with_bm25 = self.add_bm25_scores(resume_texts, job_description, top_20_results) |
|
st.info("π€ Stage 4: LLM Intent Analysis (Qwen3-1.7B)...") |
|
top_20_with_intent = self.add_intent_scores(resume_texts, job_description, top_20_with_bm25) |
|
st.info("π Stage 5: Final Combined Ranking...") |
|
final_results = self.calculate_final_scores(top_20_with_intent) |
|
print("[Pipeline] Advanced Pipeline Ranking finished.") |
|
return final_results[:st.session_state.get('top_k', 5)] |
|
|
|
def faiss_recall(self, resume_texts, job_description, top_k=50): |
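        """Stage 1: dense recall. Embeddings are L2-normalized, so the
        inner-product index (IndexFlatIP) effectively ranks by cosine
        similarity."""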
|
print("[faiss_recall] Method started.") |
|
st.text("FAISS Recall: Embedding job description...") |
|
job_embedding = self.get_embedding(job_description) |
|
st.text(f"FAISS Recall: Embedding {len(resume_texts)} resumes...") |
|
resume_embeddings = [] |
|
progress_bar = st.progress(0) |
|
for i, text in enumerate(resume_texts): |
|
if text: |
|
embedding = self.embedding_model.encode(text[:8192], convert_to_numpy=True, normalize_embeddings=True) |
|
resume_embeddings.append(embedding) |
|
else: |
|
resume_embeddings.append(np.zeros(1024)) |
|
progress_bar.progress((i + 1) / len(resume_texts)) |
|
progress_bar.empty() |
|
resume_embeddings_np = np.array(resume_embeddings).astype('float32') |
|
if resume_embeddings_np.ndim == 1: |
|
resume_embeddings_np = resume_embeddings_np.reshape(1, -1) |
|
if resume_embeddings_np.size == 0: |
|
print("[faiss_recall] No resume embeddings to add to FAISS index.") |
|
return [] |
|
|
|
dimension = resume_embeddings_np.shape[1] |
|
index = faiss.IndexFlatIP(dimension) |
|
index.add(resume_embeddings_np) |
|
job_embedding_np = job_embedding.reshape(1, -1).astype('float32') |
|
scores, indices = index.search(job_embedding_np, min(top_k, len(resume_texts))) |
|
return indices[0].tolist() |
|
|
|
def cross_encoder_rerank(self, resume_texts, job_description, top_50_indices, top_k=20): |
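        """Stage 2: re-score recalled candidates with the cross-encoder on
        (job, resume) pairs, each side truncated to 512 characters, and keep
        the top_k highest."""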
|
print("[cross_encoder_rerank] Method started.") |
|
if not self.cross_encoder: |
|
st.error("Cross-encoder model is not available!") |
|
return [(idx, 0.0) for idx in top_50_indices[:top_k]] |
|
pairs = [] |
|
valid_indices = [] |
|
for idx in top_50_indices: |
|
if idx < len(resume_texts) and resume_texts[idx]: |
|
job_snippet = job_description[:512] |
|
resume_snippet = resume_texts[idx][:512] |
|
pairs.append([job_snippet, resume_snippet]) |
|
valid_indices.append(idx) |
|
if not pairs: |
|
return [(idx, 0.0) for idx in top_50_indices[:top_k]] |
|
st.text(f"Cross-Encoder: Preparing {len(pairs)} pairs for re-ranking...") |
|
scores = [] |
|
batch_size = 8 |
|
progress_bar = st.progress(0) |
|
for i in range(0, len(pairs), batch_size): |
|
batch = pairs[i:i+batch_size] |
|
batch_scores = self.cross_encoder.predict(batch) |
|
scores.extend(batch_scores) |
|
progress_bar.progress(min(1.0, (i + batch_size) / len(pairs))) |
|
progress_bar.empty() |
|
indexed_scores = list(zip(valid_indices, scores)) |
|
indexed_scores.sort(key=lambda x: x[1], reverse=True) |
|
return indexed_scores[:top_k] |
|
|
|
def add_bm25_scores(self, resume_texts, job_description, top_20_results): |
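        """Stage 3: attach a BM25 keyword bonus, min-max normalized into the
        narrow 0.1-0.2 band so keywords nudge the ranking without dominating
        it."""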
|
st.text("BM25: Calculating keyword scores...") |
|
top_20_texts = [resume_texts[idx] for idx, _ in top_20_results] |
|
bm25_scores_raw = self.calculate_bm25_scores(top_20_texts, job_description) |
|
if bm25_scores_raw and max(bm25_scores_raw) > 0: |
|
max_bm25, min_bm25 = max(bm25_scores_raw), min(bm25_scores_raw) |
|
if max_bm25 > min_bm25: |
|
normalized_bm25 = [0.1 + 0.1 * (s - min_bm25) / (max_bm25 - min_bm25) for s in bm25_scores_raw] |
|
else: |
|
normalized_bm25 = [0.15] * len(bm25_scores_raw) |
|
else: |
|
normalized_bm25 = [0.15] * len(top_20_results) |
|
results_with_bm25 = [] |
|
for i, (idx, cross_score) in enumerate(top_20_results): |
|
results_with_bm25.append((idx, cross_score, normalized_bm25[i] if i < len(normalized_bm25) else 0.15)) |
|
return results_with_bm25 |
|
|
|
def add_intent_scores(self, resume_texts, job_description, top_20_with_bm25): |
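        """Stage 4: attach a Qwen3-judged intent bonus (0.0, 0.1, or 0.3) to
        each remaining candidate."""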
|
st.text(f"LLM Intent: Analyzing intent for {len(top_20_with_bm25)} candidates (Qwen3-1.7B)...") |
|
results_with_intent = [] |
|
progress_bar = st.progress(0) |
|
for i, (idx, cross_score, bm25_score) in enumerate(top_20_with_bm25): |
|
intent_score = self.analyze_intent(resume_texts[idx], job_description) |
|
results_with_intent.append((idx, cross_score, bm25_score, intent_score)) |
|
progress_bar.progress((i + 1) / len(top_20_with_bm25)) |
|
progress_bar.empty() |
|
return results_with_intent |
|
|
|
def analyze_intent(self, resume_text, job_description): |
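        """Ask Qwen3 whether the candidate appears to be actively seeking this
        kind of role; maps 'Intent: Yes' -> 0.3, ambiguous -> 0.1, and
        'Intent: No' -> 0.0."""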
|
print(f"[analyze_intent] Analyzing intent for one resume (Qwen3-1.7B)...") |
|
st.text("LLM Intent: Analyzing intent (Qwen3-1.7B)...") |
|
try: |
|
resume_snippet = resume_text[:15000] |
|
job_snippet = job_description[:5000] |
|
            # NOTE: the original prompt body was elided here; this minimal
            # reconstruction matches the "Intent: Yes/No/Maybe" parsing below.
            prompt = f"""You are given a job description and a candidate's resume... (rest of prompt)

Job Description:
{job_snippet}

Resume:
{resume_snippet}

Answer with one line in the form "Intent: Yes", "Intent: Maybe", or "Intent: No"."""
|
response_text = generate_qwen3_response( |
|
prompt, |
|
st.session_state.qwen3_1_7b_tokenizer, |
|
st.session_state.qwen3_1_7b_model, |
|
max_new_tokens=20000 |
|
) |
|
|
thinking_content = "No detailed thought process extracted." |
|
intent_decision_part = response_text |
|
think_start_tag = "<think>" |
|
think_end_tag = "</think>" |
|
start_index = response_text.find(think_start_tag) |
|
end_index = response_text.rfind(think_end_tag) |
|
if start_index != -1 and end_index != -1 and start_index < end_index: |
|
thinking_content = response_text[start_index + len(think_start_tag):end_index].strip() |
|
intent_decision_part = response_text[end_index + len(think_end_tag):].strip() |
|
response_lower = intent_decision_part.lower() |
|
intent_score = 0.1 |
|
if 'intent: yes' in response_lower or 'intent:yes' in response_lower: |
|
intent_score = 0.3 |
|
elif 'intent: no' in response_lower or 'intent:no' in response_lower: |
|
intent_score = 0.0 |
|
return intent_score |
|
except Exception as e: |
|
st.warning(f"Error analyzing intent with Qwen3-1.7B: {str(e)}") |
|
return 0.1 |
|
|
|
def calculate_final_scores(self, results_with_all_scores): |
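        """Stage 5: final score = cross-encoder score (clamped to 0-1) + BM25
        bonus (0.1-0.2) + intent bonus (0-0.3), giving a 1.5 maximum."""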
|
final_results = [] |
|
for idx, cross_score, bm25_score, intent_score in results_with_all_scores: |
|
normalized_cross = max(0, min(1, cross_score)) |
|
final_score = normalized_cross + bm25_score + intent_score |
|
final_results.append({ |
|
'index': idx, 'cross_encoder_score': normalized_cross, |
|
'bm25_score': bm25_score, 'intent_score': intent_score, |
|
'final_score': final_score |
|
}) |
|
final_results.sort(key=lambda x: x['final_score'], reverse=True) |
|
return final_results |
|
|
|
def extract_skills(self, text, job_description): |
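        """Heuristic skill matcher: intersects a fixed skill list (plus salient
        job-description words) with the resume text; returns at most 15
        skills."""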
|
|
if not text: return [] |
|
        common_skills = [
            "python", "java", "javascript", "react", "angular", "vue", "node.js", "express",
            "django", "flask", "spring", "sql", "nosql", "html", "css", "aws", "azure", "gcp",
            "docker", "kubernetes", "jenkins", "git", "github", "agile", "scrum", "jira",
            "ci/cd", "devops", "microservices", "rest", "api", "machine learning",
            "deep learning", "data science", "artificial intelligence", "tensorflow",
            "pytorch", "keras", "scikit-learn", "pandas", "numpy", "matplotlib", "seaborn",
            "jupyter", "r", "sas", "spss", "tableau", "powerbi", "excel", "mysql",
            "postgresql", "mongodb", "redis", "elasticsearch", "kafka", "rabbitmq",
            "spark", "hadoop", "hive", "airflow", "linux", "unix"
        ]
|
job_words = set(word.lower() for word in word_tokenize(job_description) if len(word) > 2) |
|
found_skills = [] |
|
text_lower = text.lower() |
|
for skill in common_skills: |
|
if skill in text_lower and any(skill in job_word for job_word in job_words): |
|
found_skills.append(skill) |
|
for word in job_words: |
|
if len(word) > 3 and word in text_lower and word not in found_skills and word not in ['with', 'have', 'that', 'this', 'from', 'what', 'when', 'where']: |
|
found_skills.append(word) |
|
return list(set(found_skills))[:15] |
|
|
|
def create_download_link(df, filename="resume_screening_results.csv"): |
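    """Serialize the results DataFrame to CSV and wrap it in a base64 data-URI
    anchor tag, rendered via st.markdown(..., unsafe_allow_html=True)."""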
|
|
    # Use a name that doesn't shadow the csv module.
    csv_string = df.to_csv(index=False)
    b64 = base64.b64encode(csv_string.encode()).decode()
    return f'<a href="data:file/csv;base64,{b64}" download="{filename}" class="download-btn">📥 Download Results CSV</a>'
|
|
|
# --- Sidebar Configuration (Must be after global model loading and class defs if it uses them) --- |
|
with st.sidebar: |
|
st.title("βοΈ Configuration") |
|
# Advanced options |
|
st.subheader("Advanced Options") |
|
# Ensure top_k is in session_state if it's used by advanced_pipeline_ranking before button press |
|
if 'top_k' not in st.session_state: |
|
st.session_state.top_k = 5 # Default value |
|
st.session_state.top_k = st.selectbox("Number of results to display", [1,2,3,4,5], index=st.session_state.top_k-1, key="top_k_selector") |
|
|
|
# LLM Settings |
|
st.subheader("LLM Settings") |
|
# use_llm_explanations = st.checkbox("Generate AI Explanations", value=True) # This was removed earlier |
|
# if use_llm_explanations: |
|
# hf_token = st.text_input("Hugging Face Token (optional)", type="password", |
|
# help="Enter your HF token for better rate limits") |
|
|
|
st.markdown("---") |
|
st.markdown(" |
|
st.markdown("- **Stage 1**: FAISS Recall (Top 50)") |
|
st.markdown("- **Stage 2**: Cross-Encoder Re-ranking (Top 20)") |
|
st.markdown("- **Stage 3**: BM25 Keyword Matching") |
|
st.markdown("- **Stage 4**: LLM Intent Analysis (Qwen3-1.7B)") |
|
st.markdown("- **Final**: Combined Scoring") |
|
st.markdown("### π Models Used") |
|
st.markdown("- **Embedding**: BAAI/bge-large-en-v1.5") |
|
st.markdown("- **Cross-Encoder**: ms-marco-MiniLM-L6-v2") |
|
st.markdown("- **LLM**: Qwen/Qwen3-1.7B") |
|
st.markdown("### π Scoring Formula") |
|
st.markdown("**Final Score = Cross-Encoder (0-1) + BM25 (0.1-0.2) + Intent (0-0.3)**") |
|
|
|
|
|
st.title("π― AI-Powered Resume Screener") |
|
|
|
|
|
st.markdown("*Find the perfect candidates using BAAI/bge-large-en-v1.5 embeddings and Qwen3-1.7B for intent analysis*") |
|
|
|
st.subheader("π€ Model Loading Status") |
|
col1, col2 = st.columns(2) |
|
with col1: |
|
if st.session_state.get('embedding_model_error'): |
|
st.error(f"Embedding Model: {st.session_state.embedding_model_error}") |
|
elif st.session_state.get('embedding_model'): |
|
st.success("β
Embedding Model (BAAI/bge-large-en-v1.5) loaded.") |
|
else: |
|
st.warning("β³ Embedding Model loading or not found (check console).") |
|
if st.session_state.get('cross_encoder_error'): |
|
st.error(f"Cross-Encoder Model: {st.session_state.cross_encoder_error}") |
|
elif st.session_state.get('cross_encoder'): |
|
st.success("β
Cross-Encoder Model (ms-marco-MiniLM-L6-v2) loaded.") |
|
else: |
|
st.warning("β³ Cross-Encoder Model loading or not found (check console).") |
|
with col2: |
|
if st.session_state.get('qwen3_1_7b_tokenizer_error'): |
|
st.error(f"Qwen3-1.7B Tokenizer: {st.session_state.qwen3_1_7b_tokenizer_error}") |
|
elif st.session_state.get('qwen3_1_7b_tokenizer'): |
|
st.success("β
Qwen3-1.7B Tokenizer loaded.") |
|
else: |
|
st.warning("β³ Qwen3-1.7B Tokenizer loading or not found (check console).") |
|
if st.session_state.get('qwen3_1_7b_model_error'): |
|
st.error(f"Qwen3-1.7B Model: {st.session_state.qwen3_1_7b_model_error}") |
|
elif st.session_state.get('qwen3_1_7b_model'): |
|
st.success("β
Qwen3-1.7B Model loaded.") |
|
else: |
|
st.warning("β³ Qwen3-1.7B Model loading or not found (check console).") |
|
st.markdown("---") |
|
|
|
|
|
screener = ResumeScreener() |
|
|
|
|
|
st.header("π Step 1: Enter Job Description") |
|
job_description = st.text_area( |
|
"Enter the complete job description or requirements:", |
|
height=150, |
|
placeholder="Paste the job description here, including required skills, experience, and qualifications..." |
|
) |
|
|
|
|
|
st.header("π Step 2: Upload Resumes") |
|
|
|
|
|
if st.session_state.resume_texts: |
|
col1, col2 = st.columns([3, 1]) |
|
with col1: |
|
st.info(f"π {len(st.session_state.resume_texts)} resumes loaded and ready for analysis") |
|
with col2: |
|
if st.button("ποΈ Clear Resumes", type="secondary", help="Clear all loaded resumes to start fresh"): |
|
st.session_state.resume_texts = [] |
|
st.session_state.file_names = [] |
|
st.session_state.results = [] |
|
st.session_state.current_job_description = "" |
|
st.rerun() |
|
|
|
input_method = st.radio( |
|
"Choose input method:", |
|
["π Upload Files", "ποΈ Load from CSV Dataset", "π Load from Hugging Face Dataset"] |
|
) |
|
|
|
if input_method == "π Upload Files": |
|
uploaded_files = st.file_uploader( |
|
"Upload resume files", |
|
type=["pdf", "docx", "txt"], |
|
accept_multiple_files=True, |
|
help="Supported formats: PDF, DOCX, TXT" |
|
) |
|
|
|
if uploaded_files: |
|
with st.spinner(f"π Processing {len(uploaded_files)} files..."): |
|
resume_texts = [] |
|
file_names = [] |
|
|
|
for file in uploaded_files: |
|
file_type = file.name.split('.')[-1].lower() |
|
|
|
with tempfile.NamedTemporaryFile(delete=False, suffix=f'.{file_type}') as tmp_file: |
|
tmp_file.write(file.getvalue()) |
|
tmp_path = tmp_file.name |
|
|
|
text = screener.extract_text_from_file(tmp_path, file_type) |
|
if text.strip(): |
|
resume_texts.append(text) |
|
file_names.append(file.name) |
|
|
|
os.unlink(tmp_path) |
|
|
|
st.session_state.resume_texts = resume_texts |
|
st.session_state.file_names = file_names |
|
|
|
if resume_texts: |
|
st.success(f"β
Successfully processed {len(resume_texts)} resumes") |
|
|
|
elif input_method == "ποΈ Load from CSV Dataset": |
|
csv_file = st.file_uploader("Upload CSV file with resume data", type=["csv"]) |
|
|
|
if csv_file: |
|
try: |
|
df = pd.read_csv(csv_file) |
|
st.write("**CSV Preview:**") |
|
st.dataframe(df.head()) |
|
|
|
text_column = st.selectbox( |
|
"Select column containing resume text:", |
|
df.columns.tolist() |
|
) |
|
|
|
name_column = st.selectbox( |
|
"Select column for candidate names/IDs (optional):", |
|
["Use Index"] + df.columns.tolist() |
|
) |
|
|
|
if st.button("π Process CSV Data"): |
|
with st.spinner("π Processing CSV data..."): |
|
resume_texts = [] |
|
file_names = [] |
|
|
|
for idx, row in df.iterrows(): |
|
text = str(row[text_column]) |
|
if text and text.strip() and text.lower() != 'nan': |
|
resume_texts.append(text) |
|
|
|
if name_column == "Use Index": |
|
file_names.append(f"Resume_{idx}") |
|
else: |
|
file_names.append(str(row[name_column])) |
|
|
|
st.session_state.resume_texts = resume_texts |
|
st.session_state.file_names = file_names |
|
|
|
if resume_texts: |
|
st.success(f"β
Successfully loaded {len(resume_texts)} resumes from CSV") |
|
|
|
except Exception as e: |
|
st.error(f"β Error processing CSV: {str(e)}") |
|
|
|
elif input_method == "π Load from Hugging Face Dataset": |
|
st.markdown("**Popular Resume Datasets:**") |
|
st.markdown("- `ahmedheakl/resume-atlas`") |
|
st.markdown("- `InferenceFly/Resume-Dataset`") |
|
|
|
col1, col2 = st.columns([2, 1]) |
|
with col1: |
|
dataset_name = st.text_input( |
|
"Dataset name:", |
|
value="ahmedheakl/resume-atlas", |
|
help="Enter Hugging Face dataset name" |
|
) |
|
with col2: |
|
dataset_split = st.selectbox("Split:", ["train", "test", "validation"], index=0) |
|
|
|
if st.button("π Load from Hugging Face"): |
|
try: |
|
with st.spinner(f"π Loading {dataset_name}..."): |
|
dataset = load_dataset(dataset_name, split=dataset_split) |
|
|
|
st.success(f"β
Loaded dataset with {len(dataset)} entries") |
|
st.write("**Dataset Preview:**") |
|
|
|
preview_df = pd.DataFrame(dataset[:5]) |
|
st.dataframe(preview_df) |
|
|
|
text_column = st.selectbox( |
|
"Select column with resume text:", |
|
dataset.column_names, |
|
index=dataset.column_names.index('resume_text') if 'resume_text' in dataset.column_names else 0 |
|
) |
|
|
|
category_column = None |
|
if 'category' in dataset.column_names: |
|
categories = list(set(dataset['category'])) |
|
category_column = st.selectbox( |
|
"Filter by category (optional):", |
|
["All"] + categories |
|
) |
|
|
|
max_samples = st.slider("Maximum samples to load:", 10, min(1000, len(dataset)), 100) |
|
|
|
if st.button("π Process Dataset"): |
|
with st.spinner("π Processing dataset..."): |
|
resume_texts = [] |
|
file_names = [] |
|
|
|
filtered_dataset = dataset |
|
|
|
if category_column and category_column != "All": |
|
filtered_dataset = dataset.filter(lambda x: x['category'] == category_column) |
|
|
|
sample_indices = list(range(min(max_samples, len(filtered_dataset)))) |
|
|
|
for idx in sample_indices: |
|
item = filtered_dataset[idx] |
|
text = str(item[text_column]) |
|
|
|
if text and text.strip() and text.lower() != 'nan': |
|
resume_texts.append(text) |
|
|
|
if 'id' in item: |
|
file_names.append(f"Resume_{item['id']}") |
|
else: |
|
file_names.append(f"Resume_{idx}") |
|
|
|
st.session_state.resume_texts = resume_texts |
|
st.session_state.file_names = file_names |
|
|
|
if resume_texts: |
|
st.success(f"β
Successfully loaded {len(resume_texts)} resumes") |
|
|
|
except Exception as e: |
|
st.error(f"β Error loading dataset: {str(e)}") |
|
|
|
|
|
st.header("π Step 3: Analyze Resumes") |
|
|
|
|
|
col1, col2 = st.columns([1, 1]) |
|
|
|
with col1: |
|
if st.button("π Advanced Pipeline Analysis", |
|
disabled=not (job_description and st.session_state.resume_texts and |
|
st.session_state.get('embedding_model') and |
|
st.session_state.get('cross_encoder') and |
|
st.session_state.get('qwen3_1_7b_model') and |
|
st.session_state.get('qwen3_1_7b_tokenizer')), |
|
type="primary", |
|
help="Run the complete 5-stage advanced pipeline"): |
|
print("--- Advanced Pipeline Analysis Button Clicked ---") |
|
if len(st.session_state.resume_texts) == 0: |
|
st.error("β Please upload resumes first!") |
|
elif not job_description.strip(): |
|
st.error("β Please enter a job description!") |
|
else: |
|
print("[UI Button] Pre-checks passed. Starting spinner and pipeline.") |
|
with st.spinner("π Running Advanced Pipeline Analysis..."): |
|
st.text("Pipeline Initiated: Starting advanced analysis...") |
|
try: |
|
|
|
pipeline_results = screener.advanced_pipeline_ranking( |
|
st.session_state.resume_texts, job_description |
|
) |
|
|
|
|
|
results = [] |
|
|
|
for rank, result_data in enumerate(pipeline_results, 1): |
|
idx = result_data['index'] |
|
name = st.session_state.file_names[idx] |
|
text = st.session_state.resume_texts[idx] |
|
|
|
|
|
skills = screener.extract_skills(text, job_description) |
|
|
|
results.append({ |
|
'rank': rank, |
|
'name': name, |
|
'final_score': result_data['final_score'], |
|
'cross_encoder_score': result_data['cross_encoder_score'], |
|
'bm25_score': result_data['bm25_score'], |
|
'intent_score': result_data['intent_score'], |
|
'skills': skills, |
|
'text': text, |
|
'text_preview': text[:500] + "..." if len(text) > 500 else text |
|
}) |
|
|
|
|
|
st.session_state.results = results |
|
st.session_state.current_job_description = job_description |
|
|
|
st.success(f"π Advanced pipeline complete! Found top {len(st.session_state.results)} candidates.") |
|
st.text("Displaying Top Candidates...") |
|
|
|
except Exception as e: |
|
st.error(f"β Error during analysis: {str(e)}") |
|
|
|
|
|
if st.session_state.results: |
|
st.header("π Top Candidates") |
|
|
|
|
|
    tab1, tab2, tab3 = st.tabs(["📊 Summary", "🔍 Detailed Analysis", "📈 Visualizations"])
|
|
|
with tab1: |
|
|
|
summary_data = [] |
|
for result in st.session_state.results: |
|
|
|
intent_text = "Yes" if result['intent_score'] == 0.3 else "Maybe" if result['intent_score'] == 0.1 else "No" |
|
|
|
summary_data.append({ |
|
"Rank": result['rank'], |
|
"Candidate": result['name'], |
|
"Final Score": f"{result['final_score']:.2f}", |
|
"Cross-Encoder": f"{result['cross_encoder_score']:.2f}", |
|
"BM25": f"{result['bm25_score']:.2f}", |
|
"Intent": f"{intent_text} ({result['intent_score']:.1f})", |
|
"Top Skills": ", ".join(result['skills'][:5]) |
|
}) |
|
|
|
summary_df = pd.DataFrame(summary_data) |
|
|
|
|
|
        def color_scores(val):
            # Cells arrive as formatted strings like "1.23"; non-numeric cells
            # are left unstyled. Final scores run 0-1.5, component scores 0-1,
            # so values >= 1.0 use the final-score thresholds.
            try:
                numeric_val = float(val)
            except (TypeError, ValueError):
                return ''
            if numeric_val >= 1.2:
                return 'background-color: #d4edda'
            elif numeric_val >= 1.0:
                return 'background-color: #fff3cd'
            elif numeric_val >= 0.7:
                return 'background-color: #d4edda'
            elif numeric_val >= 0.5:
                return 'background-color: #fff3cd'
            else:
                return 'background-color: #f8d7da'
|
|
|
styled_df = summary_df.style.applymap(color_scores, subset=['Final Score', 'Cross-Encoder', 'BM25']) |
|
st.dataframe(styled_df, use_container_width=True) |
|
|
|
|
|
detailed_data = [] |
|
for result in st.session_state.results: |
|
intent_text = "Yes" if result['intent_score'] == 0.3 else "Maybe" if result['intent_score'] == 0.1 else "No" |
|
|
|
detailed_data.append({ |
|
"Rank": result['rank'], |
|
"Candidate": result['name'], |
|
"Final_Score": result['final_score'], |
|
"Cross_Encoder_Score": result['cross_encoder_score'], |
|
"BM25_Score": result['bm25_score'], |
|
"Intent_Score": result['intent_score'], |
|
"Intent_Analysis": intent_text, |
|
"Skills": "; ".join(result['skills']), |
|
"Resume_Preview": result['text_preview'] |
|
}) |
|
|
|
download_df = pd.DataFrame(detailed_data) |
|
st.markdown(create_download_link(download_df), unsafe_allow_html=True) |
|
|
|
with tab2: |
|
|
|
for result in st.session_state.results: |
|
intent_text = "Yes" if result['intent_score'] == 0.3 else "Maybe" if result['intent_score'] == 0.1 else "No" |
|
|
|
with st.expander(f"#{result['rank']}: {result['name']} (Final Score: {result['final_score']:.2f})"): |
|
col1, col2 = st.columns([1, 2]) |
|
|
|
with col1: |
|
st.metric("π Final Score", f"{result['final_score']:.2f}") |
|
|
|
st.write("**π Score Breakdown:**") |
|
st.metric("π― Cross-Encoder", f"{result['cross_encoder_score']:.2f}", help="Semantic relevance (0-1)") |
|
st.metric("π€ BM25 Keywords", f"{result['bm25_score']:.2f}", help="Keyword matching (0.1-0.2)") |
|
st.metric("π€ Intent Analysis", f"{intent_text} ({result['intent_score']:.1f})", help="Job seeking likelihood (0-0.3)") |
|
|
|
st.write("**π― Matching Skills:**") |
|
skills_per_column = 5 |
|
skill_cols = st.columns(2) |
|
for idx, skill in enumerate(result['skills'][:10]): |
|
with skill_cols[idx % 2]: |
|
st.write(f"β’ {skill}") |
|
|
|
with col2: |
|
st.write("**π Resume Preview:**") |
|
st.text_area("", result['text_preview'], height=200, disabled=True, key=f"preview_{result['rank']}") |
|
|
|
with tab3: |
|
|
|
if len(st.session_state.results) > 1: |
|
|
|
st.subheader("Score Comparison") |
|
|
|
chart_data = pd.DataFrame({ |
|
'Candidate': [r['name'][:20] + '...' if len(r['name']) > 20 else r['name'] |
|
for r in st.session_state.results], |
|
'Final Score': [r['final_score'] for r in st.session_state.results], |
|
'Cross-Encoder': [r['cross_encoder_score'] for r in st.session_state.results], |
|
'BM25': [r['bm25_score'] for r in st.session_state.results], |
|
'Intent': [r['intent_score'] for r in st.session_state.results] |
|
}) |
|
|
|
st.bar_chart(chart_data.set_index('Candidate')) |
|
|
|
|
|
col1, col2 = st.columns(2) |
|
|
|
with col1: |
|
st.subheader("Score Distribution") |
|
score_ranges = { |
|
'Excellent (β₯1.2)': sum(1 for r in st.session_state.results if r['final_score'] >= 1.2), |
|
'Good (1.0-1.2)': sum(1 for r in st.session_state.results if 1.0 <= r['final_score'] < 1.2), |
|
'Fair (0.8-1.0)': sum(1 for r in st.session_state.results if 0.8 <= r['final_score'] < 1.0), |
|
'Poor (<0.8)': sum(1 for r in st.session_state.results if r['final_score'] < 0.8), |
|
} |
|
|
|
dist_df = pd.DataFrame({ |
|
'Range': score_ranges.keys(), |
|
'Count': score_ranges.values() |
|
}) |
|
st.bar_chart(dist_df.set_index('Range')) |
|
|
|
with col2: |
|
st.subheader("Average Scores") |
|
avg_final = np.mean([r['final_score'] for r in st.session_state.results]) |
|
avg_cross = np.mean([r['cross_encoder_score'] for r in st.session_state.results]) |
|
avg_bm25 = np.mean([r['bm25_score'] for r in st.session_state.results]) |
|
avg_intent = np.mean([r['intent_score'] for r in st.session_state.results]) |
|
|
|
st.metric("Average Final Score", f"{avg_final:.2f}") |
|
st.metric("Average Cross-Encoder", f"{avg_cross:.2f}") |
|
st.metric("Average BM25", f"{avg_bm25:.2f}") |
|
st.metric("Average Intent", f"{avg_intent:.2f}") |
|
|
|
|
|
st.markdown("---") |
|
st.subheader("π§Ή Reset Application") |
|
col1, col2, col3 = st.columns([1, 1, 3]) |
|
with col1: |
|
if st.button("ποΈ Clear Resumes Only", type="secondary", help="Clear only the loaded resumes"): |
|
st.session_state.resume_texts = [] |
|
st.session_state.file_names = [] |
|
st.session_state.results = [] |
|
st.session_state.current_job_description = "" |
|
st.success("β
Resumes cleared!") |
|
st.rerun() |
|
|
|
with col2: |
|
if st.button("π§Ή Clear Everything", type="primary", help="Clear all data and free memory"): |
|
st.session_state.resume_texts = [] |
|
st.session_state.file_names = [] |
|
st.session_state.results = [] |
|
st.session_state.current_job_description = "" |
|
|
|
if torch.cuda.is_available(): |
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
st.success("β
Everything cleared!") |
|
st.rerun() |
|
|
|
|
|
st.markdown("---") |
|
st.markdown( |
|
""" |
|
<div style='text-align: center; color: #666;'> |
|
        🚀 Powered by BAAI/bge-large-en-v1.5 & Qwen3-1.7B | Built with Streamlit
|
</div> |
|
""", |
|
unsafe_allow_html=True |
|
) |