root committed on
Commit 70c101d · 1 Parent(s): 5a54bc4
Files changed (2)
  1. app.py +126 -471
  2. requirements.txt +1 -2
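
In short: app.py replaces the per-function @st.cache_resource loaders and the piecemeal session-state setup with a single global loading section that seeds every session-state key up front, tries device_map="auto" for each model, falls back to default device placement where that keyword is unsupported, and records any failure in st.session_state. The sidebar gains a per-model loading-status panel, and the Advanced Pipeline button is now disabled until all four components (embedding model, cross-encoder, Qwen3-1.7B tokenizer and model) are available. requirements.txt bumps sentence-transformers from 2.6.1 to 2.7.0 and drops the plotly pin.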
app.py CHANGED
@@ -34,6 +34,95 @@ st.set_page_config(
34
  initial_sidebar_state="expanded"
35
  )
36

37
+ # --- Global Device and Model Loading Section ---
38
+
39
+ # Initialize session state keys for all models, their loading status/errors, and app data
40
+ keys_to_initialize = {
41
+ 'embedding_model': None, 'embedding_model_error': None,
42
+ 'cross_encoder': None, 'cross_encoder_error': None,
43
+ 'qwen3_1_7b_tokenizer': None, 'qwen3_1_7b_tokenizer_error': None,
44
+ 'qwen3_1_7b_model': None, 'qwen3_1_7b_model_error': None,
45
+ 'results': [], 'resume_texts': [], 'file_names': [], 'current_job_description': ""
46
+ # Add any other app-specific session state keys here if needed
47
+ }
48
+ for key, default_value in keys_to_initialize.items():
49
+ if key not in st.session_state:
50
+ st.session_state[key] = default_value
51
+
52
+ # Load Embedding Model (BAAI/bge-large-en-v1.5)
53
+ if st.session_state.embedding_model is None and st.session_state.embedding_model_error is None:
54
+ print("[Global Init] Attempting to load Embedding Model (BAAI/bge-large-en-v1.5) with device_map='auto'...")
55
+ try:
56
+ st.session_state.embedding_model = SentenceTransformer(
57
+ 'BAAI/bge-large-en-v1.5',
58
+ device_map="auto"
59
+ )
60
+ print(f"[Global Init] Embedding Model (BAAI/bge-large-en-v1.5) LOADED with device_map='auto'.")
61
+ except Exception as e:
62
+ if "device_map" in str(e).lower() and "unexpected keyword argument" in str(e).lower():
63
+ print("⚠️ [Global Init] device_map='auto' not supported for SentenceTransformer. Falling back to default device handling.")
64
+ try:
65
+ st.session_state.embedding_model = SentenceTransformer('BAAI/bge-large-en-v1.5')
66
+ print(f"[Global Init] Embedding Model (BAAI/bge-large-en-v1.5) LOADED (fallback device handling).")
67
+ except Exception as e_fallback:
68
+ error_msg = f"Failed to load Embedding Model (fallback): {str(e_fallback)}"
69
+ print(f"❌ [Global Init] {error_msg}")
70
+ st.session_state.embedding_model_error = error_msg
71
+ else:
72
+ error_msg = f"Failed to load Embedding Model: {str(e)}"
73
+ print(f"❌ [Global Init] {error_msg}")
74
+ st.session_state.embedding_model_error = error_msg
75
+
76
+ # Load Cross-Encoder Model (ms-marco-MiniLM-L6-v2)
77
+ if st.session_state.cross_encoder is None and st.session_state.cross_encoder_error is None:
78
+ print("[Global Init] Attempting to load Cross-Encoder Model (ms-marco-MiniLM-L6-v2) with device_map='auto'...")
79
+ try:
80
+ st.session_state.cross_encoder = CrossEncoder(
81
+ 'cross-encoder/ms-marco-MiniLM-L6-v2',
82
+ device_map="auto"
83
+ )
84
+ print(f"[Global Init] Cross-Encoder Model (ms-marco-MiniLM-L6-v2) LOADED with device_map='auto'.")
85
+ except Exception as e:
86
+ if "device_map" in str(e).lower() and "unexpected keyword argument" in str(e).lower():
87
+ print("⚠️ [Global Init] device_map='auto' not supported for CrossEncoder. Falling back to default device handling.")
88
+ try:
89
+ st.session_state.cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L6-v2')
90
+ print(f"[Global Init] Cross-Encoder Model (ms-marco-MiniLM-L6-v2) LOADED (fallback device handling).")
91
+ except Exception as e_fallback:
92
+ error_msg = f"Failed to load Cross-Encoder Model (fallback): {str(e_fallback)}"
93
+ print(f"❌ [Global Init] {error_msg}")
94
+ st.session_state.cross_encoder_error = error_msg
95
+ else:
96
+ error_msg = f"Failed to load Cross-Encoder Model: {str(e)}"
97
+ print(f"❌ [Global Init] {error_msg}")
98
+ st.session_state.cross_encoder_error = error_msg
99
+
100
+ # Load Qwen3-1.7B Tokenizer
101
+ if st.session_state.qwen3_1_7b_tokenizer is None and st.session_state.qwen3_1_7b_tokenizer_error is None:
102
+ print("[Global Init] Loading Qwen3-1.7B Tokenizer...")
103
+ try:
104
+ st.session_state.qwen3_1_7b_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-1.7B")
105
+ print("[Global Init] Qwen3-1.7B Tokenizer Loaded.")
106
+ except Exception as e:
107
+ error_msg = f"Failed to load Qwen3-1.7B Tokenizer: {str(e)}"
108
+ print(f"❌ [Global Init] {error_msg}")
109
+ st.session_state.qwen3_1_7b_tokenizer_error = error_msg
110
+
111
+ # Load Qwen3-1.7B Model
112
+ if st.session_state.qwen3_1_7b_model is None and st.session_state.qwen3_1_7b_model_error is None:
113
+ print("[Global Init] Loading Qwen3-1.7B Model...")
114
+ try:
115
+ st.session_state.qwen3_1_7b_model = AutoModelForCausalLM.from_pretrained(
116
+ "Qwen/Qwen3-1.7B", torch_dtype="auto", device_map="auto"
117
+ )
118
+ print("[Global Init] Qwen3-1.7B Model Loaded.")
119
+ except Exception as e:
120
+ error_msg = f"Failed to load Qwen3-1.7B Model: {str(e)}"
121
+ print(f"❌ [Global Init] {error_msg}")
122
+ st.session_state.qwen3_1_7b_model_error = error_msg
123
+
124
+ # --- End of Global Model Loading Section ---
125
+
37
  # Sidebar configuration
38
  with st.sidebar:
39
  st.title("⚙️ Configuration")
@@ -63,478 +152,40 @@ with st.sidebar:
63
  st.markdown("### 📈 Scoring Formula")
64
  st.markdown("**Final Score = Cross-Encoder (0-1) + BM25 (0.1-0.2) + Intent (0-0.3)**")
65
 
66
- # Initialize session state
67
- if 'embedding_model' not in st.session_state:
68
- st.session_state.embedding_model = None
69
- if 'cross_encoder' not in st.session_state:
70
- st.session_state.cross_encoder = None
71
- if 'results' not in st.session_state:
72
- st.session_state.results = []
73
- if 'resume_texts' not in st.session_state:
74
- st.session_state.resume_texts = []
75
- if 'file_names' not in st.session_state:
76
- st.session_state.file_names = []
77
- if 'current_job_description' not in st.session_state:
78
- st.session_state.current_job_description = ""
79
- if 'qwen3_1_7b_tokenizer' not in st.session_state:
80
- print("[Init] Loading Qwen3-1.7B Tokenizer...")
81
- st.session_state.qwen3_1_7b_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-1.7B")
82
- print("[Init] Qwen3-1.7B Tokenizer Loaded.")
83
- if 'qwen3_1_7b_model' not in st.session_state:
84
- print("[Init] Loading Qwen3-1.7B Model...")
85
- st.session_state.qwen3_1_7b_model = AutoModelForCausalLM.from_pretrained(
86
- "Qwen/Qwen3-1.7B", torch_dtype="auto", device_map="auto"
87
- )
88
- print("[Init] Qwen3-1.7B Model Loaded.")
89
-
90
- @st.cache_resource
91
- def load_embedding_model():
92
- """Load and cache the BGE embedding model"""
93
- print("[Cache] Attempting to load Embedding Model (BAAI/bge-large-en-v1.5)...")
94
- try:
95
- device = "cuda" if torch.cuda.is_available() else "cpu"
96
- print(f"[Cache] Using device: {device} for embedding model")
97
- with st.spinner("🔄 Loading BAAI/bge-large-en-v1.5 model..."):
98
- model = SentenceTransformer('BAAI/bge-large-en-v1.5', device=device)
99
- st.success("✅ Embedding model loaded successfully!")
100
- print("[Cache] Embedding Model (BAAI/bge-large-en-v1.5) LOADED.")
101
- return model
102
- except Exception as e:
103
- st.error(f"❌ Error loading embedding model: {str(e)}")
104
- return None
105
-
106
- @st.cache_resource
107
- def load_cross_encoder():
108
- """Load and cache the Cross-Encoder model"""
109
- print("[Cache] Attempting to load Cross-Encoder Model (ms-marco-MiniLM-L6-v2)...")
110
- try:
111
- device = "cuda" if torch.cuda.is_available() else "cpu"
112
- print(f"[Cache] Using device: {device} for cross-encoder model")
113
- with st.spinner("🔄 Loading Cross-Encoder ms-marco-MiniLM-L6-v2..."):
114
- from sentence_transformers import CrossEncoder
115
- model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L6-v2', device=device)
116
- st.success("✅ Cross-Encoder model loaded successfully!")
117
- print("[Cache] Cross-Encoder Model (ms-marco-MiniLM-L6-v2) LOADED.")
118
- return model
119
- except Exception as e:
120
- st.error(f"❌ Error loading Cross-Encoder model: {str(e)}")
121
- return None
122
-
123
- def generate_qwen3_response(prompt, tokenizer, model, max_new_tokens=200):
124
- messages = [{"role": "user", "content": prompt}]
125
- text = tokenizer.apply_chat_template(
126
- messages,
127
- tokenize=False,
128
- add_generation_prompt=True,
129
- enable_thinking=True
130
- )
131
- model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
132
- generated_ids = model.generate(
133
- **model_inputs,
134
- max_new_tokens=max_new_tokens
135
- )
136
- output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
137
- response = tokenizer.decode(output_ids, skip_special_tokens=True).strip("\n")
138
- return response
139
-
140
- class ResumeScreener:
141
- def __init__(self):
142
- print("[ResumeScreener] Initializing...")
143
- st.text("Initializing Screener: Loading embedding model...")
144
- self.embedding_model = load_embedding_model()
145
- st.text("Initializing Screener: Loading cross-encoder model...")
146
- self.cross_encoder = load_cross_encoder()
147
- print("[ResumeScreener] Initialized.")
148
- st.text("Screener Ready.")
149
-
150
- def extract_text_from_file(self, file_path, file_type):
151
- """Extract text from various file types"""
152
- try:
153
- if file_type == "pdf":
154
- with open(file_path, 'rb') as file:
155
- with pdfplumber.open(file) as pdf:
156
- text = ""
157
- for page in pdf.pages:
158
- text += page.extract_text() or ""
159
-
160
- if not text.strip():
161
- # Fallback to PyPDF2
162
- file.seek(0)
163
- reader = PyPDF2.PdfReader(file)
164
- text = ""
165
- for page in reader.pages:
166
- text += page.extract_text() or ""
167
- return text
168
-
169
- elif file_type == "docx":
170
- doc = Document(file_path)
171
- return " ".join([paragraph.text for paragraph in doc.paragraphs])
172
-
173
- elif file_type == "txt":
174
- with open(file_path, 'r', encoding='utf-8') as file:
175
- return file.read()
176
-
177
- elif file_type == "csv":
178
- with open(file_path, 'r', encoding='utf-8') as file:
179
- csv_reader = csv.reader(file)
180
- return " ".join([" ".join(row) for row in csv_reader])
181
-
182
- except Exception as e:
183
- st.error(f"Error extracting text from {file_path}: {str(e)}")
184
- return ""
185
-
186
- def get_embedding(self, text):
187
- """Generate embedding for text using BGE model"""
188
- if self.embedding_model is None:
189
- st.error("No embedding model loaded!")
190
- return np.zeros(1024) # BGE-large dimension
191
-
192
- try:
193
- # BGE models recommend adding instruction for retrieval
194
- # For queries (job description)
195
- if len(text) < 500: # Assuming shorter texts are queries
196
- text = "Represent this sentence for searching relevant passages: " + text
197
-
198
- # Truncate text to avoid memory issues
199
- text = text[:8192] if text else ""
200
-
201
- # Generate embedding
202
- embedding = self.embedding_model.encode(text,
203
- convert_to_numpy=True,
204
- normalize_embeddings=True)
205
- return embedding
206
-
207
- except Exception as e:
208
- st.error(f"Error generating embedding: {str(e)}")
209
- return np.zeros(1024) # BGE-large dimension
210
-
211
- def calculate_bm25_scores(self, resume_texts, job_description):
212
- """Calculate BM25 scores for keyword matching"""
213
- try:
214
- job_tokens = word_tokenize(job_description.lower())
215
- corpus = [word_tokenize(text.lower()) for text in resume_texts if text and text.strip()]
216
-
217
- if not corpus:
218
- return [0.0] * len(resume_texts)
219
-
220
- bm25 = BM25Okapi(corpus)
221
- scores = bm25.get_scores(job_tokens)
222
- return scores.tolist()
223
-
224
- except Exception as e:
225
- st.error(f"Error calculating BM25 scores: {str(e)}")
226
- return [0.0] * len(resume_texts)
227
-
228
- def advanced_pipeline_ranking(self, resume_texts, job_description):
229
- """Advanced pipeline: FAISS recall -> Cross-encoder -> BM25 -> LLM intent -> Final ranking"""
230
- print("[Pipeline] Advanced Pipeline Ranking started.")
231
- if not resume_texts:
232
- return []
233
- st.info("🔍 Stage 1: FAISS Recall - Finding top candidates...")
234
- print("[Pipeline] Calling faiss_recall.")
235
- top_50_indices = self.faiss_recall(resume_texts, job_description, top_k=50)
236
- print(f"[Pipeline] faiss_recall returned {len(top_50_indices)} indices.")
237
-
238
- st.info("🎯 Stage 2: Cross-Encoder Re-ranking - Selecting top candidates...")
239
- print("[Pipeline] Calling cross_encoder_rerank.")
240
- top_20_results = self.cross_encoder_rerank(resume_texts, job_description, top_50_indices, top_k=20)
241
- print(f"[Pipeline] cross_encoder_rerank returned {len(top_20_results)} results.")
242
-
243
- st.info("🔤 Stage 3: BM25 Keyword Matching...")
244
- print("[Pipeline] Calling add_bm25_scores.")
245
- top_20_with_bm25 = self.add_bm25_scores(resume_texts, job_description, top_20_results)
246
- print(f"[Pipeline] add_bm25_scores processed.")
247
-
248
- st.info("🤖 Stage 4: LLM Intent Analysis (Qwen3-1.7B)...")
249
- print("[Pipeline] Calling add_intent_scores.")
250
- top_20_with_intent = self.add_intent_scores(resume_texts, job_description, top_20_with_bm25)
251
- print(f"[Pipeline] add_intent_scores processed.")
252
-
253
- st.info("🏆 Stage 5: Final Combined Ranking...")
254
- print("[Pipeline] Calling calculate_final_scores.")
255
- final_results = self.calculate_final_scores(top_20_with_intent)
256
- print(f"[Pipeline] calculate_final_scores returned {len(final_results)} results.")
257
- print("[Pipeline] Advanced Pipeline Ranking finished.")
258
- return final_results[:5] # Return top 5
259
-
260
- def faiss_recall(self, resume_texts, job_description, top_k=50):
261
- """Stage 1: Use FAISS for initial recall to find top 50 resumes"""
262
- print("[faiss_recall] Method started.")
263
- st.text("FAISS Recall: Embedding job description...")
264
- job_embedding = self.get_embedding(job_description)
265
- print("[faiss_recall] Job description embedded.")
266
- st.text(f"FAISS Recall: Embedding {len(resume_texts)} resumes...")
267
- resume_embeddings = []
268
- progress_bar = st.progress(0)
269
-
270
- for i, text in enumerate(resume_texts):
271
- if text:
272
- embedding = self.embedding_model.encode(text[:8192],
273
- convert_to_numpy=True,
274
- normalize_embeddings=True)
275
- resume_embeddings.append(embedding)
276
- else:
277
- resume_embeddings.append(np.zeros(1024))
278
- progress_bar.progress((i + 1) / len(resume_texts))
279
- if i % 10 == 0: # Print progress every 10 resumes
280
- print(f"[faiss_recall] Embedded resume {i+1}/{len(resume_texts)}")
281
-
282
- progress_bar.empty()
283
- print("[faiss_recall] All resumes embedded.")
284
- st.text("FAISS Recall: Building FAISS index...")
285
- resume_embeddings = np.array(resume_embeddings).astype('float32')
286
- dimension = resume_embeddings.shape[1]
287
- index = faiss.IndexFlatIP(dimension) # Inner product for cosine similarity
288
- index.add(resume_embeddings)
289
- print("[faiss_recall] FAISS index built.")
290
- st.text("FAISS Recall: Searching index...")
291
- job_embedding = job_embedding.reshape(1, -1).astype('float32')
292
- scores, indices = index.search(job_embedding, min(top_k, len(resume_texts)))
293
- print("[faiss_recall] FAISS search complete.")
294
- return indices[0].tolist()
295
-
296
- def cross_encoder_rerank(self, resume_texts, job_description, top_50_indices, top_k=20):
297
- """Stage 2: Use Cross-Encoder to re-rank top 50 and select top 20"""
298
- print("[cross_encoder_rerank] Method started.")
299
- try:
300
- if not self.cross_encoder:
301
- st.error("Cross-encoder not loaded!")
302
- return [(idx, 0.0) for idx in top_50_indices[:top_k]]
303
-
304
- # Prepare pairs for cross-encoder
305
- pairs = []
306
- valid_indices = []
307
-
308
- for idx in top_50_indices:
309
- if idx < len(resume_texts) and resume_texts[idx]:
310
- # Truncate texts for cross-encoder
311
- job_snippet = job_description[:512]
312
- resume_snippet = resume_texts[idx][:512]
313
- pairs.append([job_snippet, resume_snippet])
314
- valid_indices.append(idx)
315
-
316
- if not pairs:
317
- return [(idx, 0.0) for idx in top_50_indices[:top_k]]
318
-
319
- st.text(f"Cross-Encoder: Preparing {len(pairs)} pairs for re-ranking...")
320
- print(f"[cross_encoder_rerank] Prepared {len(pairs)} pairs.")
321
- # Get cross-encoder scores
322
- progress_bar = st.progress(0)
323
- scores = []
324
-
325
- # Process in batches to avoid memory issues
326
- batch_size = 8
327
- for i in range(0, len(pairs), batch_size):
328
- batch = pairs[i:i+batch_size]
329
- batch_scores = self.cross_encoder.predict(batch)
330
- scores.extend(batch_scores)
331
- progress_bar.progress(min(1.0, (i + batch_size) / len(pairs)))
332
- print(f"[cross_encoder_rerank] Processed batch {i//batch_size + 1}")
333
-
334
- progress_bar.empty()
335
- print("[cross_encoder_rerank] All pairs scored.")
336
- st.text("Cross-Encoder: Re-ranking complete.")
337
-
338
- # Combine indices with scores and sort
339
- indexed_scores = list(zip(valid_indices, scores))
340
- indexed_scores.sort(key=lambda x: x[1], reverse=True)
341
-
342
- return indexed_scores[:top_k]
343
-
344
- except Exception as e:
345
- st.error(f"Error in cross-encoder re-ranking: {str(e)}")
346
- return [(idx, 0.0) for idx in top_50_indices[:top_k]]
347
-
348
- def add_bm25_scores(self, resume_texts, job_description, top_20_results):
349
- """Stage 3: Add BM25 scores to top 20 resumes"""
350
- print("[add_bm25_scores] Method started.")
351
- st.text("BM25: Calculating keyword scores...")
352
- try:
353
- # Get texts for top 20
354
- top_20_texts = [resume_texts[idx] for idx, _ in top_20_results]
355
-
356
- # Calculate BM25 scores
357
- bm25_scores = self.calculate_bm25_scores(top_20_texts, job_description)
358
-
359
- # Normalize BM25 scores to 0.1-0.2 range
360
- if bm25_scores and max(bm25_scores) > 0:
361
- max_bm25 = max(bm25_scores)
362
- min_bm25 = min(bm25_scores)
363
- if max_bm25 > min_bm25:
364
- normalized_bm25 = [
365
- 0.1 + 0.1 * (score - min_bm25) / (max_bm25 - min_bm25)
366
- for score in bm25_scores
367
- ]
368
- else:
369
- normalized_bm25 = [0.15] * len(bm25_scores)
370
- else:
371
- normalized_bm25 = [0.15] * len(top_20_results)
372
-
373
- # Combine with existing results
374
- results_with_bm25 = []
375
- for i, (idx, cross_score) in enumerate(top_20_results):
376
- bm25_score = normalized_bm25[i] if i < len(normalized_bm25) else 0.15
377
- results_with_bm25.append((idx, cross_score, bm25_score))
378
-
379
- print("[add_bm25_scores] BM25 scores calculated and normalized.")
380
- st.text("BM25: Keyword scores added.")
381
- return results_with_bm25
382
-
383
- except Exception as e:
384
- st.error(f"Error adding BM25 scores: {str(e)}")
385
- return [(idx, cross_score, 0.15) for idx, cross_score in top_20_results]
386
-
387
- def add_intent_scores(self, resume_texts, job_description, top_20_with_bm25):
388
- """Stage 4: Add LLM intent analysis scores"""
389
- print("[add_intent_scores] Method started.")
390
- st.text(f"LLM Intent: Analyzing intent for {len(top_20_with_bm25)} candidates (Qwen3-1.7B)...")
391
- results_with_intent = []
392
- progress_bar = st.progress(0)
393
-
394
- for i, (idx, cross_score, bm25_score) in enumerate(top_20_with_bm25):
395
- intent_score = self.analyze_intent(resume_texts[idx], job_description)
396
- results_with_intent.append((idx, cross_score, bm25_score, intent_score))
397
- progress_bar.progress((i + 1) / len(top_20_with_bm25))
398
- print(f"[add_intent_scores] Intent analyzed for candidate {i+1}")
399
-
400
- progress_bar.empty()
401
- print("[add_intent_scores] All intents analyzed.")
402
- st.text("LLM Intent: Analysis complete.")
403
- return results_with_intent
404
-
405
- def analyze_intent(self, resume_text, job_description):
406
- """Analyze candidate's intent using Qwen3-1.7B LLM with thinking enabled."""
407
- print(f"[analyze_intent] Analyzing intent for one resume (Qwen3-1.7B)...")
408
- st.text("LLM Intent: Analyzing intent (Qwen3-1.7B)...")
409
- try:
410
- resume_snippet = resume_text[:15000]
411
- job_snippet = job_description[:5000]
412
-
413
- prompt = f"""You are given a job description and a candidate's resume.\nAnalyze the candidate's resume in detail against the job description to determine if they are genuinely seeking this specific job, or if their profile is a more general fit or perhaps a mismatch.\nProvide a step-by-step thought process for your decision.\nFinally, clearly answer: \"Is the candidate likely seeking THIS SPECIFIC job? Respond with 'Yes', 'Maybe', or 'No' and give a brief justification based on your thought process.\"\n\nJob Description:\n{job_snippet}\n\nCandidate Resume:\n{resume_snippet}\n\nResponse format:\n<think>\n[Your detailed step-by-step thought process comparing resume to JD, noting specific alignments or mismatches that indicate intent. Be thorough.]\n</think>\nIntent: [Yes/Maybe/No]\nReason: [Brief justification based on your thought process]"""
414
-
415
- response_text = generate_qwen3_response(
416
- prompt,
417
- st.session_state.qwen3_1_7b_tokenizer,
418
- st.session_state.qwen3_1_7b_model,
419
- max_new_tokens=20000
420
- )
421
- print(f"[analyze_intent] Qwen3-1.7B full response (first 100 chars): {response_text[:100]}...")
422
-
423
- thinking_content = "No detailed thought process extracted."
424
- intent_decision_part = response_text
425
-
426
- think_start_tag = "<think>"
427
- think_end_tag = "</think>"
428
-
429
- start_index = response_text.find(think_start_tag)
430
- end_index = response_text.rfind(think_end_tag)
431
-
432
- if start_index != -1 and end_index != -1 and start_index < end_index:
433
- thinking_content = response_text[start_index + len(think_start_tag):end_index].strip()
434
- intent_decision_part = response_text[end_index + len(think_end_tag):].strip()
435
- print(f"[analyze_intent] Thinking content extracted (first 50 chars): {thinking_content[:50]}...")
436
- else:
437
- print("[analyze_intent] <think> block not found or malformed in response.")
438
-
439
- response_lower = intent_decision_part.lower()
440
- intent_score = 0.1
441
- if 'intent: yes' in response_lower or 'intent:yes' in response_lower:
442
- intent_score = 0.3
443
- elif 'intent: no' in response_lower or 'intent:no' in response_lower:
444
- intent_score = 0.0
445
-
446
- print(f"[analyze_intent] Parsed Intent: {intent_score}, Decision part: {intent_decision_part[:100]}...")
447
- return intent_score
448
-
449
- except Exception as e:
450
- st.warning(f"Error analyzing intent with Qwen3-1.7B: {str(e)}")
451
- print(f"[analyze_intent] EXCEPTION: {str(e)}")
452
- return 0.1
453
-
454
- def calculate_final_scores(self, results_with_all_scores):
455
- """Stage 5: Calculate final combined scores"""
456
- print("[calculate_final_scores] Method started.")
457
- st.text("Final Ranking: Calculating combined scores...")
458
- try:
459
- final_results = []
460
-
461
- for idx, cross_score, bm25_score, intent_score in results_with_all_scores:
462
- # Normalize cross-encoder score to 0-1 range
463
- normalized_cross = max(0, min(1, cross_score))
464
-
465
- # Final Score = Cross-Encoder (0-1) + BM25 (0.1-0.2) + Intent (0-0.3)
466
- final_score = normalized_cross + bm25_score + intent_score
467
-
468
- final_results.append({
469
- 'index': idx,
470
- 'cross_encoder_score': normalized_cross,
471
- 'bm25_score': bm25_score,
472
- 'intent_score': intent_score,
473
- 'final_score': final_score
474
- })
475
-
476
- # Sort by final score
477
- final_results.sort(key=lambda x: x['final_score'], reverse=True)
478
-
479
- print("[calculate_final_scores] Final scores calculated and sorted.")
480
- st.text("Final Ranking: Complete.")
481
- return final_results
482
-
483
- except Exception as e:
484
- st.error(f"Error calculating final scores: {str(e)}")
485
- return []
486
-
487
- def extract_skills(self, text, job_description):
488
- """Extract skills from resume based on job description"""
489
- if not text:
490
- return []
491
-
492
- # Common tech skills
493
- common_skills = [
494
- "python", "java", "javascript", "react", "angular", "vue", "node.js",
495
- "express", "django", "flask", "spring", "sql", "nosql", "html", "css",
496
- "aws", "azure", "gcp", "docker", "kubernetes", "jenkins", "git", "github",
497
- "agile", "scrum", "jira", "ci/cd", "devops", "microservices", "rest", "api",
498
- "machine learning", "deep learning", "data science", "artificial intelligence",
499
- "tensorflow", "pytorch", "keras", "scikit-learn", "pandas", "numpy",
500
- "matplotlib", "seaborn", "jupyter", "r", "sas", "spss", "tableau", "powerbi",
501
- "excel", "mysql", "postgresql", "mongodb", "redis", "elasticsearch",
502
- "kafka", "rabbitmq", "spark", "hadoop", "hive", "airflow", "linux", "unix"
503
- ]
504
-
505
- # Extract potential skills from job description
506
- job_words = set(word.lower() for word in word_tokenize(job_description) if len(word) > 2)
507
-
508
- # Find matching skills
509
- found_skills = []
510
- text_lower = text.lower()
511
-
512
- # Check common skills that appear in both resume and job description
513
- for skill in common_skills:
514
- if skill in text_lower and any(skill in job_word for job_word in job_words):
515
- found_skills.append(skill)
516
-
517
- # Check for skills mentioned in job description
518
- for word in job_words:
519
- if len(word) > 3 and word in text_lower and word not in found_skills:
520
- # Basic filter to avoid common words
521
- if word not in ['with', 'have', 'that', 'this', 'from', 'what', 'when', 'where']:
522
- found_skills.append(word)
523
-
524
- return list(set(found_skills))[:15] # Return top 15 unique skills
525
-
526
- def create_download_link(df, filename="resume_screening_results.csv"):
527
- """Create download link for results"""
528
- csv = df.to_csv(index=False)
529
- b64 = base64.b64encode(csv.encode()).decode()
530
- return f'<a href="data:file/csv;base64,{b64}" download="{filename}" class="download-btn">📥 Download Results CSV</a>'
531
-
532
- # Main App Interface
533
- st.title("🎯 AI-Powered Resume Screener")
534
- st.markdown("*Find the perfect candidates using BAAI/bge-large-en-v1.5 embeddings and Qwen3-1.7B for intent analysis*")
155
+ # Display Model Loading Status from Global Init
156
+ st.subheader("🤖 Model Loading Status")
157
+ col1, col2 = st.columns(2)
158
+ with col1:
159
+ if st.session_state.get('embedding_model_error'):
160
+ st.error(f"Embedding Model: {st.session_state.embedding_model_error}")
161
+ elif st.session_state.get('embedding_model'):
162
+ st.success("✅ Embedding Model (BAAI/bge-large-en-v1.5) loaded.")
163
+ else:
164
+ st.warning("⏳ Embedding Model loading or not found (check console).")
165
+
166
+ if st.session_state.get('cross_encoder_error'):
167
+ st.error(f"Cross-Encoder Model: {st.session_state.cross_encoder_error}")
168
+ elif st.session_state.get('cross_encoder'):
169
+ st.success("✅ Cross-Encoder Model (ms-marco-MiniLM-L6-v2) loaded.")
170
+ else:
171
+ st.warning("⏳ Cross-Encoder Model loading or not found (check console).")
172
+ with col2:
173
+ if st.session_state.get('qwen3_1_7b_tokenizer_error'):
174
+ st.error(f"Qwen3-1.7B Tokenizer: {st.session_state.qwen3_1_7b_tokenizer_error}")
175
+ elif st.session_state.get('qwen3_1_7b_tokenizer'):
176
+ st.success("✅ Qwen3-1.7B Tokenizer loaded.")
177
+ else:
178
+ st.warning("⏳ Qwen3-1.7B Tokenizer loading or not found (check console).")
179
+
180
+ if st.session_state.get('qwen3_1_7b_model_error'):
181
+ st.error(f"Qwen3-1.7B Model: {st.session_state.qwen3_1_7b_model_error}")
182
+ elif st.session_state.get('qwen3_1_7b_model'):
183
+ st.success("✅ Qwen3-1.7B Model loaded.")
184
+ else:
185
+ st.warning("⏳ Qwen3-1.7B Model loading or not found (check console).")
535
  st.markdown("---")
536
 
537
- # Initialize screener
188
+ # Initialize screener (after global model loading attempts)
538
  screener = ResumeScreener()
539
 
540
  # Job Description Input
@@ -725,7 +376,11 @@ col1, col2 = st.columns([1, 1])
725
 
726
  with col1:
727
  if st.button("🚀 Advanced Pipeline Analysis",
728
- disabled=not (job_description and st.session_state.resume_texts),
379
+ disabled=not (job_description and st.session_state.resume_texts and
380
+ st.session_state.get('embedding_model') and
381
+ st.session_state.get('cross_encoder') and
382
+ st.session_state.get('qwen3_1_7b_model') and
383
+ st.session_state.get('qwen3_1_7b_tokenizer')),
729
  type="primary",
730
  help="Run the complete 5-stage advanced pipeline"):
731
  print("--- Advanced Pipeline Analysis Button Clicked ---")
 
requirements.txt CHANGED
@@ -14,6 +14,5 @@ huggingface-hub==0.30.0
14
  bitsandbytes==0.44.1
15
  accelerate==0.27.2
16
  datasets==2.18.0
17
- sentence-transformers==2.6.1
18
- plotly==5.18.0
17
+ sentence-transformers==2.7.0
19
  einops
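
The dependency change tracks the code change: sentence-transformers is bumped from 2.6.1 to 2.7.0 alongside the new model-loading code, and the plotly pin is dropped outright; none of the hunks shown still reference plotly.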
 