root committed on
Commit c71ed9b · 1 Parent(s): 70c101d
Files changed (1):
  1. app.py +312 -20
app.py CHANGED
@@ -110,41 +110,330 @@ if st.session_state.qwen3_1_7b_tokenizer is None and st.session_state.qwen3_1_7b
 
 # Load Qwen3-1.7B Model
 if st.session_state.qwen3_1_7b_model is None and st.session_state.qwen3_1_7b_model_error is None:
-    print("[Global Init] Loading Qwen3-1.7B Model...")
     try:
         st.session_state.qwen3_1_7b_model = AutoModelForCausalLM.from_pretrained(
-            "Qwen/Qwen3-1.7B", torch_dtype="auto", device_map="auto"
         )
-        print("[Global Init] Qwen3-1.7B Model Loaded.")
-    except Exception as e:
-        error_msg = f"Failed to load Qwen3-1.7B Model: {str(e)}"
-        print(f"❌ [Global Init] {error_msg}")
-        st.session_state.qwen3_1_7b_model_error = error_msg
 
 # --- End of Global Model Loading Section ---
 
-# Sidebar configuration
 with st.sidebar:
     st.title("⚙️ Configuration")
-
     # Advanced options
     st.subheader("Advanced Options")
-    top_k = st.selectbox("Number of results to display", [1,2,3,4,5], index=4)
 
     # LLM Settings
     st.subheader("LLM Settings")
-    use_llm_explanations = st.checkbox("Generate AI Explanations", value=True)
-    if use_llm_explanations:
-        hf_token = st.text_input("Hugging Face Token (optional)", type="password",
-                                 help="Enter your HF token for better rate limits")
 
     st.markdown("---")
     st.markdown("### 🤖 Advanced Pipeline")
     st.markdown("- **Stage 1**: FAISS Recall (Top 50)")
     st.markdown("- **Stage 2**: Cross-Encoder Re-ranking (Top 20)")
     st.markdown("- **Stage 3**: BM25 Keyword Matching")
-    st.markdown("- **Stage 4**: LLM Intent Analysis")
-    st.markdown("- **Final**: Combined Scoring (Top 5)")
     st.markdown("### 📊 Models Used")
     st.markdown("- **Embedding**: BAAI/bge-large-en-v1.5")
     st.markdown("- **Cross-Encoder**: ms-marco-MiniLM-L6-v2")
@@ -152,7 +441,12 @@ with st.sidebar:
     st.markdown("### 📈 Scoring Formula")
     st.markdown("**Final Score = Cross-Encoder (0-1) + BM25 (0.1-0.2) + Intent (0-0.3)**")
 
-# Display Model Loading Status from Global Init
 st.subheader("🤖 Model Loading Status")
 col1, col2 = st.columns(2)
 with col1:
@@ -162,7 +456,6 @@ with col1:
         st.success("✅ Embedding Model (BAAI/bge-large-en-v1.5) loaded.")
     else:
         st.warning("⏳ Embedding Model loading or not found (check console).")
-
     if st.session_state.get('cross_encoder_error'):
         st.error(f"Cross-Encoder Model: {st.session_state.cross_encoder_error}")
     elif st.session_state.get('cross_encoder'):
@@ -176,7 +469,6 @@ with col2:
         st.success("✅ Qwen3-1.7B Tokenizer loaded.")
     else:
         st.warning("⏳ Qwen3-1.7B Tokenizer loading or not found (check console).")
-
     if st.session_state.get('qwen3_1_7b_model_error'):
         st.error(f"Qwen3-1.7B Model: {st.session_state.qwen3_1_7b_model_error}")
     elif st.session_state.get('qwen3_1_7b_model'):
@@ -185,7 +477,7 @@ with col2:
         st.warning("⏳ Qwen3-1.7B Model loading or not found (check console).")
 st.markdown("---")
 
-# Initialize screener (after global model loading attempts)
 screener = ResumeScreener()
 
 # Job Description Input
 
 
 # Load Qwen3-1.7B Model
 if st.session_state.qwen3_1_7b_model is None and st.session_state.qwen3_1_7b_model_error is None:
+    print("[Global Init] Loading Qwen3-1.7B Model (attempting with device_map='auto')...")
     try:
         st.session_state.qwen3_1_7b_model = AutoModelForCausalLM.from_pretrained(
+            "Qwen/Qwen3-1.7B",
+            torch_dtype="auto",
+            device_map="auto",
+            trust_remote_code=True  # if required by this specific model
         )
+        print("[Global Init] Qwen3-1.7B Model Loaded with device_map='auto'.")
+    except Exception as e_dev_map:
+        print(f"⚠️ [Global Init] Failed to load Qwen3-1.7B with device_map='auto': {str(e_dev_map)}")
+        print("[Global Init] Retrying Qwen3-1.7B load without device_map (will use default single device)...")
+        try:
+            st.session_state.qwen3_1_7b_model = AutoModelForCausalLM.from_pretrained(
+                "Qwen/Qwen3-1.7B",
+                torch_dtype="auto",
+                # No device_map here; let Hugging Face decide, or use CUDA if available
+                trust_remote_code=True  # if required
+            )
+            print("[Global Init] Qwen3-1.7B Model Loaded (fallback device handling).")
+        except Exception as e_fallback:
+            error_msg = f"Failed to load Qwen3-1.7B Model (fallback): {str(e_fallback)}"
+            print(f"❌ [Global Init] {error_msg}")
+            st.session_state.qwen3_1_7b_model_error = error_msg
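+# Note: device_map="auto" requires the `accelerate` package and may shard the model across
+# devices; the fallback load above stays on a single default device and avoids that dependency.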
 
 # --- End of Global Model Loading Section ---
 
+# --- Class Definitions and Helper Functions ---
+
+def generate_qwen3_response(prompt, tokenizer, model, max_new_tokens=200):
+    # ... (implementation of generate_qwen3_response)
+    messages = [{"role": "user", "content": prompt}]
+    text = tokenizer.apply_chat_template(
+        messages,
+        tokenize=False,
+        add_generation_prompt=True,
+        enable_thinking=True  # as per the Qwen3-1.7B docs for thinking mode
+    )
+    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+    generated_ids = model.generate(
+        **model_inputs,
+        max_new_tokens=max_new_tokens
+    )
+    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
+    response = tokenizer.decode(output_ids, skip_special_tokens=True).strip("\n")
+    return response
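+# Example usage (hypothetical values), assuming the globals loaded above:
+#   reply = generate_qwen3_response("Summarize this resume.", st.session_state.qwen3_1_7b_tokenizer,
+#                                   st.session_state.qwen3_1_7b_model, max_new_tokens=256)
+# With enable_thinking=True, the decoded reply may contain a <think>...</think> block before
+# the final answer; analyze_intent below strips it.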
+
+class ResumeScreener:  # ensure this class definition comes BEFORE it is instantiated below
+    def __init__(self):
+        # ... (init logic as before, referencing st.session_state for models)
+        print("[ResumeScreener] Initializing with references to globally loaded models...")
+        self.embedding_model = st.session_state.get('embedding_model')
+        self.cross_encoder = st.session_state.get('cross_encoder')
+
+        if self.embedding_model:
+            print("[ResumeScreener] Embedding model reference set.")
+        else:
+            print("[ResumeScreener] Embedding model not available (check loading errors).")
+
+        if self.cross_encoder:
+            print("[ResumeScreener] Cross-encoder model reference set.")
+        else:
+            print("[ResumeScreener] Cross-encoder model not available (check loading errors).")
+
+        print("[ResumeScreener] Initialization complete.")
+
+    # ... (all other methods of ResumeScreener: extract_text_from_file, get_embedding,
+    #      calculate_bm25_scores, advanced_pipeline_ranking, faiss_recall, cross_encoder_rerank,
+    #      add_bm25_scores, add_intent_scores, analyze_intent, calculate_final_scores, extract_skills)
+    # Make sure all methods are correctly indented within the class.
+
+    def extract_text_from_file(self, file_path, file_type):
+        # ... (implementation)
+        try:
+            if file_type == "pdf":
+                with open(file_path, 'rb') as file:
+                    with pdfplumber.open(file) as pdf:
+                        text = ""
+                        for page in pdf.pages:
+                            text += page.extract_text() or ""
+                    if not text.strip():
+                        file.seek(0)
+                        reader = PyPDF2.PdfReader(file)
+                        text = ""
+                        for page_num in range(len(reader.pages)):
+                            text += reader.pages[page_num].extract_text() or ""
+                    return text
+            elif file_type == "docx":
+                doc = Document(file_path)
+                return " ".join([paragraph.text for paragraph in doc.paragraphs])
+            elif file_type == "txt":
+                with open(file_path, 'r', encoding='utf-8') as file:
+                    return file.read()
+            elif file_type == "csv":
+                with open(file_path, 'r', encoding='utf-8') as file:
+                    csv_reader = csv.reader(file)
+                    return " ".join([" ".join(row) for row in csv_reader])
+            return ""  # unsupported file types yield an empty string rather than None
+        except Exception as e:
+            st.error(f"Error extracting text from {file_path}: {str(e)}")
+            return ""
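+    # (pdfplumber, PyPDF2, python-docx's Document, and the csv module are assumed to be
+    #  imported near the top of app.py, outside the region shown in this diff.)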
+
+    def get_embedding(self, text):
+        if self.embedding_model is None:
+            st.error("Embedding model is not available!")
+            return np.zeros(1024)
+        try:
+            if len(text) < 500:
+                text = "Represent this sentence for searching relevant passages: " + text
+            text = text[:8192] if text else ""
+            embedding = self.embedding_model.encode(text, convert_to_numpy=True, normalize_embeddings=True)
+            return embedding
+        except Exception as e:
+            st.error(f"Error generating embedding: {str(e)}")
+            return np.zeros(1024)
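+    # np.zeros(1024) matches the 1024-dimensional vectors produced by BAAI/bge-large-en-v1.5,
+    # and the short-query prefix above is the instruction BGE v1.5 models expect for retrieval queries.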
+
+    def calculate_bm25_scores(self, resume_texts, job_description):
+        try:
+            job_tokens = word_tokenize(job_description.lower())
+            corpus = [word_tokenize(text.lower()) for text in resume_texts if text and text.strip()]
+            if not corpus:
+                return [0.0] * len(resume_texts)
+            bm25 = BM25Okapi(corpus)
+            scores = bm25.get_scores(job_tokens)
+            return scores.tolist()
+        except Exception as e:
+            st.error(f"Error calculating BM25 scores: {str(e)}")
+            return [0.0] * len(resume_texts)
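+    # Caution: empty texts are dropped when building the corpus, so the returned list can be
+    # shorter than resume_texts and positionally misaligned; add_bm25_scores below avoids this
+    # because the top-20 candidate texts it passes in are already non-empty.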
+
+    def advanced_pipeline_ranking(self, resume_texts, job_description):
+        print("[Pipeline] Advanced Pipeline Ranking started.")
+        if not resume_texts:
+            return []
+        st.info("🔍 Stage 1: FAISS Recall - Finding top candidates...")
+        top_50_indices = self.faiss_recall(resume_texts, job_description, top_k=50)
+        st.info("🎯 Stage 2: Cross-Encoder Re-ranking - Selecting top candidates...")
+        top_20_results = self.cross_encoder_rerank(resume_texts, job_description, top_50_indices, top_k=20)
+        st.info("🔤 Stage 3: BM25 Keyword Matching...")
+        top_20_with_bm25 = self.add_bm25_scores(resume_texts, job_description, top_20_results)
+        st.info("🤖 Stage 4: LLM Intent Analysis (Qwen3-1.7B)...")
+        top_20_with_intent = self.add_intent_scores(resume_texts, job_description, top_20_with_bm25)
+        st.info("🏆 Stage 5: Final Combined Ranking...")
+        final_results = self.calculate_final_scores(top_20_with_intent)
+        print("[Pipeline] Advanced Pipeline Ranking finished.")
+        return final_results[:st.session_state.get('top_k', 5)]
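+    # Funnel: all resumes -> top 50 (FAISS recall) -> top 20 (cross-encoder) -> BM25 and
+    # LLM intent scores added -> top_k (default 5) returned after final combined scoring.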
+
+    def faiss_recall(self, resume_texts, job_description, top_k=50):
+        print("[faiss_recall] Method started.")
+        st.text("FAISS Recall: Embedding job description...")
+        job_embedding = self.get_embedding(job_description)
+        st.text(f"FAISS Recall: Embedding {len(resume_texts)} resumes...")
+        resume_embeddings = []
+        progress_bar = st.progress(0)
+        for i, text in enumerate(resume_texts):
+            if text:
+                embedding = self.embedding_model.encode(text[:8192], convert_to_numpy=True, normalize_embeddings=True)
+                resume_embeddings.append(embedding)
+            else:
+                resume_embeddings.append(np.zeros(1024))
+            progress_bar.progress((i + 1) / len(resume_texts))
+        progress_bar.empty()
+        resume_embeddings_np = np.array(resume_embeddings).astype('float32')  # renamed variable
+        if resume_embeddings_np.ndim == 1:  # handle the case of a single resume
+            resume_embeddings_np = resume_embeddings_np.reshape(1, -1)
+        if resume_embeddings_np.size == 0:
+            print("[faiss_recall] No resume embeddings to add to FAISS index.")
+            return []  # or handle the error appropriately
+
+        dimension = resume_embeddings_np.shape[1]
+        index = faiss.IndexFlatIP(dimension)
+        index.add(resume_embeddings_np)
+        job_embedding_np = job_embedding.reshape(1, -1).astype('float32')  # renamed variable
+        scores, indices = index.search(job_embedding_np, min(top_k, len(resume_texts)))
+        return indices[0].tolist()
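+    # With L2-normalized embeddings, the inner product computed by IndexFlatIP equals
+    # cosine similarity, so the search returns the most semantically similar resumes.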
+
+    def cross_encoder_rerank(self, resume_texts, job_description, top_50_indices, top_k=20):
+        print("[cross_encoder_rerank] Method started.")
+        if not self.cross_encoder:
+            st.error("Cross-encoder model is not available!")
+            return [(idx, 0.0) for idx in top_50_indices[:top_k]]
+        pairs = []
+        valid_indices = []
+        for idx in top_50_indices:
+            if idx < len(resume_texts) and resume_texts[idx]:
+                job_snippet = job_description[:512]
+                resume_snippet = resume_texts[idx][:512]
+                pairs.append([job_snippet, resume_snippet])
+                valid_indices.append(idx)
+        if not pairs:
+            return [(idx, 0.0) for idx in top_50_indices[:top_k]]
+        st.text(f"Cross-Encoder: Preparing {len(pairs)} pairs for re-ranking...")
+        scores = []
+        batch_size = 8
+        progress_bar = st.progress(0)
+        for i in range(0, len(pairs), batch_size):
+            batch = pairs[i:i+batch_size]
+            batch_scores = self.cross_encoder.predict(batch)
+            scores.extend(batch_scores)
+            progress_bar.progress(min(1.0, (i + batch_size) / len(pairs)))
+        progress_bar.empty()
+        indexed_scores = list(zip(valid_indices, scores))
+        indexed_scores.sort(key=lambda x: x[1], reverse=True)
+        return indexed_scores[:top_k]
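+    # The ms-marco cross-encoder emits raw relevance scores rather than probabilities;
+    # calculate_final_scores below clamps them into [0, 1] before combining.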
+
+    def add_bm25_scores(self, resume_texts, job_description, top_20_results):
+        st.text("BM25: Calculating keyword scores...")
+        top_20_texts = [resume_texts[idx] for idx, _ in top_20_results]
+        bm25_scores_raw = self.calculate_bm25_scores(top_20_texts, job_description)
+        if bm25_scores_raw and max(bm25_scores_raw) > 0:
+            max_bm25, min_bm25 = max(bm25_scores_raw), min(bm25_scores_raw)
+            if max_bm25 > min_bm25:
+                normalized_bm25 = [0.1 + 0.1 * (s - min_bm25) / (max_bm25 - min_bm25) for s in bm25_scores_raw]
+            else:
+                normalized_bm25 = [0.15] * len(bm25_scores_raw)
+        else:
+            normalized_bm25 = [0.15] * len(top_20_results)
+        results_with_bm25 = []
+        for i, (idx, cross_score) in enumerate(top_20_results):
+            results_with_bm25.append((idx, cross_score, normalized_bm25[i] if i < len(normalized_bm25) else 0.15))
+        return results_with_bm25
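+    # Min-max rescaling maps raw BM25 scores onto [0.1, 0.2], matching the "BM25 (0.1-0.2)"
+    # term of the sidebar's scoring formula; ties collapse to the midpoint 0.15.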
+
+    def add_intent_scores(self, resume_texts, job_description, top_20_with_bm25):
+        st.text(f"LLM Intent: Analyzing intent for {len(top_20_with_bm25)} candidates (Qwen3-1.7B)...")
+        results_with_intent = []
+        progress_bar = st.progress(0)
+        for i, (idx, cross_score, bm25_score) in enumerate(top_20_with_bm25):
+            intent_score = self.analyze_intent(resume_texts[idx], job_description)
+            results_with_intent.append((idx, cross_score, bm25_score, intent_score))
+            progress_bar.progress((i + 1) / len(top_20_with_bm25))
+        progress_bar.empty()
+        return results_with_intent
+
+    def analyze_intent(self, resume_text, job_description):
+        print("[analyze_intent] Analyzing intent for one resume (Qwen3-1.7B)...")
+        st.text("LLM Intent: Analyzing intent (Qwen3-1.7B)...")
+        try:
+            resume_snippet = resume_text[:15000]
+            job_snippet = job_description[:5000]
+            prompt = f"""You are given a job description and a candidate's resume... (rest of prompt)"""
+            # ... (rest of analyze_intent, using st.session_state.qwen3_1_7b_tokenizer and _model)
+            response_text = generate_qwen3_response(
+                prompt,
+                st.session_state.qwen3_1_7b_tokenizer,
+                st.session_state.qwen3_1_7b_model,
+                max_new_tokens=20000
+            )
+            # ... (parsing logic for response_text) ...
+            thinking_content = "No detailed thought process extracted."
+            intent_decision_part = response_text
+            think_start_tag = "<think>"
+            think_end_tag = "</think>"
+            start_index = response_text.find(think_start_tag)
+            end_index = response_text.rfind(think_end_tag)
+            if start_index != -1 and end_index != -1 and start_index < end_index:
+                thinking_content = response_text[start_index + len(think_start_tag):end_index].strip()
+                intent_decision_part = response_text[end_index + len(think_end_tag):].strip()
+            response_lower = intent_decision_part.lower()
+            intent_score = 0.1
+            if 'intent: yes' in response_lower or 'intent:yes' in response_lower:
+                intent_score = 0.3
+            elif 'intent: no' in response_lower or 'intent:no' in response_lower:
+                intent_score = 0.0
+            return intent_score
+        except Exception as e:
+            st.warning(f"Error analyzing intent with Qwen3-1.7B: {str(e)}")
+            return 0.1
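+    # Decision mapping: "Intent: Yes" -> 0.3, "Intent: No" -> 0.0, anything else (including
+    # errors) -> neutral 0.1. The elided prompt is assumed to ask the model to answer in
+    # exactly that "Intent: ..." format after its <think> block.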
+
+    def calculate_final_scores(self, results_with_all_scores):
+        final_results = []
+        for idx, cross_score, bm25_score, intent_score in results_with_all_scores:
+            normalized_cross = max(0, min(1, cross_score))
+            final_score = normalized_cross + bm25_score + intent_score
+            final_results.append({
+                'index': idx, 'cross_encoder_score': normalized_cross,
+                'bm25_score': bm25_score, 'intent_score': intent_score,
+                'final_score': final_score
+            })
+        final_results.sort(key=lambda x: x['final_score'], reverse=True)
+        return final_results
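+    # Worked example: a clamped cross-encoder score of 0.85, a BM25 score of 0.15, and a
+    # positive intent (0.3) give 0.85 + 0.15 + 0.3 = 1.30 out of a maximum of 1.5.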
391
+
392
+ def extract_skills(self, text, job_description):
393
+ # ... (implementation)
394
+ if not text: return []
395
+ common_skills = ["python", "java", "javascript", "react", "angular", "vue", "node.js", "express", "django", "flask", "spring", "sql", "nosql", "html", "css", "aws", "azure", "gcp", "docker", "kubernetes", "jenkins", "git", "github", "agile", "scrum", "jira", "ci/cd", "devops", "microservices", "rest", "api", "machine learning", "deep learning", "data science", "artificial intelligence", "tensorflow", "pytorch", "keras", "scikit-learn", "pandas", "numpy", "matplotlib", "seaborn", "jupyter", "r", "sas", "spss", "tableau", "powerbi", "excel", "mysql", "postgresql", "mongodb", "redis", "elasticsearch", "kafka", "rabbitmq", "spark", "hadoop", "hive", "airflow", "linux", "unix"]
396
+ job_words = set(word.lower() for word in word_tokenize(job_description) if len(word) > 2)
397
+ found_skills = []
398
+ text_lower = text.lower()
399
+ for skill in common_skills:
400
+ if skill in text_lower and any(skill in job_word for job_word in job_words):
401
+ found_skills.append(skill)
402
+ for word in job_words:
403
+ if len(word) > 3 and word in text_lower and word not in found_skills and word not in ['with', 'have', 'that', 'this', 'from', 'what', 'when', 'where']:
404
+ found_skills.append(word)
405
+ return list(set(found_skills))[:15]
+
+def create_download_link(df, filename="resume_screening_results.csv"):
+    # ... (implementation)
+    csv_data = df.to_csv(index=False)  # use a name other than `csv` so the stdlib csv module isn't shadowed
+    b64 = base64.b64encode(csv_data.encode()).decode()
+    return f'<a href="data:file/csv;base64,{b64}" download="{filename}" class="download-btn">📥 Download Results CSV</a>'
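+# (base64 is assumed to be imported at the top of app.py, and the "download-btn" CSS class
+#  to be defined in the app's custom styles elsewhere in the file.)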
+
+# --- Sidebar Configuration (must come after global model loading and the class definitions above) ---
 with st.sidebar:
     st.title("⚙️ Configuration")
     # Advanced options
     st.subheader("Advanced Options")
+    # Ensure top_k is in session_state if it's used by advanced_pipeline_ranking before the button press
+    if 'top_k' not in st.session_state:
+        st.session_state.top_k = 5  # default value
+    st.session_state.top_k = st.selectbox("Number of results to display", [1,2,3,4,5], index=st.session_state.top_k-1, key="top_k_selector")
 
     # LLM Settings
     st.subheader("LLM Settings")
+    # use_llm_explanations = st.checkbox("Generate AI Explanations", value=True)  # this was removed earlier
+    # if use_llm_explanations:
+    #     hf_token = st.text_input("Hugging Face Token (optional)", type="password",
+    #                              help="Enter your HF token for better rate limits")
 
     st.markdown("---")
     st.markdown("### 🤖 Advanced Pipeline")
     st.markdown("- **Stage 1**: FAISS Recall (Top 50)")
     st.markdown("- **Stage 2**: Cross-Encoder Re-ranking (Top 20)")
     st.markdown("- **Stage 3**: BM25 Keyword Matching")
+    st.markdown("- **Stage 4**: LLM Intent Analysis (Qwen3-1.7B)")
+    st.markdown("- **Final**: Combined Scoring")  # updated this line
     st.markdown("### 📊 Models Used")
     st.markdown("- **Embedding**: BAAI/bge-large-en-v1.5")
     st.markdown("- **Cross-Encoder**: ms-marco-MiniLM-L6-v2")
 
     st.markdown("### 📈 Scoring Formula")
     st.markdown("**Final Score = Cross-Encoder (0-1) + BM25 (0.1-0.2) + Intent (0-0.3)**")
 
+# --- Main App Interface (must come after global model loading and class definitions) ---
+st.title("🎯 AI-Powered Resume Screener")
+# ... (Model Loading Status display as before)
+# ...
+st.markdown("*Find the perfect candidates using BAAI/bge-large-en-v1.5 embeddings and Qwen3-1.7B for intent analysis*")
+
 st.subheader("🤖 Model Loading Status")
 col1, col2 = st.columns(2)
 with col1:
 
         st.success("✅ Embedding Model (BAAI/bge-large-en-v1.5) loaded.")
     else:
         st.warning("⏳ Embedding Model loading or not found (check console).")
     if st.session_state.get('cross_encoder_error'):
         st.error(f"Cross-Encoder Model: {st.session_state.cross_encoder_error}")
     elif st.session_state.get('cross_encoder'):
 
         st.success("✅ Qwen3-1.7B Tokenizer loaded.")
     else:
         st.warning("⏳ Qwen3-1.7B Tokenizer loading or not found (check console).")
     if st.session_state.get('qwen3_1_7b_model_error'):
         st.error(f"Qwen3-1.7B Model: {st.session_state.qwen3_1_7b_model_error}")
     elif st.session_state.get('qwen3_1_7b_model'):
 
         st.warning("⏳ Qwen3-1.7B Model loading or not found (check console).")
 st.markdown("---")
 
+# Initialize screener (this line was causing a NameError; ensure the class is defined above)
 screener = ResumeScreener()
 
 # Job Description Input