root committed on
Commit 3adcf09 · 1 Parent(s): c71ed9b
Files changed (1)
  1. app.py +107 -61
app.py CHANGED
@@ -15,7 +15,7 @@ from docx import Document
 import csv
 from datasets import load_dataset
 import gc
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 import time
 import faiss
 import re
@@ -40,8 +40,8 @@ st.set_page_config(
 keys_to_initialize = {
     'embedding_model': None, 'embedding_model_error': None,
     'cross_encoder': None, 'cross_encoder_error': None,
-    'qwen3_1_7b_tokenizer': None, 'qwen3_1_7b_tokenizer_error': None,
-    'qwen3_1_7b_model': None, 'qwen3_1_7b_model_error': None,
+    'qwen3_4b_tokenizer': None, 'qwen3_4b_tokenizer_error': None,
+    'qwen3_4b_model': None, 'qwen3_4b_model_error': None,
     'results': [], 'resume_texts': [], 'file_names': [], 'current_job_description': ""
     # Add any other app-specific session state keys here if needed
 }
@@ -59,17 +59,18 @@ if st.session_state.embedding_model is None and st.session_state.embedding_model
         )
         print(f"[Global Init] Embedding Model (BAAI/bge-large-en-v1.5) LOADED with device_map='auto'.")
     except Exception as e:
-        if "device_map" in str(e).lower() and "unexpected keyword argument" in str(e).lower():
+        error_str = str(e) if e else ""
+        if "device_map" in error_str.lower() and "unexpected keyword argument" in error_str.lower():
             print("⚠️ [Global Init] device_map='auto' not supported for SentenceTransformer. Falling back to default device handling.")
             try:
                 st.session_state.embedding_model = SentenceTransformer('BAAI/bge-large-en-v1.5')
                 print(f"[Global Init] Embedding Model (BAAI/bge-large-en-v1.5) LOADED (fallback device handling).")
             except Exception as e_fallback:
-                error_msg = f"Failed to load Embedding Model (fallback): {str(e_fallback)}"
+                error_msg = f"Failed to load Embedding Model (fallback): {str(e_fallback) if e_fallback else 'Unknown error'}"
                 print(f"❌ [Global Init] {error_msg}")
                 st.session_state.embedding_model_error = error_msg
         else:
-            error_msg = f"Failed to load Embedding Model: {str(e)}"
+            error_msg = f"Failed to load Embedding Model: {error_str}"
             print(f"❌ [Global Init] {error_msg}")
             st.session_state.embedding_model_error = error_msg
 
@@ -83,57 +84,69 @@ if st.session_state.cross_encoder is None and st.session_state.cross_encoder_err
         )
         print(f"[Global Init] Cross-Encoder Model (ms-marco-MiniLM-L6-v2) LOADED with device_map='auto'.")
     except Exception as e:
-        if "device_map" in str(e).lower() and "unexpected keyword argument" in str(e).lower():
+        error_str = str(e) if e else ""
+        if "device_map" in error_str.lower() and "unexpected keyword argument" in error_str.lower():
             print("⚠️ [Global Init] device_map='auto' not supported for CrossEncoder. Falling back to default device handling.")
             try:
                 st.session_state.cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L6-v2')
                 print(f"[Global Init] Cross-Encoder Model (ms-marco-MiniLM-L6-v2) LOADED (fallback device handling).")
             except Exception as e_fallback:
-                error_msg = f"Failed to load Cross-Encoder Model (fallback): {str(e_fallback)}"
+                error_msg = f"Failed to load Cross-Encoder Model (fallback): {str(e_fallback) if e_fallback else 'Unknown error'}"
                 print(f"❌ [Global Init] {error_msg}")
                 st.session_state.cross_encoder_error = error_msg
         else:
-            error_msg = f"Failed to load Cross-Encoder Model: {str(e)}"
+            error_msg = f"Failed to load Cross-Encoder Model: {error_str}"
             print(f"❌ [Global Init] {error_msg}")
             st.session_state.cross_encoder_error = error_msg
 
-# Load Qwen3-1.7B Tokenizer
-if st.session_state.qwen3_1_7b_tokenizer is None and st.session_state.qwen3_1_7b_tokenizer_error is None:
-    print("[Global Init] Loading Qwen3-1.7B Tokenizer...")
+# Load Qwen3-4B Tokenizer
+if st.session_state.qwen3_4b_tokenizer is None and st.session_state.qwen3_4b_tokenizer_error is None:
+    print("[Global Init] Loading Qwen3-4B Tokenizer...")
     try:
-        st.session_state.qwen3_1_7b_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-1.7B")
-        print("[Global Init] Qwen3-1.7B Tokenizer Loaded.")
+        st.session_state.qwen3_4b_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B")
+        print("[Global Init] Qwen3-4B Tokenizer Loaded.")
     except Exception as e:
-        error_msg = f"Failed to load Qwen3-1.7B Tokenizer: {str(e)}"
+        error_msg = f"Failed to load Qwen3-4B Tokenizer: {str(e) if e else 'Unknown error'}"
         print(f"❌ [Global Init] {error_msg}")
-        st.session_state.qwen3_1_7b_tokenizer_error = error_msg
+        st.session_state.qwen3_4b_tokenizer_error = error_msg
 
-# Load Qwen3-1.7B Model
-if st.session_state.qwen3_1_7b_model is None and st.session_state.qwen3_1_7b_model_error is None:
-    print("[Global Init] Loading Qwen3-1.7B Model (attempting with device_map='auto')...")
+# Load Qwen3-4B Model with 4-bit quantization
+if st.session_state.qwen3_4b_model is None and st.session_state.qwen3_4b_model_error is None:
+    print("[Global Init] Loading Qwen3-4B Model with 4-bit quantization...")
     try:
-        st.session_state.qwen3_1_7b_model = AutoModelForCausalLM.from_pretrained(
-            "Qwen/Qwen3-1.7B",
-            torch_dtype="auto",
+        # Configure 4-bit quantization for better performance and memory efficiency
+        quantization_config = BitsAndBytesConfig(
+            load_in_4bit=True,
+            bnb_4bit_quant_type="nf4",
+            bnb_4bit_compute_dtype=torch.float16,
+            bnb_4bit_use_double_quant=True
+        )
+
+        st.session_state.qwen3_4b_model = AutoModelForCausalLM.from_pretrained(
+            "Qwen/Qwen3-4B",
+            quantization_config=quantization_config,
             device_map="auto",
-            trust_remote_code=True # if required by this specific model
+            trust_remote_code=True,
+            torch_dtype=torch.float16,
+            use_cache=True
         )
-        print("[Global Init] Qwen3-1.7B Model Loaded with device_map='auto'.")
-    except Exception as e_dev_map:
-        print(f"⚠️ [Global Init] Failed to load Qwen3-1.7B with device_map='auto': {str(e_dev_map)}")
-        print("[Global Init] Retrying Qwen3-1.7B load without device_map (will use default single device)...")
+        print("[Global Init] Qwen3-4B Model Loaded with 4-bit quantization and device_map='auto'.")
+    except Exception as e_quant:
+        error_str = str(e_quant) if e_quant else ""
+        print(f"⚠️ [Global Init] Failed to load Qwen3-4B with 4-bit quantization: {error_str}")
+        print("[Global Init] Retrying Qwen3-4B load without quantization...")
         try:
-            st.session_state.qwen3_1_7b_model = AutoModelForCausalLM.from_pretrained(
-                "Qwen/Qwen3-1.7B",
+            st.session_state.qwen3_4b_model = AutoModelForCausalLM.from_pretrained(
+                "Qwen/Qwen3-4B",
                 torch_dtype="auto",
-                # No device_map here, let Hugging Face decide or use CUDA if available
-                trust_remote_code=True # if required
+                device_map="auto",
+                trust_remote_code=True
            )
-            print("[Global Init] Qwen3-1.7B Model Loaded (fallback device handling).")
+            print("[Global Init] Qwen3-4B Model Loaded without quantization.")
        except Exception as e_fallback:
-            error_msg = f"Failed to load Qwen3-1.7B Model (fallback): {str(e_fallback)}"
+            error_msg = f"Failed to load Qwen3-4B Model (fallback): {str(e_fallback) if e_fallback else 'Unknown error'}"
             print(f"❌ [Global Init] {error_msg}")
-            st.session_state.qwen3_1_7b_model_error = error_msg
+            st.session_state.qwen3_4b_model_error = error_msg
 
 # --- End of Global Model Loading Section ---
 
@@ -248,7 +261,7 @@ class ResumeScreener: # Ensure this class definition is BEFORE it's instantiated
         top_20_results = self.cross_encoder_rerank(resume_texts, job_description, top_50_indices, top_k=20)
         st.info("🔀 Stage 3: BM25 Keyword Matching...")
         top_20_with_bm25 = self.add_bm25_scores(resume_texts, job_description, top_20_results)
-        st.info("🤖 Stage 4: LLM Intent Analysis (Qwen3-1.7B)...")
+        st.info("🤖 Stage 4: LLM Intent Analysis (Qwen3-4B)...")
         top_20_with_intent = self.add_intent_scores(resume_texts, job_description, top_20_with_bm25)
         st.info("🏆 Stage 5: Final Combined Ranking...")
         final_results = self.calculate_final_scores(top_20_with_intent)
@@ -331,7 +344,7 @@ class ResumeScreener: # Ensure this class definition is BEFORE it's instantiated
         return results_with_bm25
 
     def add_intent_scores(self, resume_texts, job_description, top_20_with_bm25):
-        st.text(f"LLM Intent: Analyzing intent for {len(top_20_with_bm25)} candidates (Qwen3-1.7B)...")
+        st.text(f"LLM Intent: Analyzing intent for {len(top_20_with_bm25)} candidates (Qwen3-4B)...")
         results_with_intent = []
         progress_bar = st.progress(0)
         for i, (idx, cross_score, bm25_score) in enumerate(top_20_with_bm25):
@@ -342,20 +355,48 @@ class ResumeScreener: # Ensure this class definition is BEFORE it's instantiated
         return results_with_intent
 
     def analyze_intent(self, resume_text, job_description):
-        print(f"[analyze_intent] Analyzing intent for one resume (Qwen3-1.7B)...")
-        st.text("LLM Intent: Analyzing intent (Qwen3-1.7B)...")
+        print(f"[analyze_intent] Analyzing intent for one resume (Qwen3-4B)...")
+        st.text("LLM Intent: Analyzing intent (Qwen3-4B)...")
         try:
             resume_snippet = resume_text[:15000]
             job_snippet = job_description[:5000]
-            prompt = f"""You are given a job description and a candidate's resume... (rest of prompt)"""  # Ensure f-string is correct
-            # ... (rest of analyze_intent, using st.session_state.qwen3_1_7b_tokenizer and _model)
+
+            prompt = f"""You are given a job description and a candidate's resume. Your task is to analyze whether the candidate is likely seeking this specific type of job.
+
+Job Description:
+{job_snippet}
+
+Candidate Resume:
+{resume_snippet}
+
+Please analyze the candidate's background, skills, experience, and career trajectory to determine if they would be genuinely interested in and likely to apply for this position.
+
+Consider:
+1. Does their experience align with the job requirements?
+2. Is this a logical career progression for them?
+3. Do their skills match what's needed?
+4. Would this role be appealing given their background?
+
+Think through your analysis step by step, then provide your final assessment.
+
+Respond with exactly one of these formats:
+- Intent: Yes (if they would likely seek this job)
+- Intent: Maybe (if it's uncertain or partially aligned)
+- Intent: No (if they would likely not seek this job)"""
+
+            # Check if models are available
+            if not st.session_state.get('qwen3_4b_tokenizer') or not st.session_state.get('qwen3_4b_model'):
+                st.warning("Qwen3-4B model not available, using fallback intent score.")
+                return 0.1
+
             response_text = generate_qwen3_response(
                 prompt,
-                st.session_state.qwen3_1_7b_tokenizer,
-                st.session_state.qwen3_1_7b_model,
+                st.session_state.qwen3_4b_tokenizer,
+                st.session_state.qwen3_4b_model,
                 max_new_tokens=20000
             )
-            # ... (parsing logic for response_text) ...
+
+            # Parse thinking content and intent decision
             thinking_content = "No detailed thought process extracted."
             intent_decision_part = response_text
             think_start_tag = "<think>"
@@ -365,15 +406,20 @@ class ResumeScreener: # Ensure this class definition is BEFORE it's instantiated
             if start_index != -1 and end_index != -1 and start_index < end_index:
                 thinking_content = response_text[start_index + len(think_start_tag):end_index].strip()
                 intent_decision_part = response_text[end_index + len(think_end_tag):].strip()
+
             response_lower = intent_decision_part.lower()
-            intent_score = 0.1
+            intent_score = 0.1  # Default "Maybe" score
+
             if 'intent: yes' in response_lower or 'intent:yes' in response_lower:
                 intent_score = 0.3
             elif 'intent: no' in response_lower or 'intent:no' in response_lower:
                 intent_score = 0.0
+            elif 'intent: maybe' in response_lower or 'intent:maybe' in response_lower:
+                intent_score = 0.1
+
             return intent_score
         except Exception as e:
-            st.warning(f"Error analyzing intent with Qwen3-1.7B: {str(e)}")
+            st.warning(f"Error analyzing intent with Qwen3-4B: {str(e)}")
             return 0.1
 
     def calculate_final_scores(self, results_with_all_scores):
@@ -432,12 +478,12 @@ with st.sidebar:
     st.markdown("- **Stage 1**: FAISS Recall (Top 50)")
     st.markdown("- **Stage 2**: Cross-Encoder Re-ranking (Top 20)")
     st.markdown("- **Stage 3**: BM25 Keyword Matching")
-    st.markdown("- **Stage 4**: LLM Intent Analysis (Qwen3-1.7B)")
+    st.markdown("- **Stage 4**: LLM Intent Analysis (Qwen3-4B)")
     st.markdown("- **Final**: Combined Scoring") # Updated this line
     st.markdown("### 📊 Models Used")
     st.markdown("- **Embedding**: BAAI/bge-large-en-v1.5")
     st.markdown("- **Cross-Encoder**: ms-marco-MiniLM-L6-v2")
-    st.markdown("- **LLM**: Qwen/Qwen3-1.7B")
+    st.markdown("- **LLM**: Qwen/Qwen3-4B (4-bit quantized)")
     st.markdown("### 📈 Scoring Formula")
     st.markdown("**Final Score = Cross-Encoder (0-1) + BM25 (0.1-0.2) + Intent (0-0.3)**")
 
@@ -445,7 +491,7 @@ with st.sidebar:
 st.title("🎯 AI-Powered Resume Screener")
 # ... (Model Loading Status display as before)
 # ...
-st.markdown("*Find the perfect candidates using BAAI/bge-large-en-v1.5 embeddings and Qwen3-1.7B for intent analysis*")
+st.markdown("*Find the perfect candidates using BAAI/bge-large-en-v1.5 embeddings and Qwen3-4B for intent analysis*")
 
 st.subheader("🤖 Model Loading Status")
 col1, col2 = st.columns(2)
@@ -463,18 +509,18 @@ with col1:
     else:
         st.warning("⏳ Cross-Encoder Model loading or not found (check console).")
 with col2:
-    if st.session_state.get('qwen3_1_7b_tokenizer_error'):
-        st.error(f"Qwen3-1.7B Tokenizer: {st.session_state.qwen3_1_7b_tokenizer_error}")
-    elif st.session_state.get('qwen3_1_7b_tokenizer'):
-        st.success("✅ Qwen3-1.7B Tokenizer loaded.")
+    if st.session_state.get('qwen3_4b_tokenizer_error'):
+        st.error(f"Qwen3-4B Tokenizer: {st.session_state.qwen3_4b_tokenizer_error}")
+    elif st.session_state.get('qwen3_4b_tokenizer'):
+        st.success("✅ Qwen3-4B Tokenizer loaded.")
     else:
-        st.warning("⏳ Qwen3-1.7B Tokenizer loading or not found (check console).")
-    if st.session_state.get('qwen3_1_7b_model_error'):
-        st.error(f"Qwen3-1.7B Model: {st.session_state.qwen3_1_7b_model_error}")
-    elif st.session_state.get('qwen3_1_7b_model'):
-        st.success("✅ Qwen3-1.7B Model loaded.")
+        st.warning("⏳ Qwen3-4B Tokenizer loading or not found (check console).")
+    if st.session_state.get('qwen3_4b_model_error'):
+        st.error(f"Qwen3-4B Model: {st.session_state.qwen3_4b_model_error}")
+    elif st.session_state.get('qwen3_4b_model'):
+        st.success("✅ Qwen3-4B Model loaded (4-bit quantized).")
     else:
-        st.warning("⏳ Qwen3-1.7B Model loading or not found (check console).")
+        st.warning("⏳ Qwen3-4B Model loading or not found (check console).")
 st.markdown("---")
 
 # Initialize screener (This line was causing NameError, ensure class is defined above)
@@ -671,8 +717,8 @@ with col1:
                  disabled=not (job_description and st.session_state.resume_texts and
                                st.session_state.get('embedding_model') and
                                st.session_state.get('cross_encoder') and
-                               st.session_state.get('qwen3_1_7b_model') and
-                               st.session_state.get('qwen3_1_7b_tokenizer')),
+                               st.session_state.get('qwen3_4b_model') and
+                               st.session_state.get('qwen3_4b_tokenizer')),
                  type="primary",
                  help="Run the complete 5-stage advanced pipeline"):
         print("--- Advanced Pipeline Analysis Button Clicked ---")
@@ -901,7 +947,7 @@ st.markdown("---")
 st.markdown(
     """
     <div style='text-align: center; color: #666;'>
-        🚀 Powered by BAAI/bge-large-en-v1.5 & Qwen3-1.7B | Built with Streamlit
+        🚀 Powered by BAAI/bge-large-en-v1.5 & Qwen3-4B (4-bit) | Built with Streamlit
     </div>
     """,
     unsafe_allow_html=True
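
Note on the headline change: this commit swaps Qwen3-1.7B for Qwen3-4B and offsets the larger model with nf4 4-bit quantization via `BitsAndBytesConfig`. Rough weight-memory arithmetic shows why that trade can work on the same GPU (a back-of-envelope sketch that ignores the KV cache, activations, and quantization overhead):

```python
params = 4e9                 # Qwen3-4B parameter count, approximately
fp16_gb = params * 2 / 1e9   # fp16 stores 2 bytes per weight  -> ~8 GB
nf4_gb = params * 0.5 / 1e9  # nf4 stores ~4 bits per weight   -> ~2 GB
print(f"fp16 weights: ~{fp16_gb:.0f} GB, nf4 weights: ~{nf4_gb:.0f} GB")
```

The un-quantized `torch_dtype="auto"` path is kept as a fallback because 4-bit loading raises where `bitsandbytes` or a CUDA device is unavailable, which the inner `except` branch in the hunk above catches.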
 
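The hunks above call `generate_qwen3_response(prompt, tokenizer, model, max_new_tokens=...)`, but the helper itself sits outside this diff. A minimal sketch of what such a helper plausibly looks like, assuming the standard `apply_chat_template` / `generate` flow; only the name and call signature come from the diff, the body is an assumption:

```python
import torch

def generate_qwen3_response(prompt, tokenizer, model, max_new_tokens=20000):
    # Hypothetical reconstruction, not the app's actual code. Qwen3 chat
    # templates can emit <think>...</think> reasoning blocks, which is what
    # the tag parsing in analyze_intent expects to find.
    messages = [{"role": "user", "content": prompt}]
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer([text], return_tensors="pt").to(model.device)
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # Decode only the newly generated tokens, not the echoed prompt.
    new_tokens = output_ids[0][inputs.input_ids.shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
```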
 
 
406
  if start_index != -1 and end_index != -1 and start_index < end_index:
407
  thinking_content = response_text[start_index + len(think_start_tag):end_index].strip()
408
  intent_decision_part = response_text[end_index + len(think_end_tag):].strip()
409
+
410
  response_lower = intent_decision_part.lower()
411
+ intent_score = 0.1 # Default "Maybe" score
412
+
413
  if 'intent: yes' in response_lower or 'intent:yes' in response_lower:
414
  intent_score = 0.3
415
  elif 'intent: no' in response_lower or 'intent:no' in response_lower:
416
  intent_score = 0.0
417
+ elif 'intent: maybe' in response_lower or 'intent:maybe' in response_lower:
418
+ intent_score = 0.1
419
+
420
  return intent_score
421
  except Exception as e:
422
+ st.warning(f"Error analyzing intent with Qwen3-4B: {str(e)}")
423
  return 0.1
424
 
425
  def calculate_final_scores(self, results_with_all_scores):
 
478
  st.markdown("- **Stage 1**: FAISS Recall (Top 50)")
479
  st.markdown("- **Stage 2**: Cross-Encoder Re-ranking (Top 20)")
480
  st.markdown("- **Stage 3**: BM25 Keyword Matching")
481
+ st.markdown("- **Stage 4**: LLM Intent Analysis (Qwen3-4B)")
482
  st.markdown("- **Final**: Combined Scoring") # Updated this line
483
  st.markdown("### πŸ“Š Models Used")
484
  st.markdown("- **Embedding**: BAAI/bge-large-en-v1.5")
485
  st.markdown("- **Cross-Encoder**: ms-marco-MiniLM-L6-v2")
486
+ st.markdown("- **LLM**: Qwen/Qwen3-4B (4-bit quantized)")
487
  st.markdown("### πŸ“ˆ Scoring Formula")
488
  st.markdown("**Final Score = Cross-Encoder (0-1) + BM25 (0.1-0.2) + Intent (0-0.3)**")
489
 
 
491
  st.title("🎯 AI-Powered Resume Screener")
492
  # ... (Model Loading Status display as before)
493
  # ...
494
+ st.markdown("*Find the perfect candidates using BAAI/bge-large-en-v1.5 embeddings and Qwen3-4B for intent analysis*")
495
 
496
  st.subheader("πŸ€– Model Loading Status")
497
  col1, col2 = st.columns(2)
 
509
  else:
510
  st.warning("⏳ Cross-Encoder Model loading or not found (check console).")
511
  with col2:
512
+ if st.session_state.get('qwen3_4b_tokenizer_error'):
513
+ st.error(f"Qwen3-4B Tokenizer: {st.session_state.qwen3_4b_tokenizer_error}")
514
+ elif st.session_state.get('qwen3_4b_tokenizer'):
515
+ st.success("βœ… Qwen3-4B Tokenizer loaded.")
516
  else:
517
+ st.warning("⏳ Qwen3-4B Tokenizer loading or not found (check console).")
518
+ if st.session_state.get('qwen3_4b_model_error'):
519
+ st.error(f"Qwen3-4B Model: {st.session_state.qwen3_4b_model_error}")
520
+ elif st.session_state.get('qwen3_4b_model'):
521
+ st.success("βœ… Qwen3-4B Model loaded (4-bit quantized).")
522
  else:
523
+ st.warning("⏳ Qwen3-4B Model loading or not found (check console).")
524
  st.markdown("---")
525
 
526
  # Initialize screener (This line was causing NameError, ensure class is defined above)
 
717
  disabled=not (job_description and st.session_state.resume_texts and
718
  st.session_state.get('embedding_model') and
719
  st.session_state.get('cross_encoder') and
720
+ st.session_state.get('qwen3_4b_model') and
721
+ st.session_state.get('qwen3_4b_tokenizer')),
722
  type="primary",
723
  help="Run the complete 5-stage advanced pipeline"):
724
  print("--- Advanced Pipeline Analysis Button Clicked ---")
 
947
  st.markdown(
948
  """
949
  <div style='text-align: center; color: #666;'>
950
+ πŸš€ Powered by BAAI/bge-large-en-v1.5 & Qwen3-4B (4-bit) | Built with Streamlit
951
  </div>
952
  """,
953
  unsafe_allow_html=True
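Finally, the sidebar states the combined formula: Final Score = Cross-Encoder (0-1) + BM25 (0.1-0.2) + Intent (0-0.3). `calculate_final_scores` is outside these hunks, but the additive combination it describes is straightforward to sketch (the helper name and example values are illustrative, not from the app):

```python
def combine_scores(cross_score: float, bm25_score: float, intent_score: float) -> float:
    # Cross-encoder relevance dominates (0-1); BM25 keyword overlap adds a
    # small 0.1-0.2 band; LLM intent adds 0.0, 0.1, or 0.3 on top.
    return cross_score + bm25_score + intent_score

# A strong semantic match with good keyword overlap and clear intent
# approaches the theoretical maximum of 1.5 (= 1.0 + 0.2 + 0.3).
print(combine_scores(0.92, 0.15, 0.3))  # 1.37
```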