CR7CAD committed (verified)
Commit 9de41cb · 1 Parent(s): 17a94ec

Update app.py

Files changed (1)
  1. app.py +118 -150
app.py CHANGED
@@ -167,185 +167,153 @@ def basic_summarize(text, max_length=100):
     summary = " ".join(summary_sentences)
     return summary
 
-# Custom classification function for job fit assessment
+# Custom classification function for comprehensive job fit assessment
 def evaluate_job_fit(resume_summary, job_requirements, models):
     """
-    Use the sentiment model to evaluate job fit with multiple analyses
+    Use model to evaluate job fit with comprehensive analysis across multiple dimensions
     """
     start_time = time.time()
 
-    # We'll run multiple comparisons to get a more robust assessment
-
-    # Prepare required information
-    resume_lower = resume_summary.lower()
+    # Extract basic information for context
     required_skills = job_requirements["required_skills"]
     years_required = job_requirements["years_experience"]
     job_title = job_requirements["title"]
     job_summary = job_requirements["summary"]
 
-    # Extract skills mentioned in resume
-    skills_in_resume = []
-    for skill in required_skills:
-        if skill.lower() in resume_lower:
-            skills_in_resume.append(skill)
+    # Create a comprehensive analysis prompt for the model to evaluate
+    analysis_prompt = f"""
+    RESUME SUMMARY:
+    {resume_summary}
+
+    JOB DESCRIPTION:
+    Title: {job_title}
+    Required experience: {years_required} years
+    Required skills: {', '.join(required_skills)}
+    Description: {job_summary}
+
+    TASK: Analyze how well the candidate matches this job based on:
+    1. Technical skills match
+    2. Experience level match
+    3. Role/position alignment
+    4. Industry familiarity
+    5. Potential for success in this position
+
+    Assign a score from 0-2 where:
+    0 = NOT FIT (major gaps in requirements)
+    1 = POTENTIAL FIT (meets some key requirements)
+    2 = GOOD FIT (meets most or all key requirements)
+    """
 
-    # Skills match percentage
-    skills_match_percentage = int((len(skills_in_resume) / max(1, len(required_skills))) * 100)
+    # Truncate prompt if needed to fit model's input limits
+    max_prompt_length = 1024  # Set a reasonable limit
+    if len(analysis_prompt) > max_prompt_length:
+        analysis_prompt = analysis_prompt[:max_prompt_length]
 
-    # Extract years of experience from resume
-    experience_years = 0
-    year_patterns = [
-        r'(\d+)\s*(?:\+)?\s*years?\s*(?:of)?\s*experience',
-        r'experience\s*(?:of)?\s*(\d+)\s*(?:\+)?\s*years?'
-    ]
+    # Use sentiment analysis model for evaluation
+    # This is a smart use of a simple model - we're phrasing our prompt
+    # so that a positive sentiment = good match, negative sentiment = poor match
 
-    for pattern in year_patterns:
-        exp_match = re.search(pattern, resume_lower)
-        if exp_match:
-            try:
-                experience_years = int(exp_match.group(1))
-                break
-            except:
-                pass
+    fit_score = 0  # Default score
+    fit_assessment = ""
 
-    # If we couldn't find explicit years, try to count based on work history
-    if experience_years == 0:
-        # Try to extract from work experience section
-        work_exp_match = re.search(r'work experience:(.*?)(?=\n\n|$)', resume_summary, re.IGNORECASE | re.DOTALL)
-        if work_exp_match:
-            work_text = work_exp_match.group(1).lower()
-            years = re.findall(r'(\d{4})\s*-\s*(\d{4}|present|current)', work_text)
+    # Run multiple sub-analyses to build confidence in our result
+    sub_analyses = []
+
+    # Function to run model evaluation
+    def run_model_evaluation(prompt_text):
+        if has_pipeline and 'evaluator' in models:
+            result = models['evaluator'](prompt_text)
+            # Convert sentiment to score
+            if result[0]['label'] == 'POSITIVE' and result[0]['score'] > 0.9:
+                return 2  # Strong positive = good fit
+            elif result[0]['label'] == 'POSITIVE':
+                return 1  # Positive but not strong = potential fit
+            else:
+                return 0  # Negative = not fit
+        else:
+            # Manual implementation if pipeline not available
+            tokenizer = models['evaluator_tokenizer']
+            model = models['evaluator_model']
 
-            total_years = 0
-            for year_range in years:
-                start_year = int(year_range[0])
-                if year_range[1].isdigit():
-                    end_year = int(year_range[1])
-                else:
-                    end_year = 2025  # Assume "present" is current year
-
-                total_years += (end_year - start_year)
+            # Truncate to avoid exceeding model's max length
+            max_length = tokenizer.model_max_length if hasattr(tokenizer, 'model_max_length') else 512
+            truncated_text = " ".join(prompt_text.split()[:max_length])
 
-            experience_years = total_years
-
-    # Check experience match
-    experience_match = "sufficient" if experience_years >= years_required else "insufficient"
-
-    # Create multiple comparison texts to evaluate from different angles
-    # Each formatted to bias the sentiment model in a different way
-
-    # 1. Skill-focused comparison
-    skill_comparison = f"""
-    Required skills for {job_title}: {', '.join(required_skills)}
-
-    Skills found in candidate resume: {', '.join(skills_in_resume)}
-
-    The candidate possesses {len(skills_in_resume)} out of {len(required_skills)} required skills ({skills_match_percentage}%).
-
-    Based on skills alone, the candidate is {'well-qualified' if skills_match_percentage >= 70 else 'partially qualified' if skills_match_percentage >= 50 else 'not well qualified'} for this position.
+            inputs = tokenizer(truncated_text, return_tensors="pt", truncation=True, max_length=max_length)
+            with torch.no_grad():
+                outputs = model(**inputs)
+
+            probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
+            positive_prob = probabilities[0][1].item()  # Positive class probability
+
+            # Convert probability to score
+            if positive_prob > 0.9:
+                return 2
+            elif positive_prob > 0.6:
+                return 1
+            else:
+                return 0
+
+    # Run skills analysis
+    skills_prompt = f"""
+    RESUME SKILLS: {resume_summary}
+    JOB REQUIRED SKILLS: {', '.join(required_skills)}
+
+    Does the candidate have most of the required technical skills for this position?
     """
+    skills_score = run_model_evaluation(skills_prompt)
+    sub_analyses.append(skills_score)
 
-    # 2. Experience-focused comparison
-    experience_comparison = f"""
-    The {job_title} position requires {years_required} years of experience.
-
-    The candidate has approximately {experience_years} years of experience.
+    # Run experience analysis
+    experience_prompt = f"""
+    RESUME EXPERIENCE: {resume_summary}
+    JOB REQUIRED EXPERIENCE: {years_required} years in {job_title}
 
-    Based on experience alone, the candidate {'meets' if experience_years >= years_required else 'does not meet'} the experience requirements for this position.
+    Does the candidate have sufficient years of relevant experience for this position?
     """
+    experience_score = run_model_evaluation(experience_prompt)
+    sub_analyses.append(experience_score)
 
-    # 3. Overall job fit comparison
-    overall_comparison = f"""
-    Job: {job_title}
-
-    Job description summary: {job_summary}
-
-    Candidate summary: {resume_summary[:300]}
+    # Run role alignment analysis
+    role_prompt = f"""
+    CANDIDATE PROFILE: {resume_summary}
+    JOB ROLE: {job_title}, {job_summary}
 
-    Skills match: {skills_match_percentage}%
-    Experience match: {experience_years}/{years_required} years
-
-    Overall assessment: The candidate's profile {'appears to fit' if skills_match_percentage >= 60 and experience_match == "sufficient" else 'has some gaps compared to'} the key requirements for this position.
+    Is the candidate's background well-aligned with this job role and responsibilities?
     """
-
-    # Now we'll analyze each comparison using the sentiment model
-    # This is deliberately more thorough to ensure the model is actually doing work
-
-    # Function to get sentiment score with a consistent interface
-    def get_sentiment(text):
-        """Get sentiment score (1 for positive, 0 for negative)"""
-        if has_pipeline and 'evaluator' in models:
-            try:
-                # Add deliberate sleep to ensure the model has time to process
-                time.sleep(0.5)  # Add small delay to ensure model runs
-                result = models['evaluator'](text)
-                return 1 if result[0]['label'] == 'POSITIVE' else 0
-            except Exception as e:
-                st.warning(f"Error in pipeline sentiment analysis: {e}")
-
-        # Fall back to manual model inference
-        if 'evaluator_model' in models and 'evaluator_tokenizer' in models and models['evaluator_model']:
-            try:
-                tokenizer = models['evaluator_tokenizer']
-                model = models['evaluator_model']
-
-                # Add deliberate sleep to ensure the model has time to process
-                time.sleep(0.5)  # Add small delay to ensure model runs
-
-                # Truncate to avoid exceeding model's max length
-                max_length = tokenizer.model_max_length if hasattr(tokenizer, 'model_max_length') else 512
-                truncated_text = " ".join(text.split()[:max_length])
-
-                inputs = tokenizer(truncated_text, return_tensors="pt", truncation=True, max_length=max_length)
-                with torch.no_grad():
-                    outputs = model(**inputs)
-
-                probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
-                prediction = torch.argmax(probabilities, dim=-1).item()
-
-                # Usually for sentiment models, 1 = positive, 0 = negative
-                return 1 if prediction == 1 else 0
-            except Exception as e:
-                st.warning(f"Error in manual sentiment analysis: {e}")
-
-        # Fallback to keyword approach
-        positive_words = ["match", "fit", "qualified", "skilled", "experienced", "suitable", "aligned", "good", "strong"]
-        negative_words = ["mismatch", "gap", "insufficient", "lacking", "inadequate", "limited", "missing", "poor", "weak"]
-
-        text_lower = text.lower()
-        positive_count = sum(text_lower.count(word) for word in positive_words)
-        negative_count = sum(text_lower.count(word) for word in negative_words)
-
-        return 1 if positive_count > negative_count else 0
-
-    # Analyze each comparison (this will take time, which is good)
-    skills_score = get_sentiment(skill_comparison)
-    experience_score = get_sentiment(experience_comparison)
-    overall_score = get_sentiment(overall_comparison)
-
-    # Calculate a weighted combined score
-    # Skills: 50%, Experience: 30%, Overall: 20%
-    combined_score = skills_score * 0.5 + experience_score * 0.3 + overall_score * 0.2
-
-    # Now determine the final score (0, 1, or 2)
-    if combined_score >= 0.7 and skills_match_percentage >= 70 and experience_match == "sufficient":
-        final_score = 2  # Good fit
-    elif combined_score >= 0.4 or (skills_match_percentage >= 50 and experience_match == "sufficient"):
-        final_score = 1  # Potential fit
+    role_score = run_model_evaluation(role_prompt)
+    sub_analyses.append(role_score)
+
+    # Calculate overall score (weighted average)
+    # Skills: 40%, Experience: 30%, Role alignment: 30%
+    weights = [0.4, 0.3, 0.3]
+    weighted_score = sum(score * weight for score, weight in zip(sub_analyses, weights))
+
+    # Convert to integer score (0-2)
+    if weighted_score >= 1.5:
+        fit_score = 2
+    elif weighted_score >= 0.8:
+        fit_score = 1
     else:
-        final_score = 0  # Not fit
+        fit_score = 0
 
-    # Generate concise assessment text based on the score
-    if final_score == 2:
-        assessment = f"{final_score}: Skills match {skills_match_percentage}%, Experience match {experience_years}/{years_required} yrs. Strong technical alignment with {len(skills_in_resume)}/{len(required_skills)} required skills."
-    elif final_score == 1:
-        assessment = f"{final_score}: Skills match {skills_match_percentage}%, Experience {experience_match}. Meets some requirements but has gaps in {len(required_skills) - len(skills_in_resume)} skill areas."
+    # Count matching skills for detailed assessment
+    resume_lower = resume_summary.lower()
+    matching_skills = [skill for skill in required_skills if skill.lower() in resume_lower]
+    missing_skills = [skill for skill in required_skills if skill.lower() not in resume_lower]
+    skills_match_percentage = int(len(matching_skills) / max(1, len(required_skills)) * 100)
+
+    # Generate assessment text based on score
+    if fit_score == 2:
+        fit_assessment = f"{fit_score}: Strong match with {skills_match_percentage}% skill alignment and suitable experience for {job_title}. Candidate demonstrates relevant background and meets key requirements."
+    elif fit_score == 1:
+        fit_assessment = f"{fit_score}: Potential match with {skills_match_percentage}% skill alignment. Candidate meets some requirements for {job_title} but may have gaps in {', '.join(missing_skills[:3])}{'...' if len(missing_skills) > 3 else ''}."
     else:
-        assessment = f"{final_score}: Skills match only {skills_match_percentage}%, Experience {experience_match}. Significant gaps in critical requirements for this position."
+        fit_assessment = f"{fit_score}: Limited match with only {skills_match_percentage}% skill alignment for {job_title}. Significant gaps in required skills and experience suggests this may not be the right fit."
 
     execution_time = time.time() - start_time
 
-    return assessment, final_score, execution_time
+    return fit_assessment, fit_score, execution_time
 
 #####################################
 # Function: Extract Text from File
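
For context, a minimal usage sketch of the reworked evaluate_job_fit (not part of the commit). It assumes app.py is importable, that app.py sets has_pipeline itself when transformers is available, and that the models dict holds a sentiment pipeline under "evaluator", as the diff indicates; the checkpoint name and the sample job/resume data are illustrative only.

# Usage sketch under the assumptions above; importing app will also run the
# Streamlit script, so in practice this would live at the bottom of app.py.
from transformers import pipeline
from app import evaluate_job_fit  # hypothetical import of the Space's app.py

models = {
    # any binary POSITIVE/NEGATIVE sentiment checkpoint should work; this one is an example
    "evaluator": pipeline("sentiment-analysis",
                          model="distilbert-base-uncased-finetuned-sst-2-english"),
}
job_requirements = {  # keys match those read inside evaluate_job_fit
    "title": "Data Engineer",                      # sample data, purely hypothetical
    "years_experience": 3,
    "required_skills": ["Python", "SQL", "Spark"],
    "summary": "Build and maintain batch data pipelines.",
}
resume_summary = "Five years of experience building Python and SQL ETL pipelines with Spark."

# Returns the assessment string, the 0-2 fit score, and the elapsed time in seconds
assessment, fit_score, elapsed = evaluate_job_fit(resume_summary, job_requirements, models)
print(fit_score, assessment, f"({elapsed:.1f}s)")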