CR7CAD committed on
Commit
e472708
·
verified ·
1 Parent(s): d7b9f34

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +174 -364
app.py CHANGED
@@ -9,7 +9,7 @@ import re
9
  import pandas as pd
10
  from functools import lru_cache
11
 
12
- # Try different import approaches
13
  try:
14
  from transformers import pipeline
15
  has_pipeline = True
@@ -20,21 +20,11 @@ except ImportError:
20
  st.warning("Using basic transformers functionality instead of pipeline API")
21
 
22
  # Set page title and hide sidebar
23
- st.set_page_config(
24
- page_title="Resume-Job Fit Analyzer",
25
- initial_sidebar_state="collapsed"
26
- )
27
-
28
- # Hide sidebar completely with custom CSS
29
- st.markdown("""
30
- <style>
31
- [data-testid="collapsedControl"] {display: none;}
32
- section[data-testid="stSidebar"] {display: none;}
33
- </style>
34
- """, unsafe_allow_html=True)
35
 
36
  #####################################
37
- # Preload Models
38
  #####################################
39
  @st.cache_resource(show_spinner=True)
40
  def load_models():
@@ -44,107 +34,67 @@ def load_models():
44
 
45
  # Load summarization model
46
  if has_pipeline:
47
- # Use pipeline if available, now using the updated model
48
- models['summarizer'] = pipeline(
49
- "summarization",
50
- model="Falconsai/text_summarization",
51
- max_length=100,
52
- truncation=True
53
- )
54
  else:
55
- # Fall back to basic model loading using the updated summarization model
56
  try:
57
  models['summarizer_model'] = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/text_summarization")
58
  models['summarizer_tokenizer'] = AutoTokenizer.from_pretrained("Falconsai/text_summarization")
59
  except Exception as e:
60
  st.error(f"Error loading summarization model: {e}")
61
- models['summarizer_model'] = None
62
- models['summarizer_tokenizer'] = None
63
 
64
- # Load sentiment model for evaluation - updated model
65
  if has_pipeline:
66
- # Use pipeline if available
67
- models['evaluator'] = pipeline(
68
- "sentiment-analysis",
69
- model="CR7CAD/RobertaFinetuned"
70
- )
71
  else:
72
- # Fall back to basic model loading using the updated evaluation model
73
  try:
74
- models['evaluator_model'] = AutoModelForSequenceClassification.from_pretrained(
75
- "CR7CAD/RobertaFinetuned"
76
- )
77
- models['evaluator_tokenizer'] = AutoTokenizer.from_pretrained(
78
- "CR7CAD/RobertaFinetuned"
79
- )
80
  except Exception as e:
81
  st.error(f"Error loading sentiment model: {e}")
82
- models['evaluator_model'] = None
83
- models['evaluator_tokenizer'] = None
84
 
85
  return models
86
 
87
- # Custom text summarization function that works with or without pipeline
88
  def summarize_text(text, models, max_length=100):
89
- """Summarize text using available models"""
90
  # Truncate input to prevent issues with long texts
91
- input_text = text[:1024] # Limit input length
92
 
 
93
  if has_pipeline and 'summarizer' in models:
94
- # Use pipeline if available
95
  try:
96
- summary = models['summarizer'](input_text)[0]['summary_text']
97
- return summary
98
  except Exception as e:
99
  st.warning(f"Error in pipeline summarization: {e}")
100
 
101
- # Fall back to manual model inference
102
  if 'summarizer_model' in models and 'summarizer_tokenizer' in models and models['summarizer_model']:
103
  try:
104
  tokenizer = models['summarizer_tokenizer']
105
  model = models['summarizer_model']
106
-
107
- # Prepare inputs
108
  inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=1024)
109
-
110
- # Generate summary
111
- summary_ids = model.generate(
112
- inputs.input_ids,
113
- max_length=max_length,
114
- min_length=30,
115
- num_beams=4,
116
- early_stopping=True
117
- )
118
-
119
- summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
120
- return summary
121
  except Exception as e:
122
  st.warning(f"Error in manual summarization: {e}")
123
 
124
- # If all else fails, extract first few sentences
125
  return basic_summarize(text, max_length)
126
 
127
- # Basic text summarization as last fallback
128
  def basic_summarize(text, max_length=100):
129
- """Basic text summarization by extracting key sentences"""
130
- # Split into sentences
131
  sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
132
 
133
- # Score sentences by position (earlier is better) and length
134
  scored_sentences = []
135
  for i, sentence in enumerate(sentences):
136
- # Skip very short sentences
137
- if len(sentence.split()) < 4:
138
- continue
139
-
140
- # Simple scoring: earlier sentences get higher scores, penalize very long sentences
141
- score = 1.0 / (i + 1) - (0.01 * max(0, len(sentence.split()) - 20))
142
- scored_sentences.append((score, sentence))
143
 
144
- # Sort by score
145
  scored_sentences.sort(reverse=True)
146
-
147
- # Get top sentences until we reach max_length
148
  summary_sentences = []
149
  current_length = 0
150
 
@@ -155,183 +105,109 @@ def basic_summarize(text, max_length=100):
155
  else:
156
  break
157
 
158
- # Re-order sentences to match original order if we have more than one
159
  if summary_sentences:
160
- original_order = []
161
- for sentence in summary_sentences:
162
- original_order.append((sentences.index(sentence), sentence))
163
  original_order.sort()
164
  summary_sentences = [s for _, s in original_order]
165
 
166
- # Combine into a summary
167
- summary = " ".join(summary_sentences)
168
- return summary
169
-
170
- # Modified job fit evaluation function that uses a direct scoring approach
171
- def evaluate_job_fit(resume_summary, job_requirements, models):
172
- """
173
- Use a more direct method to evaluate job fit, rather than relying solely on sentiment analysis
174
- """
175
- start_time = time.time()
176
-
177
- # Extract basic information for context
178
- required_skills = job_requirements["required_skills"]
179
- years_required = job_requirements["years_experience"]
180
- job_title = job_requirements["title"]
181
- job_summary = job_requirements["summary"]
182
-
183
- # Extract skills from resume
184
- skills_mentioned = extract_skills(resume_summary)
185
-
186
- # Calculate skill match percentage
187
- matching_skills = [skill for skill in required_skills if skill in skills_mentioned]
188
- skill_match_percentage = len(matching_skills) / len(required_skills) if required_skills else 0
189
-
190
- # Extract experience level from resume
191
- experience_pattern = r'(\d+)\+?\s*years?\s*(?:of)?\s*experience'
192
- experience_match = re.search(experience_pattern, resume_summary, re.IGNORECASE)
193
- years_experience = 0
194
- if experience_match:
195
- try:
196
- years_experience = int(experience_match.group(1))
197
- except:
198
- years_experience = 0
199
-
200
- # Experience match
201
- exp_match_ratio = min(1.0, years_experience / max(1, years_required)) if years_required > 0 else 0.5
202
-
203
- # Check job title match
204
- job_title_lower = job_title.lower()
205
- title_match = 0
206
-
207
- # Look for job title words in resume
208
- title_words = [word for word in job_title_lower.split() if len(word) > 3]
209
- title_matches = sum(1 for word in title_words if word in resume_summary.lower())
210
- title_match = title_matches / len(title_words) if title_words else 0
211
-
212
- # Calculate scores for each dimension
213
- skill_score = min(2, skill_match_percentage * 3) # 0-2 scale
214
- exp_score = min(2, exp_match_ratio * 2) # 0-2 scale
215
- title_score = min(2, title_match * 2) # 0-2 scale
216
-
217
- # Extract name, age, industry from resume summary
218
- name_match = re.search(r'Name:\s*(.*?)(?=\n|\Z)', resume_summary)
219
- name = name_match.group(1).strip() if name_match else "The candidate"
220
-
221
- age_match = re.search(r'Age:\s*(.*?)(?=\n|\Z)', resume_summary)
222
- age = age_match.group(1).strip() if age_match else "unspecified age"
223
-
224
- industry_match = re.search(r'Expected Industry:\s*(.*?)(?=\n|\Z)', resume_summary)
225
- industry = industry_match.group(1).strip() if industry_match else "unspecified industry"
226
-
227
- # Calculate weighted final score
228
- # Skills: 50%, Experience: 30%, Title match: 20%
229
- weighted_score = (skill_score * 0.5) + (exp_score * 0.3) + (title_score * 0.2)
230
-
231
- # Convert to integer score (0-2)
232
- if weighted_score >= 1.5:
233
- fit_score = 2 # Good fit
234
- elif weighted_score >= 0.8:
235
- fit_score = 1 # Potential fit
236
- else:
237
- fit_score = 0 # Not a fit
238
-
239
- # Generate assessment text based on score
240
- missing_skills = [skill for skill in required_skills if skill not in skills_mentioned]
241
-
242
- if fit_score == 2:
243
- fit_assessment = f"{fit_score}: GOOD FIT - {name} demonstrates strong alignment with the {job_title} position. Their background in {industry} and professional experience appear well-suited for this role's requirements. The technical expertise matches what the position demands."
244
- elif fit_score == 1:
245
- fit_assessment = f"{fit_score}: POTENTIAL FIT - {name} shows potential for the {job_title} role with some relevant experience, though there are gaps in certain technical areas. Their {industry} background provides partial alignment with the position requirements. Additional training might be needed in {', '.join(missing_skills[:2])} if pursuing this opportunity."
246
- else:
247
- fit_assessment = f"{fit_score}: NO FIT - {name}'s current background shows limited alignment with this {job_title} position. Their experience level and technical background differ significantly from the role requirements. A position better matching their {industry} expertise might be more suitable."
248
-
249
- execution_time = time.time() - start_time
250
-
251
- return fit_assessment, fit_score, execution_time
252
 
253
  #####################################
254
- # Function: Extract Text from File
255
  #####################################
256
  @st.cache_data(show_spinner=False)
257
  def extract_text_from_file(file_obj):
258
- """
259
- Extract text from .docx and .doc files.
260
- Returns the extracted text or an error message if extraction fails.
261
- """
262
  filename = file_obj.name
263
  ext = os.path.splitext(filename)[1].lower()
264
- text = ""
265
-
266
  if ext == ".docx":
267
  try:
268
  document = docx.Document(file_obj)
269
  text = "\n".join(para.text for para in document.paragraphs if para.text.strip())
270
  except Exception as e:
271
- text = f"Error processing DOCX file: {e}"
272
  elif ext == ".doc":
273
  try:
274
- # For .doc files, we need to save to a temp file
275
  with tempfile.NamedTemporaryFile(delete=False, suffix='.doc') as temp_file:
276
  temp_file.write(file_obj.getvalue())
277
  temp_path = temp_file.name
278
-
279
- # Use docx2txt which is generally faster
280
- try:
281
- text = docx2txt.process(temp_path)
282
- except Exception:
283
- text = "Could not process .doc file. Please convert to .docx format."
284
-
285
- # Clean up temp file
286
  os.unlink(temp_path)
287
  except Exception as e:
288
- text = f"Error processing DOC file: {e}"
289
  elif ext == ".txt":
290
  try:
291
  text = file_obj.getvalue().decode("utf-8")
292
  except Exception as e:
293
- text = f"Error processing TXT file: {e}"
294
  else:
295
- text = "Unsupported file type. Please upload a .docx, .doc, or .txt file."
296
 
297
- # Limit text size for faster processing
298
  return text[:15000] if text else text
299
 
300
- #####################################
301
- # Functions for Information Extraction
302
- #####################################
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
303
 
304
- # Extract age from resume
305
  def extract_age(text):
306
  """Extract candidate age from resume text"""
307
- # Simplified: just check a few common patterns
308
  age_patterns = [
309
  r'age:?\s*(\d{1,2})',
310
  r'(\d{1,2})\s*years\s*old',
311
- r'dob:.*(\d{4})', # Year of birth
312
- r'date of birth:.*(\d{4})' # Year of birth
313
  ]
314
 
315
  text_lower = text.lower()
316
  for pattern in age_patterns:
317
  matches = re.search(pattern, text_lower)
318
  if matches:
319
- # If it's a year of birth, calculate approximate age
320
- if len(matches.group(1)) == 4: # It's a year
321
  try:
322
- birth_year = int(matches.group(1))
323
- current_year = 2025 # Current year
324
- return str(current_year - birth_year)
325
  except:
326
  pass
327
  return matches.group(1)
328
 
329
  return "Not specified"
330
 
331
- # Extract industry preference
332
  def extract_industry(text):
333
  """Extract expected job industry from resume"""
334
- # Common industry keywords
335
  industry_keywords = {
336
  "Technology": ["software", "programming", "developer", "IT", "tech", "computer", "digital"],
337
  "Finance": ["banking", "financial", "accounting", "finance", "analyst"],
@@ -346,23 +222,13 @@ def extract_industry(text):
346
  }
347
 
348
  text_lower = text.lower()
349
- industry_counts = {}
350
-
351
- for industry, keywords in industry_keywords.items():
352
- count = sum(text_lower.count(keyword.lower()) for keyword in keywords)
353
- if count > 0:
354
- industry_counts[industry] = count
355
-
356
- if industry_counts:
357
- # Return the industry with the highest keyword count
358
- return max(industry_counts.items(), key=lambda x: x[1])[0]
359
 
360
- return "Not clearly specified"
361
 
362
- # Extract job position preference
363
  def extract_job_position(text):
364
  """Extract expected job position from resume"""
365
- # Look for objective or summary section
366
  objective_patterns = [
367
  r'objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
368
  r'career\s*objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
@@ -376,28 +242,21 @@ def extract_job_position(text):
376
  match = re.search(pattern, text_lower, re.IGNORECASE | re.DOTALL)
377
  if match:
378
  objective_text = match.group(1).strip()
379
- # Look for job titles in the objective
380
  job_titles = ["developer", "engineer", "analyst", "manager", "director", "specialist",
381
  "coordinator", "consultant", "designer", "architect", "administrator"]
382
 
383
  for title in job_titles:
384
  if title in objective_text:
385
- # Try to get the full title with context
386
  title_pattern = r'(?:a|an)?\s*(\w+\s+' + title + r'|\w+\s+\w+\s+' + title + r')'
387
  title_match = re.search(title_pattern, objective_text)
388
  if title_match:
389
  return title_match.group(1).strip().title()
390
  return title.title()
391
 
392
- # If no specific title found but we have objective text, return a summary
393
  if len(objective_text) > 10:
394
- # Truncate and clean up objective
395
  words = objective_text.split()
396
- if len(words) > 10:
397
- return " ".join(words[:10]).title() + "..."
398
- return objective_text.title()
399
 
400
- # Check current/most recent job title
401
  job_patterns = [
402
  r'experience:.*?(\w+\s+\w+(?:\s+\w+)?)(?=\s*at|\s*\(|\s*-|\s*,|\s*\d{4}|\n)',
403
  r'(\w+\s+\w+(?:\s+\w+)?)\s*\(\s*current\s*\)',
@@ -411,64 +270,11 @@ def extract_job_position(text):
411
 
412
  return "Not explicitly stated"
413
 
414
- # Extract name
415
- @lru_cache(maxsize=32)
416
- def extract_name(text_start):
417
- """Extract candidate name from the beginning of resume text"""
418
- # Only use the first 500 characters to speed up processing
419
- lines = text_start.split('\n')
420
-
421
- # Check first few non-empty lines for potential names
422
- potential_name_lines = [line.strip() for line in lines[:5] if line.strip()]
423
-
424
- if potential_name_lines:
425
- # First line is often the name if it's short and doesn't contain common headers
426
- first_line = potential_name_lines[0]
427
- if 5 <= len(first_line) <= 40 and not any(x in first_line.lower() for x in ["resume", "cv", "curriculum", "vitae", "profile"]):
428
- return first_line
429
-
430
- # Look for lines that might contain a name
431
- for line in potential_name_lines[:3]:
432
- if len(line.split()) <= 4 and not any(x in line.lower() for x in ["address", "phone", "email", "resume", "cv"]):
433
- return line
434
-
435
- return "Unknown (please extract from resume)"
436
-
437
- # Extract skills
438
- def extract_skills(text):
439
- """Extract key skills from the resume"""
440
- # Common skill categories - reduced keyword list for speed
441
- skill_categories = {
442
- "Programming": ["Python", "Java", "JavaScript", "HTML", "CSS", "SQL", "C++", "C#", "Go", "React", "Angular", "Vue", "Node.js"],
443
- "Data Science": ["Machine Learning", "Data Analysis", "Statistics", "TensorFlow", "PyTorch", "AI", "Algorithms", "NLP", "Deep Learning"],
444
- "Database": ["SQL", "MySQL", "MongoDB", "Database", "NoSQL", "PostgreSQL", "Oracle", "Redis"],
445
- "Web Development": ["React", "Angular", "Node.js", "Frontend", "Backend", "Full-Stack", "REST API", "GraphQL"],
446
- "Software Development": ["Agile", "Scrum", "Git", "DevOps", "Docker", "System Design", "CI/CD", "Jenkins"],
447
- "Cloud": ["AWS", "Azure", "Google Cloud", "Cloud Computing", "Lambda", "S3", "EC2"],
448
- "Security": ["Cybersecurity", "Network Security", "Encryption", "Security"],
449
- "Business": ["Project Management", "Business Analysis", "Leadership", "Teamwork", "Agile", "Scrum"],
450
- "Design": ["UX/UI", "User Experience", "Design Thinking", "Adobe", "Figma"]
451
- }
452
-
453
- # Process everything at once
454
- text_lower = text.lower()
455
-
456
- # Skills extraction
457
- all_skills = []
458
- for category, skills in skill_categories.items():
459
- for skill in skills:
460
- if skill.lower() in text_lower:
461
- all_skills.append(skill)
462
-
463
- return all_skills
464
-
465
  #####################################
466
- # Function: Summarize Resume Text
467
  #####################################
468
  def summarize_resume_text(resume_text, models):
469
- """
470
- Generates a structured summary of the resume text with the critical information
471
- """
472
  start_time = time.time()
473
 
474
  # Extract critical information
@@ -478,27 +284,17 @@ def summarize_resume_text(resume_text, models):
478
  job_position = extract_job_position(resume_text)
479
  skills = extract_skills(resume_text)
480
 
481
- # Generate overall summary using the pipeline model if available
482
  try:
483
  if has_pipeline and 'summarizer' in models:
484
- # Truncate text to avoid issues with very long resumes
485
- truncated_text = resume_text[:2000] # Limit input to 2000 chars
486
-
487
- # Use pipeline model to generate the summary
488
- model_summary = models['summarizer'](
489
- truncated_text,
490
- max_length=100,
491
- min_length=30,
492
- do_sample=False
493
- )[0]['summary_text']
494
  else:
495
- # Fallback if pipeline is not available
496
  model_summary = summarize_text(resume_text, models, max_length=100)
497
  except Exception as e:
498
  st.warning(f"Error in resume summarization: {e}")
499
  model_summary = "Error generating summary. Please check the original resume."
500
 
501
- # Format the structured summary with different paragraphs for each critical piece
502
  formatted_summary = f"Name: {name}\n\n"
503
  formatted_summary += f"Age: {age}\n\n"
504
  formatted_summary += f"Expected Industry: {industry}\n\n"
@@ -506,33 +302,18 @@ def summarize_resume_text(resume_text, models):
506
  formatted_summary += f"Skills: {', '.join(skills)}\n\n"
507
  formatted_summary += f"Summary: {model_summary}"
508
 
509
- execution_time = time.time() - start_time
510
-
511
- return formatted_summary, execution_time
512
 
513
- #####################################
514
- # Function: Extract Job Requirements
515
- #####################################
516
  def extract_job_requirements(job_description, models):
517
- """
518
- Extract key requirements from a job description
519
- """
520
- # Common technical skills to look for - expanded list for better matching
521
  tech_skills = [
522
- "Python", "Java", "C++", "JavaScript", "TypeScript", "Go", "Rust", "SQL", "Ruby", "PHP", "Swift", "Kotlin",
523
- "React", "Angular", "Vue", "Node.js", "HTML", "CSS", "Django", "Flask", "Spring", "REST API", "GraphQL",
524
- "Machine Learning", "TensorFlow", "PyTorch", "Data Science", "AI", "Big Data", "Deep Learning", "NLP",
525
- "AWS", "Azure", "GCP", "Docker", "Kubernetes", "CI/CD", "Jenkins", "GitHub Actions", "Terraform",
526
- "MySQL", "PostgreSQL", "MongoDB", "Redis", "Elasticsearch", "DynamoDB", "Cassandra", "Oracle",
527
- "Project Management", "Agile", "Scrum", "UX/UI", "Design", "Leadership", "Team Management",
528
- "Communication Skills", "Problem Solving", "Critical Thinking", "Blockchain", "Information Security",
529
- "Networking", "Linux", "Windows Server", "Excel", "PowerPoint", "Word", "Tableau", "Power BI", "R",
530
- "SPSS", "SAS", "Spark", "Hadoop", "JIRA", "Confluence", "Git", "SVN", "Testing", "QA", "DevOps",
531
- "Full Stack", "Mobile Development", "Android", "iOS", "React Native", "Flutter", "SEO", "Marketing",
532
- "Sales", "Customer Service", "Business Analysis", "Data Analysis", "Accounting", "Finance"
533
  ]
534
 
535
- # Clean the text for processing
536
  clean_job_text = job_description.lower()
537
 
538
  # Extract job title
@@ -547,7 +328,7 @@ def extract_job_requirements(job_description, models):
547
  title_match = re.search(pattern, clean_job_text, re.IGNORECASE)
548
  if title_match:
549
  potential_title = title_match.group(1).strip() if len(title_match.groups()) >= 1 else title_match.group(2).strip()
550
- if 3 <= len(potential_title) <= 50: # Reasonable title length
551
  job_title = potential_title.capitalize()
552
  break
553
 
@@ -570,52 +351,98 @@ def extract_job_requirements(job_description, models):
570
  # Extract required skills
571
  required_skills = [skill for skill in tech_skills if re.search(r'\b' + re.escape(skill.lower()) + r'\b', clean_job_text)]
572
 
573
- # If no skills found, use some default important ones to avoid empty lists
574
  if not required_skills:
575
- # Extract some common words that might be skills
576
  words = re.findall(r'\b\w{4,}\b', clean_job_text)
577
  word_counts = {}
578
  for word in words:
579
  if word not in ["with", "that", "this", "have", "from", "they", "will", "what", "your", "their", "about"]:
580
  word_counts[word] = word_counts.get(word, 0) + 1
581
-
582
- # Get the top 5 most common words as potential skills
583
  sorted_words = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)
584
  required_skills = [word.capitalize() for word, _ in sorted_words[:5]]
585
 
586
- # Create a simple summary of the job using the summarize_text function
587
  job_summary = summarize_text(job_description, models, max_length=100)
588
 
589
- # Format the job requirements
590
- job_requirements = {
591
  "title": job_title,
592
  "years_experience": years_required,
593
  "required_skills": required_skills,
594
  "summary": job_summary
595
  }
596
-
597
- return job_requirements
598
 
599
- #####################################
600
- # Function: Analyze Job Fit
601
- #####################################
602
- def analyze_job_fit(resume_summary, job_description, models):
603
- """
604
- Analyze how well the candidate fits the job requirements.
605
- Returns a fit score (0-2) and an assessment.
606
- """
607
  start_time = time.time()
608
 
609
- # Extract job requirements
610
- job_requirements = extract_job_requirements(job_description, models)
 
 
 
611
 
612
- # Use our more thorough evaluation function
613
- assessment, fit_score, execution_time = evaluate_job_fit(resume_summary, job_requirements, models)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
614
 
615
- return assessment, fit_score, execution_time
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
616
 
617
- # Load models at startup
618
- models = load_models()
 
 
 
 
619
 
620
  #####################################
621
  # Main Function
@@ -623,21 +450,17 @@ models = load_models()
623
  def main():
624
  """Main function for the Streamlit application"""
625
  st.title("Resume-Job Fit Analyzer")
626
- st.markdown(
627
- """
628
- Upload your resume file in **.docx**, **.doc**, or **.txt** format and enter a job description to see how well you match with the job requirements.
629
- """
630
- )
631
 
632
- # Resume upload
 
 
 
633
  uploaded_file = st.file_uploader("Upload your resume (.docx, .doc, or .txt)", type=["docx", "doc", "txt"])
634
-
635
- # Job description input
636
  job_description = st.text_area("Enter Job Description", height=200, placeholder="Paste the job description here...")
637
 
638
- # Process button with optimized flow
639
  if uploaded_file is not None and job_description and st.button("Analyze Job Fit"):
640
- # Create a placeholder for the progress bar
641
  progress_bar = st.progress(0)
642
  status_text = st.empty()
643
 
@@ -662,31 +485,19 @@ def main():
662
  status_text.text("Step 3/3: Evaluating job fit (this will take a moment)...")
663
  assessment, fit_score, assessment_time = analyze_job_fit(summary, job_description, models)
664
  progress_bar.progress(100)
665
-
666
- # Clear status messages
667
  status_text.empty()
668
 
669
- # Display job fit results
670
  st.subheader("Job Fit Assessment")
671
-
672
- # Display fit score with label
673
- fit_labels = {
674
- 0: "NOT FIT",
675
- 1: "POTENTIAL FIT",
676
- 2: "GOOD FIT"
677
- }
678
 
679
- # Show the score prominently with appropriate coloring
680
- score_label = fit_labels[fit_score]
681
  score_colors = {0: "red", 1: "orange", 2: "green"}
682
- st.markdown(f"<h2 style='color: {score_colors[fit_score]};'>{score_label}</h2>", unsafe_allow_html=True)
683
-
684
- # Display assessment
685
  st.markdown(assessment)
686
-
687
  st.info(f"Analysis completed in {(summarization_time + assessment_time):.2f} seconds")
688
 
689
- # Add potential next steps based on the fit score
690
  st.subheader("Recommended Next Steps")
691
 
692
  if fit_score == 2:
@@ -708,6 +519,5 @@ def main():
708
  - Consider similar roles with fewer experience requirements
709
  """)
710
 
711
- # Run the main function
712
  if __name__ == "__main__":
713
  main()
 
9
  import pandas as pd
10
  from functools import lru_cache
11
 
12
+ # Handle imports
13
  try:
14
  from transformers import pipeline
15
  has_pipeline = True
 
20
  st.warning("Using basic transformers functionality instead of pipeline API")
21
 
22
  # Set page title and hide sidebar
23
+ st.set_page_config(page_title="Resume-Job Fit Analyzer", initial_sidebar_state="collapsed")
24
+ st.markdown("""<style>[data-testid="collapsedControl"] {display: none;}section[data-testid="stSidebar"] {display: none;}</style>""", unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
25
 
26
  #####################################
27
+ # Preload Models & Helper Functions
28
  #####################################
29
  @st.cache_resource(show_spinner=True)
30
  def load_models():
 
34
 
35
  # Load summarization model
36
  if has_pipeline:
37
+ models['summarizer'] = pipeline("summarization", model="Falconsai/text_summarization", max_length=100, truncation=True)
 
 
 
 
 
 
38
  else:
 
39
  try:
40
  models['summarizer_model'] = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/text_summarization")
41
  models['summarizer_tokenizer'] = AutoTokenizer.from_pretrained("Falconsai/text_summarization")
42
  except Exception as e:
43
  st.error(f"Error loading summarization model: {e}")
44
+ models['summarizer_model'] = models['summarizer_tokenizer'] = None
 
45
 
46
+ # Load evaluation model
47
  if has_pipeline:
48
+ models['evaluator'] = pipeline("sentiment-analysis", model="CR7CAD/RobertaFinetuned")
 
 
 
 
49
  else:
 
50
  try:
51
+ models['evaluator_model'] = AutoModelForSequenceClassification.from_pretrained("CR7CAD/RobertaFinetuned")
52
+ models['evaluator_tokenizer'] = AutoTokenizer.from_pretrained("CR7CAD/RobertaFinetuned")
 
 
 
 
53
  except Exception as e:
54
  st.error(f"Error loading sentiment model: {e}")
55
+ models['evaluator_model'] = models['evaluator_tokenizer'] = None
 
56
 
57
  return models
58
 
 
59
  def summarize_text(text, models, max_length=100):
60
+ """Summarize text using available models with fallbacks"""
61
  # Truncate input to prevent issues with long texts
62
+ input_text = text[:1024]
63
 
64
+ # Try pipeline first
65
  if has_pipeline and 'summarizer' in models:
 
66
  try:
67
+ return models['summarizer'](input_text)[0]['summary_text']
 
68
  except Exception as e:
69
  st.warning(f"Error in pipeline summarization: {e}")
70
 
71
+ # Try manual model
72
  if 'summarizer_model' in models and 'summarizer_tokenizer' in models and models['summarizer_model']:
73
  try:
74
  tokenizer = models['summarizer_tokenizer']
75
  model = models['summarizer_model']
 
 
76
  inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=1024)
77
+ summary_ids = model.generate(inputs.input_ids, max_length=max_length, min_length=30, num_beams=4, early_stopping=True)
78
+ return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
 
 
 
 
 
 
 
 
 
 
79
  except Exception as e:
80
  st.warning(f"Error in manual summarization: {e}")
81
 
82
+ # Fallback to basic summarization
83
  return basic_summarize(text, max_length)
84
 
 
85
  def basic_summarize(text, max_length=100):
86
+ """Basic extractive text summarization"""
 
87
  sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
88
 
89
+ # Score and filter sentences
90
  scored_sentences = []
91
  for i, sentence in enumerate(sentences):
92
+ if len(sentence.split()) >= 4:
93
+ score = 1.0 / (i + 1) - (0.01 * max(0, len(sentence.split()) - 20))
94
+ scored_sentences.append((score, sentence))
 
 
 
 
95
 
96
+ # Get top sentences
97
  scored_sentences.sort(reverse=True)
 
 
98
  summary_sentences = []
99
  current_length = 0
100
 
 
105
  else:
106
  break
107
 
108
+ # Restore original sentence order
109
  if summary_sentences:
110
+ original_order = [(sentences.index(s), s) for s in summary_sentences]
 
 
111
  original_order.sort()
112
  summary_sentences = [s for _, s in original_order]
113
 
114
+ return " ".join(summary_sentences)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
 
116
  #####################################
117
+ # Information Extraction Functions
118
  #####################################
119
  @st.cache_data(show_spinner=False)
120
  def extract_text_from_file(file_obj):
121
+ """Extract text from uploaded document file"""
 
 
 
122
  filename = file_obj.name
123
  ext = os.path.splitext(filename)[1].lower()
124
+
 
125
  if ext == ".docx":
126
  try:
127
  document = docx.Document(file_obj)
128
  text = "\n".join(para.text for para in document.paragraphs if para.text.strip())
129
  except Exception as e:
130
+ return f"Error processing DOCX file: {e}"
131
  elif ext == ".doc":
132
  try:
 
133
  with tempfile.NamedTemporaryFile(delete=False, suffix='.doc') as temp_file:
134
  temp_file.write(file_obj.getvalue())
135
  temp_path = temp_file.name
136
+
137
+ text = docx2txt.process(temp_path)
 
 
 
 
 
 
138
  os.unlink(temp_path)
139
  except Exception as e:
140
+ return f"Error processing DOC file: {e}"
141
  elif ext == ".txt":
142
  try:
143
  text = file_obj.getvalue().decode("utf-8")
144
  except Exception as e:
145
+ return f"Error processing TXT file: {e}"
146
  else:
147
+ return "Unsupported file type. Please upload a .docx, .doc, or .txt file."
148
 
 
149
  return text[:15000] if text else text
150
 
151
def extract_skills(text):
    """Extract key skills from the resume.

    Performs a case-insensitive substring search for each known skill
    keyword and returns matches in category order. Results are
    de-duplicated while preserving first-seen order, since several skills
    (e.g. "SQL", "React", "Agile") are listed under more than one category
    and were previously returned multiple times.
    """
    skill_keywords = {
        "Programming": ["Python", "Java", "JavaScript", "HTML", "CSS", "SQL", "C++", "C#", "Go", "React", "Angular", "Vue", "Node.js"],
        "Data Science": ["Machine Learning", "Data Analysis", "Statistics", "TensorFlow", "PyTorch", "AI", "Algorithms", "NLP", "Deep Learning"],
        "Database": ["SQL", "MySQL", "MongoDB", "Database", "NoSQL", "PostgreSQL", "Oracle", "Redis"],
        "Web Development": ["React", "Angular", "Node.js", "Frontend", "Backend", "Full-Stack", "REST API", "GraphQL"],
        "Software Development": ["Agile", "Scrum", "Git", "DevOps", "Docker", "System Design", "CI/CD", "Jenkins"],
        "Cloud": ["AWS", "Azure", "Google Cloud", "Cloud Computing", "Lambda", "S3", "EC2"],
        "Security": ["Cybersecurity", "Network Security", "Encryption", "Security"],
        "Business": ["Project Management", "Business Analysis", "Leadership", "Teamwork", "Agile", "Scrum"],
        "Design": ["UX/UI", "User Experience", "Design Thinking", "Adobe", "Figma"]
    }

    text_lower = text.lower()
    found = [skill for category, skills in skill_keywords.items()
             for skill in skills if skill.lower() in text_lower]
    # dict.fromkeys de-duplicates while keeping insertion order
    return list(dict.fromkeys(found))
168
+
169
@lru_cache(maxsize=32)
def extract_name(text_start):
    """Extract candidate name from the beginning of resume text"""
    # Consider only the first few non-empty lines of the document
    candidates = [ln.strip() for ln in text_start.split('\n')[:5] if ln.strip()]

    if candidates:
        # Heuristic 1: a plausibly-sized first line that is not a document
        # heading ("Resume", "Curriculum Vitae", ...) is taken as the name.
        header = candidates[0]
        if 5 <= len(header) <= 40 and not any(word in header.lower() for word in ["resume", "cv", "curriculum", "vitae", "profile"]):
            return header

    # Heuristic 2: a short line (<= 4 words) near the top that does not look
    # like contact details or a heading.
    for candidate in candidates[:3]:
        if len(candidate.split()) <= 4 and not any(word in candidate.lower() for word in ["address", "phone", "email", "resume", "cv"]):
            return candidate

    return "Unknown (please extract from resume)"
185
 
 
186
def extract_age(text):
    """Extract candidate age from resume text.

    Tries explicit age statements ("age: 29", "29 years old") first, then
    birth-year patterns ("dob: ... 1990"), converting a birth year to an
    age using the current year (previously hard-coded to 2025, which would
    drift wrong over time).

    Returns:
        The age as a string, or "Not specified" when nothing matches.
    """
    import datetime  # local import: the top-of-file import block is outside this section

    age_patterns = [
        r'age:?\s*(\d{1,2})',
        r'(\d{1,2})\s*years\s*old',
        r'dob:.*(\d{4})',
        r'date of birth:.*(\d{4})'
    ]

    text_lower = text.lower()
    for pattern in age_patterns:
        matches = re.search(pattern, text_lower)
        if matches:
            value = matches.group(1)
            # A 4-digit capture is a birth year: convert it to an age.
            if len(value) == 4:
                try:
                    return str(datetime.date.today().year - int(value))
                except ValueError:
                    # Defensive only -- the capture group is all digits.
                    pass
            return value

    return "Not specified"
208
 
 
209
  def extract_industry(text):
210
  """Extract expected job industry from resume"""
 
211
  industry_keywords = {
212
  "Technology": ["software", "programming", "developer", "IT", "tech", "computer", "digital"],
213
  "Finance": ["banking", "financial", "accounting", "finance", "analyst"],
 
222
  }
223
 
224
  text_lower = text.lower()
225
+ industry_counts = {industry: sum(text_lower.count(keyword.lower()) for keyword in keywords)
226
+ for industry, keywords in industry_keywords.items()}
 
 
 
 
 
 
 
 
227
 
228
+ return max(industry_counts.items(), key=lambda x: x[1])[0] if any(industry_counts.values()) else "Not clearly specified"
229
 
 
230
  def extract_job_position(text):
231
  """Extract expected job position from resume"""
 
232
  objective_patterns = [
233
  r'objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
234
  r'career\s*objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
 
242
  match = re.search(pattern, text_lower, re.IGNORECASE | re.DOTALL)
243
  if match:
244
  objective_text = match.group(1).strip()
 
245
  job_titles = ["developer", "engineer", "analyst", "manager", "director", "specialist",
246
  "coordinator", "consultant", "designer", "architect", "administrator"]
247
 
248
  for title in job_titles:
249
  if title in objective_text:
 
250
  title_pattern = r'(?:a|an)?\s*(\w+\s+' + title + r'|\w+\s+\w+\s+' + title + r')'
251
  title_match = re.search(title_pattern, objective_text)
252
  if title_match:
253
  return title_match.group(1).strip().title()
254
  return title.title()
255
 
 
256
  if len(objective_text) > 10:
 
257
  words = objective_text.split()
258
+ return " ".join(words[:10]).title() + "..." if len(words) > 10 else objective_text.title()
 
 
259
 
 
260
  job_patterns = [
261
  r'experience:.*?(\w+\s+\w+(?:\s+\w+)?)(?=\s*at|\s*\(|\s*-|\s*,|\s*\d{4}|\n)',
262
  r'(\w+\s+\w+(?:\s+\w+)?)\s*\(\s*current\s*\)',
 
270
 
271
  return "Not explicitly stated"
272
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
273
  #####################################
274
+ # Core Analysis Functions
275
  #####################################
276
  def summarize_resume_text(resume_text, models):
277
+ """Generate a structured summary of resume text"""
 
 
278
  start_time = time.time()
279
 
280
  # Extract critical information
 
284
  job_position = extract_job_position(resume_text)
285
  skills = extract_skills(resume_text)
286
 
287
+ # Generate overall summary
288
  try:
289
  if has_pipeline and 'summarizer' in models:
290
+ model_summary = models['summarizer'](resume_text[:2000], max_length=100, min_length=30, do_sample=False)[0]['summary_text']
 
 
 
 
 
 
 
 
 
291
  else:
 
292
  model_summary = summarize_text(resume_text, models, max_length=100)
293
  except Exception as e:
294
  st.warning(f"Error in resume summarization: {e}")
295
  model_summary = "Error generating summary. Please check the original resume."
296
 
297
+ # Format the structured summary
298
  formatted_summary = f"Name: {name}\n\n"
299
  formatted_summary += f"Age: {age}\n\n"
300
  formatted_summary += f"Expected Industry: {industry}\n\n"
 
302
  formatted_summary += f"Skills: {', '.join(skills)}\n\n"
303
  formatted_summary += f"Summary: {model_summary}"
304
 
305
+ return formatted_summary, time.time() - start_time
 
 
306
 
 
 
 
307
  def extract_job_requirements(job_description, models):
308
+ """Extract key requirements from a job description"""
309
+ # Combined skill list (abridged for brevity)
 
 
310
  tech_skills = [
311
+ "Python", "Java", "C++", "JavaScript", "TypeScript", "SQL", "HTML", "CSS", "React", "Angular",
312
+ "Machine Learning", "Data Science", "AI", "AWS", "Azure", "Docker", "Kubernetes", "MySQL",
313
+ "MongoDB", "PostgreSQL", "Project Management", "Agile", "Scrum", "Leadership", "Communication",
314
+ "Problem Solving", "Git", "DevOps", "Full Stack", "Mobile Development", "Android", "iOS"
 
 
 
 
 
 
 
315
  ]
316
 
 
317
  clean_job_text = job_description.lower()
318
 
319
  # Extract job title
 
328
  title_match = re.search(pattern, clean_job_text, re.IGNORECASE)
329
  if title_match:
330
  potential_title = title_match.group(1).strip() if len(title_match.groups()) >= 1 else title_match.group(2).strip()
331
+ if 3 <= len(potential_title) <= 50:
332
  job_title = potential_title.capitalize()
333
  break
334
 
 
351
  # Extract required skills
352
  required_skills = [skill for skill in tech_skills if re.search(r'\b' + re.escape(skill.lower()) + r'\b', clean_job_text)]
353
 
354
+ # Fallback if no skills found
355
  if not required_skills:
 
356
  words = re.findall(r'\b\w{4,}\b', clean_job_text)
357
  word_counts = {}
358
  for word in words:
359
  if word not in ["with", "that", "this", "have", "from", "they", "will", "what", "your", "their", "about"]:
360
  word_counts[word] = word_counts.get(word, 0) + 1
 
 
361
  sorted_words = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)
362
  required_skills = [word.capitalize() for word, _ in sorted_words[:5]]
363
 
 
364
  job_summary = summarize_text(job_description, models, max_length=100)
365
 
366
+ return {
 
367
  "title": job_title,
368
  "years_experience": years_required,
369
  "required_skills": required_skills,
370
  "summary": job_summary
371
  }
 
 
372
 
373
def evaluate_job_fit(resume_summary, job_requirements, models):
    """Evaluate how well a resume matches job requirements.

    Combines three weighted signals -- skill overlap (50%), years of
    experience (30%) and job-title word match (20%) -- into a three-level
    fit score with a human-readable assessment.

    Args:
        resume_summary: structured resume summary text; expects "Name:" and
            "Expected Industry:" lines as produced by summarize_resume_text.
        job_requirements: dict with "required_skills", "years_experience"
            and "title" keys (see extract_job_requirements).
        models: loaded model dict; unused here but kept for interface
            consistency with the other analysis functions.

    Returns:
        Tuple of (fit_assessment_text, fit_score, elapsed_seconds) where
        fit_score is 0 (no fit), 1 (potential fit) or 2 (good fit).
    """
    start_time = time.time()

    # Pull the job-side requirements and the resume-side skills
    required_skills = job_requirements["required_skills"]
    years_required = job_requirements["years_experience"]
    job_title = job_requirements["title"]
    skills_mentioned = extract_skills(resume_summary)

    # Fraction of the required skills that also appear in the resume
    matching_skills = [skill for skill in required_skills if skill in skills_mentioned]
    skill_match_percentage = len(matching_skills) / len(required_skills) if required_skills else 0

    # Extract experience level from resume (e.g. "5+ years of experience")
    experience_pattern = r'(\d+)\+?\s*years?\s*(?:of)?\s*experience'
    years_experience = 0
    experience_match = re.search(experience_pattern, resume_summary, re.IGNORECASE)
    if experience_match:
        try:
            years_experience = int(experience_match.group(1))
        except ValueError:
            # Was a bare except; the capture group is digits, so this is
            # purely defensive.
            pass

    # Ratio of candidate experience to required experience, capped at 1.0;
    # neutral 0.5 when the job states no experience requirement.
    exp_match_ratio = min(1.0, years_experience / max(1, years_required)) if years_required > 0 else 0.5

    # Fraction of significant (>3 char) job-title words present in the resume
    title_words = [word for word in job_title.lower().split() if len(word) > 3]
    title_matches = sum(1 for word in title_words if word in resume_summary.lower())
    title_match = title_matches / len(title_words) if title_words else 0

    # Individual component scores, each capped at 2
    skill_score = min(2, skill_match_percentage * 3)
    exp_score = min(2, exp_match_ratio * 2)
    title_score = min(2, title_match * 2)

    # Candidate name and industry, used to personalize the assessment text
    name_match = re.search(r'Name:\s*(.*?)(?=\n|\Z)', resume_summary)
    name = name_match.group(1).strip() if name_match else "The candidate"

    industry_match = re.search(r'Expected Industry:\s*(.*?)(?=\n|\Z)', resume_summary)
    industry = industry_match.group(1).strip() if industry_match else "unspecified industry"

    # Weighted aggregate: skills 50%, experience 30%, title 20%
    weighted_score = (skill_score * 0.5) + (exp_score * 0.3) + (title_score * 0.2)

    # Bucket the weighted score into the three-level fit score
    if weighted_score >= 1.5:
        fit_score = 2  # Good fit
    elif weighted_score >= 0.8:
        fit_score = 1  # Potential fit
    else:
        fit_score = 0  # Not a fit

    # Required skills the resume does not mention (cited in the assessment)
    missing_skills = [skill for skill in required_skills if skill not in skills_mentioned]

    if fit_score == 2:
        fit_assessment = f"{fit_score}: GOOD FIT - {name} demonstrates strong alignment with the {job_title} position. Their background in {industry} and professional experience appear well-suited for this role's requirements. The technical expertise matches what the position demands."
    elif fit_score == 1:
        fit_assessment = f"{fit_score}: POTENTIAL FIT - {name} shows potential for the {job_title} role with some relevant experience, though there are gaps in certain technical areas. Their {industry} background provides partial alignment with the position requirements. Additional training might be needed in {', '.join(missing_skills[:2])} if pursuing this opportunity."
    else:
        fit_assessment = f"{fit_score}: NO FIT - {name}'s current background shows limited alignment with this {job_title} position. Their experience level and technical background differ significantly from the role requirements. A position better matching their {industry} expertise might be more suitable."

    return fit_assessment, fit_score, time.time() - start_time
439
 
440
def analyze_job_fit(resume_summary, job_description, models):
    """End-to-end job fit analysis.

    Extracts the job requirements from the description, then evaluates the
    resume summary against them.

    Returns:
        Tuple of (assessment_text, fit_score, elapsed_seconds). The elapsed
        time spans both requirement extraction and evaluation, so the
        per-evaluation timing from evaluate_job_fit is deliberately ignored.
    """
    start_time = time.time()
    job_requirements = extract_job_requirements(job_description, models)
    # The third element of evaluate_job_fit's result is its own timing,
    # superseded by the total measured here (was bound to an unused name).
    assessment, fit_score, _ = evaluate_job_fit(resume_summary, job_requirements, models)
    return assessment, fit_score, time.time() - start_time
446
 
447
  #####################################
448
  # Main Function
 
450
  def main():
451
  """Main function for the Streamlit application"""
452
  st.title("Resume-Job Fit Analyzer")
453
+ st.markdown("Upload your resume file in **.docx**, **.doc**, or **.txt** format and enter a job description to see how well you match with the job requirements.")
 
 
 
 
454
 
455
+ # Load models
456
+ models = load_models()
457
+
458
+ # User inputs
459
  uploaded_file = st.file_uploader("Upload your resume (.docx, .doc, or .txt)", type=["docx", "doc", "txt"])
 
 
460
  job_description = st.text_area("Enter Job Description", height=200, placeholder="Paste the job description here...")
461
 
462
+ # Process when button clicked
463
  if uploaded_file is not None and job_description and st.button("Analyze Job Fit"):
 
464
  progress_bar = st.progress(0)
465
  status_text = st.empty()
466
 
 
485
  status_text.text("Step 3/3: Evaluating job fit (this will take a moment)...")
486
  assessment, fit_score, assessment_time = analyze_job_fit(summary, job_description, models)
487
  progress_bar.progress(100)
 
 
488
  status_text.empty()
489
 
490
+ # Display results
491
  st.subheader("Job Fit Assessment")
 
 
 
 
 
 
 
492
 
493
+ # Display score with appropriate styling
494
+ fit_labels = {0: "NOT FIT", 1: "POTENTIAL FIT", 2: "GOOD FIT"}
495
  score_colors = {0: "red", 1: "orange", 2: "green"}
496
+ st.markdown(f"<h2 style='color: {score_colors[fit_score]};'>{fit_labels[fit_score]}</h2>", unsafe_allow_html=True)
 
 
497
  st.markdown(assessment)
 
498
  st.info(f"Analysis completed in {(summarization_time + assessment_time):.2f} seconds")
499
 
500
+ # Recommendations
501
  st.subheader("Recommended Next Steps")
502
 
503
  if fit_score == 2:
 
519
  - Consider similar roles with fewer experience requirements
520
  """)
521
 
 
522
  if __name__ == "__main__":
523
  main()